author     Dave Airlie <airlied@redhat.com>  2014-12-01 19:58:33 -0500
committer  Dave Airlie <airlied@redhat.com>  2014-12-01 19:58:33 -0500
commit     e8115e79aa62b6ebdb3e8e61ca4092cc32938afc (patch)
tree       42b791ab54ef9d5c73dcd49f907b8b37fa2f7e19 /mm
parent     9be23ae4350bfd71c0cc2ea3494671ee90e5603b (diff)
parent     009d0431c3914de64666bec0d350e54fdd59df6a (diff)
Merge tag 'v3.18-rc7' into drm-next
This fixes a bunch of conflicts prior to merging the i915 tree.

Linux 3.18-rc7

Conflicts:
	drivers/gpu/drm/exynos/exynos_drm_drv.c
	drivers/gpu/drm/i915/i915_drv.c
	drivers/gpu/drm/i915/intel_pm.c
	drivers/gpu/drm/tegra/dc.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c          9
-rw-r--r--  mm/compaction.c      18
-rw-r--r--  mm/internal.h        25
-rw-r--r--  mm/iov_iter.c         4
-rw-r--r--  mm/memory_hotplug.c  26
-rw-r--r--  mm/nobootmem.c        8
-rw-r--r--  mm/page_alloc.c      68
-rw-r--r--  mm/page_isolation.c  43
-rw-r--r--  mm/slab_common.c      4

9 files changed, 153 insertions(+), 52 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8a000cebb0d7..477be696511d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
 static int reset_managed_pages_done __initdata;
 
-static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
+void reset_node_managed_pages(pg_data_t *pgdat)
 {
 	struct zone *z;
 
-	if (reset_managed_pages_done)
-		return;
-
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
 		z->managed_pages = 0;
 }
@@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void)
 {
 	struct pglist_data *pgdat;
 
+	if (reset_managed_pages_done)
+		return;
+
 	for_each_online_pgdat(pgdat)
 		reset_node_managed_pages(pgdat);
+
 	reset_managed_pages_done = 1;
 }
 
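The bootmem.c change above is one half of a refactor pattern: the once-only guard (reset_managed_pages_done) moves out of the per-node helper and up into the all-nodes entry point, so reset_node_managed_pages() can be reused later by memory hot-plug (see the mm/memory_hotplug.c hunks below). A minimal userspace sketch of that pattern, with hypothetical names rather than the kernel API:

#include <stdio.h>

static int reset_done;          /* once-only guard, now at the outer level */

/* Per-node helper: unconditionally resets, so hot-plug can reuse it. */
static void reset_node(int node)
{
	printf("resetting node %d\n", node);
}

/* Boot-time entry point: runs the helper over every node exactly once. */
static void reset_all_nodes(void)
{
	if (reset_done)
		return;

	for (int node = 0; node < 4; node++)
		reset_node(node);

	reset_done = 1;
}

int main(void)
{
	reset_all_nodes();      /* resets nodes 0..3 */
	reset_all_nodes();      /* no-op: guard already set */
	reset_node(2);          /* hot-plug path: still allowed */
	return 0;
}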
diff --git a/mm/compaction.c b/mm/compaction.c
index ec74cf0123ef..f9792ba3537c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -479,6 +479,16 @@ isolate_freepages_range(struct compact_control *cc,
 
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
+		/*
+		 * pfn could pass the block_end_pfn if isolated freepage
+		 * is more than pageblock order. In this case, we adjust
+		 * scanning range to right one.
+		 */
+		if (pfn >= block_end_pfn) {
+			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+			block_end_pfn = min(block_end_pfn, end_pfn);
+		}
+
 		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
 			break;
 
@@ -1029,8 +1039,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	}
 
 	acct_isolated(zone, cc);
-	/* Record where migration scanner will be restarted */
-	cc->migrate_pfn = low_pfn;
+	/*
+	 * Record where migration scanner will be restarted. If we end up in
+	 * the same pageblock as the free scanner, make the scanners fully
+	 * meet so that compact_finished() terminates compaction.
+	 */
+	cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
 
 	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
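The first compaction.c hunk relies on ALIGN() rounding pfn + 1 up to the next pageblock boundary after a high-order isolation overshoots the current block. A quick standalone check of that arithmetic; the ALIGN() definition mirrors the kernel's power-of-two version, and pageblock_nr_pages = 512 is an assumed typical value (order-9 pageblocks):

#include <stdio.h>

#define ALIGN(x, a)         (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define pageblock_nr_pages  512UL      /* assumed: order-9 pageblocks */

int main(void)
{
	/* An isolated order-10 freepage advances pfn past the current
	 * pageblock; the scanner clamps block_end_pfn to the next
	 * boundary instead of scanning a stale range. */
	unsigned long pfn = 1536;      /* already past old block_end_pfn */
	unsigned long block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	printf("%lu\n", block_end_pfn);        /* prints 2048 */
	return 0;
}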
diff --git a/mm/internal.h b/mm/internal.h
index 829304090b90..a4f90ba7068e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 /*
  * in mm/page_alloc.c
  */
+
+/*
+ * Locate the struct page for both the matching buddy in our
+ * pair (buddy1) and the combined O(n+1) page they form (page).
+ *
+ * 1) Any buddy B1 will have an order O twin B2 which satisfies
+ * the following equation:
+ *     B2 = B1 ^ (1 << O)
+ * For example, if the starting buddy (buddy2) is #8 its order
+ * 1 buddy is #10:
+ *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
+ *
+ * 2) Any buddy B will have an order O+1 parent P which
+ * satisfies the following equation:
+ *     P = B & ~(1 << O)
+ *
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
+ */
+static inline unsigned long
+__find_buddy_index(unsigned long page_idx, unsigned int order)
+{
+	return page_idx ^ (1 << order);
+}
+
+extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
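The comment moved into mm/internal.h documents the buddy arithmetic that __find_buddy_index() implements. Both identities are easy to verify standalone, using plain indices in place of struct page pointers; a sketch:

#include <assert.h>
#include <stdio.h>

/* Twin of order O: B2 = B1 ^ (1 << O) */
static unsigned long find_buddy_index(unsigned long idx, unsigned int order)
{
	return idx ^ (1UL << order);
}

/* Parent of order O+1: P = B & ~(1 << O) */
static unsigned long parent_index(unsigned long idx, unsigned int order)
{
	return idx & ~(1UL << order);
}

int main(void)
{
	/* The example from the comment: page #8 at order 1 pairs with #10. */
	assert(find_buddy_index(8, 1) == 10);
	assert(find_buddy_index(10, 1) == 8);  /* the relation is symmetric */

	/* Both buddies collapse to the same order-2 parent, page #8. */
	assert(parent_index(8, 1) == 8);
	assert(parent_index(10, 1) == 8);

	printf("buddy identities hold\n");
	return 0;
}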
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index eafcf60f6b83..e34a3cb6aad6 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -911,9 +911,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
 	if (i->nr_segs == 1)
 		return i->count;
 	else if (i->type & ITER_BVEC)
-		return min(i->count, i->iov->iov_len - i->iov_offset);
-	else
 		return min(i->count, i->bvec->bv_len - i->iov_offset);
+	else
+		return min(i->count, i->iov->iov_len - i->iov_offset);
 }
 EXPORT_SYMBOL(iov_iter_single_seg_count);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 252e1dbbed86..1bf4807cb21e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -31,6 +31,7 @@
 #include <linux/stop_machine.h>
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include <asm/tlbflush.h>
 
@@ -1066,6 +1067,16 @@ out:
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
+static void reset_node_present_pages(pg_data_t *pgdat)
+{
+	struct zone *z;
+
+	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+		z->present_pages = 0;
+
+	pgdat->node_present_pages = 0;
+}
+
 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
@@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 	build_all_zonelists(pgdat, NULL);
 	mutex_unlock(&zonelists_mutex);
 
+	/*
+	 * zone->managed_pages is set to an approximate value in
+	 * free_area_init_core(), which will cause
+	 * /sys/device/system/node/nodeX/meminfo has wrong data.
+	 * So reset it to 0 before any memory is onlined.
+	 */
+	reset_node_managed_pages(pgdat);
+
+	/*
+	 * When memory is hot-added, all the memory is in offline state. So
+	 * clear all zones' present_pages because they will be updated in
+	 * online_pages() and offline_pages().
+	 */
+	reset_node_present_pages(pgdat);
+
 	return pgdat;
 }
 
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7c7ab32ee503..90b50468333e 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void)
 
 static int reset_managed_pages_done __initdata;
 
-static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
+void reset_node_managed_pages(pg_data_t *pgdat)
 {
 	struct zone *z;
 
-	if (reset_managed_pages_done)
-		return;
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
 		z->managed_pages = 0;
 }
@@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void)
 {
 	struct pglist_data *pgdat;
 
+	if (reset_managed_pages_done)
+		return;
+
 	for_each_online_pgdat(pgdat)
 		reset_node_managed_pages(pgdat);
+
 	reset_managed_pages_done = 1;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9cd36b822444..616a2c956b4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -467,29 +467,6 @@ static inline void rmv_page_order(struct page *page)
 }
 
 /*
- * Locate the struct page for both the matching buddy in our
- * pair (buddy1) and the combined O(n+1) page they form (page).
- *
- * 1) Any buddy B1 will have an order O twin B2 which satisfies
- * the following equation:
- *     B2 = B1 ^ (1 << O)
- * For example, if the starting buddy (buddy2) is #8 its order
- * 1 buddy is #10:
- *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
- *
- * 2) Any buddy B will have an order O+1 parent P which
- * satisfies the following equation:
- *     P = B & ~(1 << O)
- *
- * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
- */
-static inline unsigned long
-__find_buddy_index(unsigned long page_idx, unsigned int order)
-{
-	return page_idx ^ (1 << order);
-}
-
-/*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
  * (a) the buddy is not in a hole &&
@@ -569,6 +546,7 @@ static inline void __free_one_page(struct page *page,
 	unsigned long combined_idx;
 	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
+	int max_order = MAX_ORDER;
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 
@@ -577,13 +555,24 @@ static inline void __free_one_page(struct page *page,
 		return;
 
 	VM_BUG_ON(migratetype == -1);
+	if (is_migrate_isolate(migratetype)) {
+		/*
+		 * We restrict max order of merging to prevent merge
+		 * between freepages on isolate pageblock and normal
+		 * pageblock. Without this, pageblock isolation
+		 * could cause incorrect freepage accounting.
+		 */
+		max_order = min(MAX_ORDER, pageblock_order + 1);
+	} else {
+		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+	}
 
-	page_idx = pfn & ((1 << MAX_ORDER) - 1);
+	page_idx = pfn & ((1 << max_order) - 1);
 
 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
-	while (order < MAX_ORDER-1) {
+	while (order < max_order - 1) {
 		buddy_idx = __find_buddy_index(page_idx, order);
 		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
@@ -594,9 +583,11 @@ static inline void __free_one_page(struct page *page,
 		 */
 		if (page_is_guard(buddy)) {
 			clear_page_guard_flag(buddy);
-			set_page_private(page, 0);
-			__mod_zone_freepage_state(zone, 1 << order,
-				      migratetype);
+			set_page_private(buddy, 0);
+			if (!is_migrate_isolate(migratetype)) {
+				__mod_zone_freepage_state(zone, 1 << order,
+						migratetype);
+			}
 		} else {
 			list_del(&buddy->lru);
 			zone->free_area[order].nr_free--;
@@ -715,14 +706,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
 			mt = get_freepage_migratetype(page);
+			if (unlikely(has_isolate_pageblock(zone)))
+				mt = get_pageblock_migratetype(page);
+
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 			trace_mm_page_pcpu_drain(page, 0, mt);
-			if (likely(!is_migrate_isolate_page(page))) {
-				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
-				if (is_migrate_cma(mt))
-					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
-			}
 		} while (--to_free && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
@@ -739,9 +728,11 @@ static void free_one_page(struct zone *zone,
 	if (nr_scanned)
 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
+	if (unlikely(has_isolate_pageblock(zone) ||
+		is_migrate_isolate(migratetype))) {
+		migratetype = get_pfnblock_migratetype(page, pfn);
+	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	if (unlikely(!is_migrate_isolate(migratetype)))
-		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 	spin_unlock(&zone->lock);
 }
 
@@ -1484,7 +1475,7 @@ void split_page(struct page *page, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(split_page);
 
-static int __isolate_free_page(struct page *page, unsigned int order)
+int __isolate_free_page(struct page *page, unsigned int order)
 {
 	unsigned long watermark;
 	struct zone *zone;
@@ -6408,13 +6399,12 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	/* Make sure the range is really isolated. */
 	if (test_pages_isolated(outer_start, end, false)) {
-		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
-			outer_start, end);
+		pr_info("%s: [%lx, %lx) PFNs busy\n",
+			__func__, outer_start, end);
 		ret = -EBUSY;
 		goto done;
 	}
 
-
 	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(&cc, outer_start, end);
 	if (!outer_end) {
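The max_order change in __free_one_page() caps buddy merging for isolated pageblocks: with max_order = pageblock_order + 1, the loop condition order < max_order - 1 stops merging at pageblock_order, so a freepage on an isolate pageblock never coalesces with a buddy in the adjacent, possibly non-isolated pageblock. A standalone sketch of why that cap is exactly right, assuming order-9 pageblocks: the XOR buddy crosses a pageblock boundary precisely when order >= pageblock_order:

#include <stdio.h>

#define pageblock_order  9U    /* assumed: 2MB pageblocks with 4KB pages */

/* Would merging an order-'order' page with its buddy cross pageblocks? */
static int merge_crosses_pageblock(unsigned long pfn, unsigned int order)
{
	unsigned long buddy_pfn = pfn ^ (1UL << order);

	return (pfn >> pageblock_order) != (buddy_pfn >> pageblock_order);
}

int main(void)
{
	unsigned long pfn = 0x1000;    /* start of some isolated pageblock */

	for (unsigned int order = 7; order <= 10; order++)
		printf("order %2u: crosses pageblock? %d\n",
		       order, merge_crosses_pageblock(pfn, order));
	/*
	 * order  7: 0    order  8: 0    -- same pageblock, merge is safe
	 * order  9: 1    order 10: 1    -- exactly what the cap forbids
	 */
	return 0;
}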
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index d1473b2e9481..c8778f7e208e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -60,6 +60,7 @@ out:
 	int migratetype = get_pageblock_migratetype(page);
 
 	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+	zone->nr_isolate_pageblock++;
 	nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
 	__mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
 	unsigned long flags, nr_pages;
+	struct page *isolated_page = NULL;
+	unsigned int order;
+	unsigned long page_idx, buddy_idx;
+	struct page *buddy;
 
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
-	nr_pages = move_freepages_block(zone, page, migratetype);
-	__mod_zone_freepage_state(zone, nr_pages, migratetype);
+
+	/*
+	 * Because freepage with more than pageblock_order on isolated
+	 * pageblock is restricted to merge due to freepage counting problem,
+	 * it is possible that there is free buddy page.
+	 * move_freepages_block() doesn't care of merge so we need other
+	 * approach in order to merge them. Isolation and free will make
+	 * these pages to be merged.
+	 */
+	if (PageBuddy(page)) {
+		order = page_order(page);
+		if (order >= pageblock_order) {
+			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
+			buddy_idx = __find_buddy_index(page_idx, order);
+			buddy = page + (buddy_idx - page_idx);
+
+			if (!is_migrate_isolate_page(buddy)) {
+				__isolate_free_page(page, order);
+				set_page_refcounted(page);
+				isolated_page = page;
+			}
+		}
+	}
+
+	/*
+	 * If we isolate freepage with more than pageblock_order, there
+	 * should be no freepage in the range, so we could avoid costly
+	 * pageblock scanning for freepage moving.
+	 */
+	if (!isolated_page) {
+		nr_pages = move_freepages_block(zone, page, migratetype);
+		__mod_zone_freepage_state(zone, nr_pages, migratetype);
+	}
 	set_pageblock_migratetype(page, migratetype);
+	zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
+	if (isolated_page)
+		__free_pages(isolated_page, order);
 }
 
 static inline struct page *
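unset_migratetype_isolate() reuses the buddy index arithmetic to locate the neighbour of an unmerged high-order freepage: an index within the MAX_ORDER-aligned window, an XOR for the buddy, and a pointer offset that can point backwards as well as forwards. A standalone model of those three steps over a plain array (MAX_ORDER = 11 is an assumed typical value):

#include <assert.h>
#include <stdio.h>

#define MAX_ORDER  11U                 /* assumed, as on most configs */

struct page { unsigned long pfn; };

static struct page mem_map[1 << MAX_ORDER];   /* contiguous map model */

int main(void)
{
	for (unsigned long i = 0; i < (1UL << MAX_ORDER); i++)
		mem_map[i].pfn = i;

	unsigned int order = 9;
	struct page *page = &mem_map[512];     /* a free order-9 page */

	/* The same steps as the hunk above: window-relative index, XOR
	 * for the buddy, then a pointer offset; the cast makes the
	 * wrap-around of the unsigned difference an explicit negative
	 * offset. */
	unsigned long page_idx = page->pfn & ((1UL << MAX_ORDER) - 1);
	unsigned long buddy_idx = page_idx ^ (1UL << order);
	struct page *buddy = page + (long)(buddy_idx - page_idx);

	assert(buddy == &mem_map[0]);  /* buddy precedes page in the map */
	printf("buddy pfn = %lu\n", buddy->pfn);
	return 0;
}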
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 406944207b61..dcdab81bd240 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -259,6 +259,10 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
 		if (s->size - size >= sizeof(void *))
 			continue;
 
+		if (IS_ENABLED(CONFIG_SLAB) && align &&
+		    (align > s->align || s->align % align))
+			continue;
+
 		return s;
 	}
 	return NULL;
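The new find_mergeable() test rejects a candidate cache whose effective alignment is incompatible with the requested one: s->align must be at least align and an exact multiple of it. A quick truth table for that predicate, standalone:

#include <stdio.h>

/* Mirror of the new check: nonzero means "do not merge". */
static int align_incompatible(unsigned long align, unsigned long s_align)
{
	return align && (align > s_align || s_align % align);
}

int main(void)
{
	/* (requested align, existing cache's align) */
	printf("%d\n", align_incompatible(0,  8));    /* 0: no requirement */
	printf("%d\n", align_incompatible(8,  8));    /* 0: exact match */
	printf("%d\n", align_incompatible(8, 64));    /* 0: 64 is a multiple of 8 */
	printf("%d\n", align_incompatible(64, 8));    /* 1: existing cache too weakly aligned */
	printf("%d\n", align_incompatible(8, 12));    /* 1: 12 mod 8 != 0 */
	return 0;
}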