author		Olof Johansson <olof@lixom.net>	2013-01-14 13:20:02 -0500
committer	Olof Johansson <olof@lixom.net>	2013-01-14 13:20:02 -0500
commit		8d84981e395850aab31c3f2ca7e2738e03f671d7 (patch)
tree		933425fddb23d28be802277471df3fe3f6c2711d /mm
parent		00c82d64405631967dca3890a9ce80ab35d04cc7 (diff)
parent		77cc982f6a3b33a5aa058ad3b20cda8866db2948 (diff)
Merge branch 'clocksource/cleanup' into next/cleanup
Clockevent cleanup series from Shawn Guo.
Resolved move/change conflict in mach-pxa/time.c due to the sys_timer
cleanup.
* clocksource/cleanup:
clocksource: use clockevents_config_and_register() where possible
ARM: use clockevents_config_and_register() where possible
clockevents: export clockevents_config_and_register for module use
+ sync to Linux 3.8-rc3
Signed-off-by: Olof Johansson <olof@lixom.net>
Conflicts:
arch/arm/mach-pxa/time.c
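
For context on the API this series converges on: clockevents_config_and_register() folds the open-coded mult/shift math, min/max delta setup, and registration into a single call. A minimal before/after sketch for a hypothetical driver (the my_timer_* names are illustrative, not from this merge):

#include <linux/clockchips.h>

static int my_timer_set_next_event(unsigned long cycles,
				   struct clock_event_device *evt);
static void my_timer_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt);

static struct clock_event_device my_clkevt = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event	= my_timer_set_next_event,
	.set_mode	= my_timer_set_mode,
};

static void __init my_timer_init(u32 timer_hz)
{
	/* Before: each driver computed mult/shift and delta bounds itself.
	 *
	 * clockevents_calc_mult_shift(&my_clkevt, timer_hz, 4);
	 * my_clkevt.max_delta_ns = clockevent_delta2ns(0xffffffff, &my_clkevt);
	 * my_clkevt.min_delta_ns = clockevent_delta2ns(0xf, &my_clkevt);
	 * clockevents_register_device(&my_clkevt);
	 */

	/* After: one call derives mult/shift and the ns bounds from the
	 * timer frequency and the min/max programmable tick counts.
	 */
	clockevents_config_and_register(&my_clkevt, timer_hz, 0xf, 0xffffffff);
}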
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c		13
-rw-r--r--	mm/page_alloc.c		27
-rw-r--r--	mm/page_isolation.c	26
-rw-r--r--	mm/vmscan.c		 4
4 files changed, 17 insertions, 53 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e0a9b0ce4f10..bb1369f7b9b4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -184,10 +184,14 @@ static int tlb_next_batch(struct mmu_gather *tlb)
 		return 1;
 	}
 
+	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
+		return 0;
+
 	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 	if (!batch)
 		return 0;
 
+	tlb->batch_count++;
 	batch->next = NULL;
 	batch->nr = 0;
 	batch->max = MAX_GATHER_BATCH;
@@ -216,6 +220,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->local.nr = 0;
 	tlb->local.max = ARRAY_SIZE(tlb->__pages);
 	tlb->active = &tlb->local;
+	tlb->batch_count = 0;
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
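
Taken together, the two hunks above cap mmu_gather batching: tlb_gather_mmu() starts batch_count at zero, and tlb_next_batch() stops allocating once MAX_GATHER_BATCH_COUNT batches exist, so a huge unmap flushes in bounded chunks instead of growing the GFP_NOWAIT batch chain without limit. A sketch of the constants this relies on (defined in include/asm-generic/tlb.h upstream; the ~10000-page bound is my reading of the corresponding mainline commit, so treat the exact figure as an assumption):

/* One batch is a page-sized array of page pointers. */
#define MAX_GATHER_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/* Assumed cap: bound the gather to roughly 10000 pages so that
 * !CONFIG_PREEMPT kernels cannot soft-lockup while freeing one
 * enormous range between flushes.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL / MAX_GATHER_BATCH)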
@@ -3706,6 +3711,14 @@ retry:
 	if (pmd_trans_huge(orig_pmd)) {
 		unsigned int dirty = flags & FAULT_FLAG_WRITE;
 
+		/*
+		 * If the pmd is splitting, return and retry the
+		 * fault. Alternative: wait until the split is
+		 * done, and goto retry.
+		 */
+		if (pmd_trans_splitting(orig_pmd))
+			return 0;
+
 		if (pmd_numa(orig_pmd))
 			return do_huge_pmd_numa_page(mm, vma, address,
 						     orig_pmd, pmd);
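
The added check closes a race with transparent-huge-page splitting: if another thread is midway through splitting this pmd, the handler bails out with 0 and lets the faulting instruction re-execute; by the retry the split has either completed (normal pte path) or the pmd is stable again. The alternative the comment mentions would look roughly like this sketch (not what the patch does; the wait_split_huge_page usage here is my assumption):

		if (pmd_trans_splitting(orig_pmd)) {
			/* Block until the splitter finishes, then retry
			 * without leaving the kernel. */
			wait_split_huge_page(vma->anon_vma, pmd);
			goto retry;
		}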
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4ba5e37127fc..bc6cc0e913bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -221,11 +221,6 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-/*
- * NOTE:
- * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
- * Instead, use {un}set_pageblock_isolate.
- */
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
@@ -1655,20 +1650,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	return true;
 }
 
-#ifdef CONFIG_MEMORY_ISOLATION
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
-	if (unlikely(zone->nr_pageblock_isolate))
-		return zone->nr_pageblock_isolate * pageblock_nr_pages;
-	return 0;
-}
-#else
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
-	return 0;
-}
-#endif
-
 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		       int classzone_idx, int alloc_flags)
 {
@@ -1684,14 +1665,6 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 
-	/*
-	 * If the zone has MIGRATE_ISOLATE type free pages, we should consider
-	 * it. nr_zone_isolate_freepages is never accurate so kswapd might not
-	 * sleep although it could do so. But this is more desirable for memory
-	 * hotplug than sleeping which can cause a livelock in the direct
-	 * reclaim path.
-	 */
-	free_pages -= nr_zone_isolate_freepages(z);
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 				   free_pages);
 }
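
All three page_alloc.c hunks revert the MIGRATE_ISOLATE free-page accounting: nr_zone_isolate_freepages() (whose own comment conceded it was "never accurate") disappears, and the watermark check falls back to the plain free-page counter. The surviving logic reduces to this (a sketch assembled from the context lines above):

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
			    int classzone_idx, int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	/* Near the per-cpu drift mark, pay for an exact snapshot. */
	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx,
				   alloc_flags, free_pages);
}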
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 9d2264ea4606..383bdbb98b04 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,28 +8,6 @@
 #include <linux/memory.h>
 #include "internal.h"
 
-/* called while holding zone->lock */
-static void set_pageblock_isolate(struct page *page)
-{
-	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
-		return;
-
-	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
-	page_zone(page)->nr_pageblock_isolate++;
-}
-
-/* called while holding zone->lock */
-static void restore_pageblock_isolate(struct page *page, int migratetype)
-{
-	struct zone *zone = page_zone(page);
-	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
-		return;
-
-	BUG_ON(zone->nr_pageblock_isolate <= 0);
-	set_pageblock_migratetype(page, migratetype);
-	zone->nr_pageblock_isolate--;
-}
-
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
@@ -80,7 +58,7 @@ out:
 		unsigned long nr_pages;
 		int migratetype = get_pageblock_migratetype(page);
 
-		set_pageblock_isolate(page);
+		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
 		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -103,7 +81,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		goto out;
 	nr_pages = move_freepages_block(zone, page, migratetype);
 	__mod_zone_freepage_state(zone, nr_pages, migratetype);
-	restore_pageblock_isolate(page, migratetype);
+	set_pageblock_migratetype(page, migratetype);
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
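
With set_pageblock_isolate()/restore_pageblock_isolate() gone, the zone->nr_pageblock_isolate counter loses its last users here: isolating a pageblock is once again a bare migratetype write plus moving its free pages, all under zone->lock as in the call sites above. The isolate path now amounts to (sketch):

	spin_lock_irqsave(&zone->lock, flags);
	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
	__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	spin_unlock_irqrestore(&zone->lock, flags);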
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 16b42af393ac..196709f5ee58 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3122,8 +3122,8 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
    not required for correctness. So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
-				  unsigned long action, void *hcpu)
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+			void *hcpu)
 {
 	int nid;
 
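
The vmscan.c change drops the __devinit annotation from the CPU-hotplug callback; the 3.8 cycle removed the __dev* section markers (they were only meaningful with the now-gone CONFIG_HOTPLUG), so the function simply stays resident. Registration is untouched; for reference it happens at init roughly like this (sketch; the surrounding kswapd_init body is assumed, not shown in this diff):

static int __init kswapd_init(void)
{
	/* ... start a kswapd thread for each online node ... */
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}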