author		David S. Miller <davem@davemloft.net>	2016-05-09 15:59:24 -0400
committer	David S. Miller <davem@davemloft.net>	2016-05-09 15:59:24 -0400
commit		e800072c18f0d7b89a80fa46dceb3d080c80e09c (patch)
tree		8da6cb7944762a60ec37594720c1ad2757631c2f /mm
parent		e8ed77dfa90dd79c5343415a4bbbfdab9787b35a (diff)
parent		b507146bb6b9ac0c0197100ba3e299825a21fed3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
In netdevice.h we removed the structure in net-next that is being
changed in 'net'. In macsec.c and rtnetlink.c we have overlaps
between fixes in 'net' and the u64 attribute changes in 'net-next'.
The mlx5 conflicts have to do with vxlan support dependencies.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		14
-rw-r--r--	mm/huge_memory.c	4
-rw-r--r--	mm/memory.c		11
-rw-r--r--	mm/page-writeback.c	6
-rw-r--r--	mm/page_alloc.c		2
-rw-r--r--	mm/zswap.c		8
6 files changed, 20 insertions, 25 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index ccf97b02b85f..8fa254043801 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 							ISOLATE_UNEVICTABLE);
 
-		/*
-		 * In case of fatal failure, release everything that might
-		 * have been isolated in the previous iteration, and signal
-		 * the failure back to caller.
-		 */
-		if (!pfn) {
-			putback_movable_pages(&cc->migratepages);
-			cc->nr_migratepages = 0;
+		if (!pfn)
 			break;
-		}
 
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-	return pgdat->kcompactd_max_order > 0;
+	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
 
+	if (kthread_should_stop())
+		return;
 	status = compact_zone(zone, &cc);
 
 	if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
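The kcompactd hunks follow the standard kthread shutdown pattern: the wakeup condition must also become true when the thread is asked to stop, otherwise kthread_stop() can block forever on a sleeping waiter. A minimal sketch of that pattern, under the assumption of a simplified main loop (this is not the exact kcompactd code):

/*
 * Simplified sketch of the kthread stop pattern this change relies on:
 * the wait condition must also fire when the thread is being stopped.
 */
static int worker_thread(void *p)
{
	pg_data_t *pgdat = p;

	while (!kthread_should_stop()) {
		/* wakes either when work is queued or on kthread_stop() */
		wait_event_freezable(pgdat->kcompactd_wait,
				     kcompactd_work_requested(pgdat));
		kcompactd_do_work(pgdat);
	}
	return 0;
}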
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index df67b53ae3c5..f7daa7de8f48 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3452,7 +3452,7 @@ next:
 		}
 	}
 
-	pr_info("%lu of %lu THP split", split, total);
+	pr_info("%lu of %lu THP split\n", split, total);
 
 	return 0;
 }
@@ -3463,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void)
 {
 	void *ret;
 
-	ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			&split_huge_pages_fops);
 	if (!ret)
 		pr_warn("Failed to create split_huge_pages in debugfs");
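The debugfs hunk tightens the mode from 0644 to 0200 because split_huge_pages is a write-only trigger; there is nothing meaningful to read back. A hedged sketch of the same write-only debugfs idiom, where every name except the debugfs and file_operations APIs is made up for illustration:

/* Sketch of a write-only (0200) debugfs trigger; names are hypothetical. */
static ssize_t trigger_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	pr_info("trigger fired\n");	/* perform the one-shot action here */
	return count;
}

static const struct file_operations trigger_fops = {
	.owner = THIS_MODULE,
	.write = trigger_write,
};

static int __init trigger_init(void)
{
	/* 0200: root can write to fire the action; reads are pointless */
	if (!debugfs_create_file("my_trigger", 0200, NULL, NULL,
				 &trigger_fops))
		pr_warn("could not create debugfs entry\n");
	return 0;
}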
diff --git a/mm/memory.c b/mm/memory.c
index 305537fc8640..52c218e2b724 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1222,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-						__func__, addr, end,
-						vma->vm_start,
-						vma->vm_end);
-					BUG();
-				}
-#endif
+				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
 				split_huge_pmd(vma, pmd, addr);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
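This hunk replaces an open-coded CONFIG_DEBUG_VM check with VM_BUG_ON_VMA(), which dumps the offending vma before triggering BUG() on debug kernels and compiles away on production builds. Roughly, and only as a paraphrase of include/linux/mmdebug.h rather than an exact quote, the macro has this shape:

/* Rough shape of VM_BUG_ON_VMA() (paraphrased, not the exact definition). */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_VMA(cond, vma)				\
	do {							\
		if (unlikely(cond)) {				\
			dump_vma(vma);				\
			BUG();					\
		}						\
	} while (0)
#else
/* without CONFIG_DEBUG_VM the condition is only type-checked, never run */
#define VM_BUG_ON_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond)
#endif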
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 999792d35ccc..bc5149d5ec38 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;
 
-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+	if (wb_stat(wb, WB_RECLAIMABLE) >
+	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
 		return true;
 
 	if (mdtc) {
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;
 
-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+		if (wb_stat(wb, WB_RECLAIMABLE) >
+		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
 			return true;
 	}
 
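Both wb_over_bg_thresh() hunks make the same substitution: instead of __wb_calc_thresh(gdtc), which works from the domain's dirty threshold, the check now scales the background threshold (bg_thresh) down to this particular writeback before comparing it with the wb's reclaimable pages. A minimal sketch of the resulting per-writeback check, with a hypothetical helper name:

/*
 * Sketch of the per-writeback background check after this change.
 * wb_bg_thresh_exceeded() is a hypothetical name, not a kernel API.
 */
static bool wb_bg_thresh_exceeded(struct bdi_writeback *wb,
				  unsigned long bg_thresh)
{
	/* compare this wb's reclaimable pages with its share of bg_thresh */
	return wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, bg_thresh);
}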
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59de90d5d3a3..c1069efcc4d7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_inactive_ratio();
 	return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
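Switching from module_init() to core_initcall() only changes when the built-in initcall runs: core_initcall registers at an early level, while module_init() in built-in code runs at the much later device level, so the zone watermarks are now set up earlier in boot. An illustrative sketch of the two macros side by side (both functions below are made-up examples):

/* Illustrative only: the initcall macro decides boot-time ordering. */
static int __init early_tunables_init(void)
{
	/* runs at core_initcall time, early in boot */
	return 0;
}
core_initcall(early_tunables_init);

static int __init late_tunables_init(void)
{
	/* module_init() in built-in code runs at device_initcall time */
	return 0;
}
module_init(late_tunables_init);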
diff --git a/mm/zswap.c b/mm/zswap.c
index 91dad80d068b..de0f119b1780 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	struct zswap_pool *pool;
+	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 		return NULL;
 	}
 
-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+	/* unique name for each pool specifically required by zsmalloc */
+	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 	if (!pool->zpool) {
 		pr_err("%s zpool not available\n", type);
 		goto error;
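The zswap hunks give every zpool a distinct name ("zswap1", "zswap2", ...) instead of the fixed string "zswap", since, as the in-diff comment notes, zsmalloc requires unique pool names. A minimal sketch of the same counter-based naming, wrapped in a hypothetical helper (create_unique_zpool() is not a kernel API):

/* Sketch of counter-based unique pool naming; the wrapper is hypothetical. */
static atomic_t pool_count = ATOMIC_INIT(0);

static struct zpool *create_unique_zpool(char *type, gfp_t gfp)
{
	char name[38];	/* "zswap" + 32-char (max) number + '\0' */

	/* atomic_inc_return() hands every caller a distinct id */
	snprintf(name, sizeof(name), "zswap%x",
		 atomic_inc_return(&pool_count));
	return zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
}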