diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-05-25 23:24:28 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-05-25 23:24:28 -0400 |
| commit | bc2dbc5420e82560e650f8531ceca597441ca171 (patch) | |
| tree | ac93448eaba1fe89fa007f9945f7e507b97dba8e | |
| parent | 03250e1028057173b212341015d5fbf53327042c (diff) | |
| parent | 3f1959721558a976aaf9c2024d5bc884e6411bf7 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"16 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
kasan: fix memory hotplug during boot
kasan: free allocated shadow memory on MEM_CANCEL_ONLINE
checkpatch: fix macro argument precedence test
init/main.c: include <linux/mem_encrypt.h>
kernel/sys.c: fix potential Spectre v1 issue
mm/memory_hotplug: fix leftover use of struct page during hotplug
proc: fix smaps and meminfo alignment
mm: do not warn on offline nodes unless the specific node is explicitly requested
mm, memory_hotplug: make has_unmovable_pages more robust
mm/kasan: don't vfree() nonexistent vm_area
MAINTAINERS: change hugetlbfs maintainer and update files
ipc/shm: fix shmat() nil address after round-down when remapping
Revert "ipc/shm: Fix shmat mmap nil-page protection"
idr: fix invalid ptr dereference on item delete
ocfs2: revert "ocfs2/o2hb: check len for bio_add_page() to avoid getting incorrect bio"
mm: fix nr_rotate_swap leak in swapon() error case
| -rw-r--r-- | MAINTAINERS | 8 | ||||
| -rw-r--r-- | drivers/base/node.c | 5 | ||||
| -rw-r--r-- | fs/ocfs2/cluster/heartbeat.c | 11 | ||||
| -rw-r--r-- | fs/seq_file.c | 5 | ||||
| -rw-r--r-- | include/linux/gfp.h | 2 | ||||
| -rw-r--r-- | include/linux/node.h | 8 | ||||
| -rw-r--r-- | init/main.c | 1 | ||||
| -rw-r--r-- | ipc/shm.c | 19 | ||||
| -rw-r--r-- | kernel/sys.c | 5 | ||||
| -rw-r--r-- | lib/radix-tree.c | 4 | ||||
| -rw-r--r-- | mm/kasan/kasan.c | 66 | ||||
| -rw-r--r-- | mm/memory_hotplug.c | 2 | ||||
| -rw-r--r-- | mm/page_alloc.c | 16 | ||||
| -rw-r--r-- | mm/swapfile.c | 7 | ||||
| -rwxr-xr-x | scripts/checkpatch.pl | 2 | ||||
| -rw-r--r-- | tools/testing/radix-tree/idr-test.c | 7 |
16 files changed, 125 insertions, 43 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 8f57c1e63d19..ca4afd68530c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -6503,9 +6503,15 @@ F: Documentation/networking/hinic.txt | |||
| 6503 | F: drivers/net/ethernet/huawei/hinic/ | 6503 | F: drivers/net/ethernet/huawei/hinic/ |
| 6504 | 6504 | ||
| 6505 | HUGETLB FILESYSTEM | 6505 | HUGETLB FILESYSTEM |
| 6506 | M: Nadia Yvette Chambers <nyc@holomorphy.com> | 6506 | M: Mike Kravetz <mike.kravetz@oracle.com> |
| 6507 | L: linux-mm@kvack.org | ||
| 6507 | S: Maintained | 6508 | S: Maintained |
| 6508 | F: fs/hugetlbfs/ | 6509 | F: fs/hugetlbfs/ |
| 6510 | F: mm/hugetlb.c | ||
| 6511 | F: include/linux/hugetlb.h | ||
| 6512 | F: Documentation/admin-guide/mm/hugetlbpage.rst | ||
| 6513 | F: Documentation/vm/hugetlbfs_reserv.rst | ||
| 6514 | F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages | ||
| 6509 | 6515 | ||
| 6510 | HVA ST MEDIA DRIVER | 6516 | HVA ST MEDIA DRIVER |
| 6511 | M: Jean-Christophe Trotin <jean-christophe.trotin@st.com> | 6517 | M: Jean-Christophe Trotin <jean-christophe.trotin@st.com> |
diff --git a/drivers/base/node.c b/drivers/base/node.c index 7a3a580821e0..a5e821d09656 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, | |||
| 490 | return 0; | 490 | return 0; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) | 493 | int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages, |
| 494 | bool check_nid) | ||
| 494 | { | 495 | { |
| 495 | unsigned long end_pfn = start_pfn + nr_pages; | 496 | unsigned long end_pfn = start_pfn + nr_pages; |
| 496 | unsigned long pfn; | 497 | unsigned long pfn; |
| @@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) | |||
| 514 | 515 | ||
| 515 | mem_blk = find_memory_block_hinted(mem_sect, mem_blk); | 516 | mem_blk = find_memory_block_hinted(mem_sect, mem_blk); |
| 516 | 517 | ||
| 517 | ret = register_mem_sect_under_node(mem_blk, nid, true); | 518 | ret = register_mem_sect_under_node(mem_blk, nid, check_nid); |
| 518 | if (!err) | 519 | if (!err) |
| 519 | err = ret; | 520 | err = ret; |
| 520 | 521 | ||
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 91a8889abf9b..ea8c551bcd7e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
| @@ -570,16 +570,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, | |||
| 570 | current_page, vec_len, vec_start); | 570 | current_page, vec_len, vec_start); |
| 571 | 571 | ||
| 572 | len = bio_add_page(bio, page, vec_len, vec_start); | 572 | len = bio_add_page(bio, page, vec_len, vec_start); |
| 573 | if (len != vec_len) { | 573 | if (len != vec_len) break; |
| 574 | mlog(ML_ERROR, "Adding page[%d] to bio failed, " | ||
| 575 | "page %p, len %d, vec_len %u, vec_start %u, " | ||
| 576 | "bi_sector %llu\n", current_page, page, len, | ||
| 577 | vec_len, vec_start, | ||
| 578 | (unsigned long long)bio->bi_iter.bi_sector); | ||
| 579 | bio_put(bio); | ||
| 580 | bio = ERR_PTR(-EIO); | ||
| 581 | return bio; | ||
| 582 | } | ||
| 583 | 574 | ||
| 584 | cs += vec_len / (PAGE_SIZE/spp); | 575 | cs += vec_len / (PAGE_SIZE/spp); |
| 585 | vec_start = 0; | 576 | vec_start = 0; |
diff --git a/fs/seq_file.c b/fs/seq_file.c index c6c27f1f9c98..4cc090b50cc5 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
| @@ -709,11 +709,6 @@ void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, | |||
| 709 | if (m->count + width >= m->size) | 709 | if (m->count + width >= m->size) |
| 710 | goto overflow; | 710 | goto overflow; |
| 711 | 711 | ||
| 712 | if (num < 10) { | ||
| 713 | m->buf[m->count++] = num + '0'; | ||
| 714 | return; | ||
| 715 | } | ||
| 716 | |||
| 717 | len = num_to_str(m->buf + m->count, m->size - m->count, num, width); | 712 | len = num_to_str(m->buf + m->count, m->size - m->count, num, width); |
| 718 | if (!len) | 713 | if (!len) |
| 719 | goto overflow; | 714 | goto overflow; |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 1a4582b44d32..fc5ab85278d5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -464,7 +464,7 @@ static inline struct page * | |||
| 464 | __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) | 464 | __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) |
| 465 | { | 465 | { |
| 466 | VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); | 466 | VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); |
| 467 | VM_WARN_ON(!node_online(nid)); | 467 | VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); |
| 468 | 468 | ||
| 469 | return __alloc_pages(gfp_mask, order, nid); | 469 | return __alloc_pages(gfp_mask, order, nid); |
| 470 | } | 470 | } |
diff --git a/include/linux/node.h b/include/linux/node.h index 41f171861dcc..6d336e38d155 100644 --- a/include/linux/node.h +++ b/include/linux/node.h | |||
| @@ -32,9 +32,11 @@ extern struct node *node_devices[]; | |||
| 32 | typedef void (*node_registration_func_t)(struct node *); | 32 | typedef void (*node_registration_func_t)(struct node *); |
| 33 | 33 | ||
| 34 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) | 34 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) |
| 35 | extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages); | 35 | extern int link_mem_sections(int nid, unsigned long start_pfn, |
| 36 | unsigned long nr_pages, bool check_nid); | ||
| 36 | #else | 37 | #else |
| 37 | static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) | 38 | static inline int link_mem_sections(int nid, unsigned long start_pfn, |
| 39 | unsigned long nr_pages, bool check_nid) | ||
| 38 | { | 40 | { |
| 39 | return 0; | 41 | return 0; |
| 40 | } | 42 | } |
| @@ -57,7 +59,7 @@ static inline int register_one_node(int nid) | |||
| 57 | if (error) | 59 | if (error) |
| 58 | return error; | 60 | return error; |
| 59 | /* link memory sections under this node */ | 61 | /* link memory sections under this node */ |
| 60 | error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages); | 62 | error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true); |
| 61 | } | 63 | } |
| 62 | 64 | ||
| 63 | return error; | 65 | return error; |
diff --git a/init/main.c b/init/main.c index fd37315835b4..3b4ada11ed52 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -91,6 +91,7 @@ | |||
| 91 | #include <linux/cache.h> | 91 | #include <linux/cache.h> |
| 92 | #include <linux/rodata_test.h> | 92 | #include <linux/rodata_test.h> |
| 93 | #include <linux/jump_label.h> | 93 | #include <linux/jump_label.h> |
| 94 | #include <linux/mem_encrypt.h> | ||
| 94 | 95 | ||
| 95 | #include <asm/io.h> | 96 | #include <asm/io.h> |
| 96 | #include <asm/bugs.h> | 97 | #include <asm/bugs.h> |
| @@ -1363,14 +1363,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, | |||
| 1363 | 1363 | ||
| 1364 | if (addr) { | 1364 | if (addr) { |
| 1365 | if (addr & (shmlba - 1)) { | 1365 | if (addr & (shmlba - 1)) { |
| 1366 | /* | 1366 | if (shmflg & SHM_RND) { |
| 1367 | * Round down to the nearest multiple of shmlba. | 1367 | addr &= ~(shmlba - 1); /* round down */ |
| 1368 | * For sane do_mmap_pgoff() parameters, avoid | 1368 | |
| 1369 | * round downs that trigger nil-page and MAP_FIXED. | 1369 | /* |
| 1370 | */ | 1370 | * Ensure that the round-down is non-nil |
| 1371 | if ((shmflg & SHM_RND) && addr >= shmlba) | 1371 | * when remapping. This can happen for |
| 1372 | addr &= ~(shmlba - 1); | 1372 | * cases when addr < shmlba. |
| 1373 | else | 1373 | */ |
| 1374 | if (!addr && (shmflg & SHM_REMAP)) | ||
| 1375 | goto out; | ||
| 1376 | } else | ||
| 1374 | #ifndef __ARCH_FORCE_SHMLBA | 1377 | #ifndef __ARCH_FORCE_SHMLBA |
| 1375 | if (addr & ~PAGE_MASK) | 1378 | if (addr & ~PAGE_MASK) |
| 1376 | #endif | 1379 | #endif |
diff --git a/kernel/sys.c b/kernel/sys.c index b0eee418ee0d..d1b2b8d934bb 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -71,6 +71,9 @@ | |||
| 71 | #include <asm/io.h> | 71 | #include <asm/io.h> |
| 72 | #include <asm/unistd.h> | 72 | #include <asm/unistd.h> |
| 73 | 73 | ||
| 74 | /* Hardening for Spectre-v1 */ | ||
| 75 | #include <linux/nospec.h> | ||
| 76 | |||
| 74 | #include "uid16.h" | 77 | #include "uid16.h" |
| 75 | 78 | ||
| 76 | #ifndef SET_UNALIGN_CTL | 79 | #ifndef SET_UNALIGN_CTL |
| @@ -1453,6 +1456,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, | |||
| 1453 | if (resource >= RLIM_NLIMITS) | 1456 | if (resource >= RLIM_NLIMITS) |
| 1454 | return -EINVAL; | 1457 | return -EINVAL; |
| 1455 | 1458 | ||
| 1459 | resource = array_index_nospec(resource, RLIM_NLIMITS); | ||
| 1456 | task_lock(current->group_leader); | 1460 | task_lock(current->group_leader); |
| 1457 | x = current->signal->rlim[resource]; | 1461 | x = current->signal->rlim[resource]; |
| 1458 | task_unlock(current->group_leader); | 1462 | task_unlock(current->group_leader); |
| @@ -1472,6 +1476,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, | |||
| 1472 | if (resource >= RLIM_NLIMITS) | 1476 | if (resource >= RLIM_NLIMITS) |
| 1473 | return -EINVAL; | 1477 | return -EINVAL; |
| 1474 | 1478 | ||
| 1479 | resource = array_index_nospec(resource, RLIM_NLIMITS); | ||
| 1475 | task_lock(current->group_leader); | 1480 | task_lock(current->group_leader); |
| 1476 | r = current->signal->rlim[resource]; | 1481 | r = current->signal->rlim[resource]; |
| 1477 | task_unlock(current->group_leader); | 1482 | task_unlock(current->group_leader); |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 43e0cbedc3a0..a9e41aed6de4 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -2034,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root, | |||
| 2034 | unsigned long index, void *item) | 2034 | unsigned long index, void *item) |
| 2035 | { | 2035 | { |
| 2036 | struct radix_tree_node *node = NULL; | 2036 | struct radix_tree_node *node = NULL; |
| 2037 | void __rcu **slot; | 2037 | void __rcu **slot = NULL; |
| 2038 | void *entry; | 2038 | void *entry; |
| 2039 | 2039 | ||
| 2040 | entry = __radix_tree_lookup(root, index, &node, &slot); | 2040 | entry = __radix_tree_lookup(root, index, &node, &slot); |
| 2041 | if (!slot) | ||
| 2042 | return NULL; | ||
| 2041 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, | 2043 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, |
| 2042 | get_slot_offset(node, slot)))) | 2044 | get_slot_offset(node, slot)))) |
| 2043 | return NULL; | 2045 | return NULL; |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index bc0e68f7dc75..f185455b3406 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
| @@ -792,6 +792,40 @@ DEFINE_ASAN_SET_SHADOW(f5); | |||
| 792 | DEFINE_ASAN_SET_SHADOW(f8); | 792 | DEFINE_ASAN_SET_SHADOW(f8); |
| 793 | 793 | ||
| 794 | #ifdef CONFIG_MEMORY_HOTPLUG | 794 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 795 | static bool shadow_mapped(unsigned long addr) | ||
| 796 | { | ||
| 797 | pgd_t *pgd = pgd_offset_k(addr); | ||
| 798 | p4d_t *p4d; | ||
| 799 | pud_t *pud; | ||
| 800 | pmd_t *pmd; | ||
| 801 | pte_t *pte; | ||
| 802 | |||
| 803 | if (pgd_none(*pgd)) | ||
| 804 | return false; | ||
| 805 | p4d = p4d_offset(pgd, addr); | ||
| 806 | if (p4d_none(*p4d)) | ||
| 807 | return false; | ||
| 808 | pud = pud_offset(p4d, addr); | ||
| 809 | if (pud_none(*pud)) | ||
| 810 | return false; | ||
| 811 | |||
| 812 | /* | ||
| 813 | * We can't use pud_large() or pud_huge(), the first one is | ||
| 814 | * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse | ||
| 815 | * pud_bad(), if pud is bad then it's bad because it's huge. | ||
| 816 | */ | ||
| 817 | if (pud_bad(*pud)) | ||
| 818 | return true; | ||
| 819 | pmd = pmd_offset(pud, addr); | ||
| 820 | if (pmd_none(*pmd)) | ||
| 821 | return false; | ||
| 822 | |||
| 823 | if (pmd_bad(*pmd)) | ||
| 824 | return true; | ||
| 825 | pte = pte_offset_kernel(pmd, addr); | ||
| 826 | return !pte_none(*pte); | ||
| 827 | } | ||
| 828 | |||
| 795 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, | 829 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, |
| 796 | unsigned long action, void *data) | 830 | unsigned long action, void *data) |
| 797 | { | 831 | { |
| @@ -813,6 +847,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, | |||
| 813 | case MEM_GOING_ONLINE: { | 847 | case MEM_GOING_ONLINE: { |
| 814 | void *ret; | 848 | void *ret; |
| 815 | 849 | ||
| 850 | /* | ||
| 851 | * If shadow is mapped already then it must have been mapped | ||
| 852 | * during the boot. This could happen if we are onlining previously | ||
| 853 | * offlined memory. | ||
| 854 | */ | ||
| 855 | if (shadow_mapped(shadow_start)) | ||
| 856 | return NOTIFY_OK; | ||
| 857 | |||
| 816 | ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, | 858 | ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, |
| 817 | shadow_end, GFP_KERNEL, | 859 | shadow_end, GFP_KERNEL, |
| 818 | PAGE_KERNEL, VM_NO_GUARD, | 860 | PAGE_KERNEL, VM_NO_GUARD, |
| @@ -824,8 +866,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, | |||
| 824 | kmemleak_ignore(ret); | 866 | kmemleak_ignore(ret); |
| 825 | return NOTIFY_OK; | 867 | return NOTIFY_OK; |
| 826 | } | 868 | } |
| 827 | case MEM_OFFLINE: | 869 | case MEM_CANCEL_ONLINE: |
| 828 | vfree((void *)shadow_start); | 870 | case MEM_OFFLINE: { |
| 871 | struct vm_struct *vm; | ||
| 872 | |||
| 873 | /* | ||
| 874 | * shadow_start was either mapped during boot by kasan_init() | ||
| 875 | * or during memory online by __vmalloc_node_range(). | ||
| 876 | * In the latter case we can use vfree() to free shadow. | ||
| 877 | * Non-NULL result of the find_vm_area() will tell us if | ||
| 878 | * that was the second case. | ||
| 879 | * | ||
| 880 | * Currently it's not possible to free shadow mapped | ||
| 881 | * during boot by kasan_init(). It's because the code | ||
| 882 | * to do that hasn't been written yet. So we'll just | ||
| 883 | * leak the memory. | ||
| 884 | */ | ||
| 885 | vm = find_vm_area((void *)shadow_start); | ||
| 886 | if (vm) | ||
| 887 | vfree((void *)shadow_start); | ||
| 888 | } | ||
| 829 | } | 889 | } |
| 830 | 890 | ||
| 831 | return NOTIFY_OK; | 891 | return NOTIFY_OK; |
| @@ -838,5 +898,5 @@ static int __init kasan_memhotplug_init(void) | |||
| 838 | return 0; | 898 | return 0; |
| 839 | } | 899 | } |
| 840 | 900 | ||
| 841 | module_init(kasan_memhotplug_init); | 901 | core_initcall(kasan_memhotplug_init); |
| 842 | #endif | 902 | #endif |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f74826cdceea..25982467800b 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -1158,7 +1158,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) | |||
| 1158 | * nodes have to go through register_node. | 1158 | * nodes have to go through register_node. |
| 1159 | * TODO clean up this mess. | 1159 | * TODO clean up this mess. |
| 1160 | */ | 1160 | */ |
| 1161 | ret = link_mem_sections(nid, start_pfn, nr_pages); | 1161 | ret = link_mem_sections(nid, start_pfn, nr_pages, false); |
| 1162 | register_fail: | 1162 | register_fail: |
| 1163 | /* | 1163 | /* |
| 1164 | * If sysfs file of new node can't create, cpu on the node | 1164 | * If sysfs file of new node can't create, cpu on the node |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 511a7124d7f9..22320ea27489 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -7598,11 +7598,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 7598 | unsigned long pfn, iter, found; | 7598 | unsigned long pfn, iter, found; |
| 7599 | 7599 | ||
| 7600 | /* | 7600 | /* |
| 7601 | * For avoiding noise data, lru_add_drain_all() should be called | 7601 | * TODO we could make this much more efficient by not checking every |
| 7602 | * If ZONE_MOVABLE, the zone never contains unmovable pages | 7602 | * page in the range if we know all of them are in MOVABLE_ZONE and |
| 7603 | * that the movable zone guarantees that pages are migratable but | ||
| 7604 | * the latter is not the case right now unfortunately. E.g. movablecore | ||
| 7605 | * can still lead to having bootmem allocations in zone_movable. | ||
| 7603 | */ | 7606 | */ |
| 7604 | if (zone_idx(zone) == ZONE_MOVABLE) | ||
| 7605 | return false; | ||
| 7606 | 7607 | ||
| 7607 | /* | 7608 | /* |
| 7608 | * CMA allocations (alloc_contig_range) really need to mark isolate | 7609 | * CMA allocations (alloc_contig_range) really need to mark isolate |
| @@ -7623,7 +7624,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 7623 | page = pfn_to_page(check); | 7624 | page = pfn_to_page(check); |
| 7624 | 7625 | ||
| 7625 | if (PageReserved(page)) | 7626 | if (PageReserved(page)) |
| 7626 | return true; | 7627 | goto unmovable; |
| 7627 | 7628 | ||
| 7628 | /* | 7629 | /* |
| 7629 | * Hugepages are not in LRU lists, but they're movable. | 7630 | * Hugepages are not in LRU lists, but they're movable. |
| @@ -7673,9 +7674,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 7673 | * page at boot. | 7674 | * page at boot. |
| 7674 | */ | 7675 | */ |
| 7675 | if (found > count) | 7676 | if (found > count) |
| 7676 | return true; | 7677 | goto unmovable; |
| 7677 | } | 7678 | } |
| 7678 | return false; | 7679 | return false; |
| 7680 | unmovable: | ||
| 7681 | WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); | ||
| 7682 | return true; | ||
| 7679 | } | 7683 | } |
| 7680 | 7684 | ||
| 7681 | bool is_pageblock_removable_nolock(struct page *page) | 7685 | bool is_pageblock_removable_nolock(struct page *page) |
diff --git a/mm/swapfile.c b/mm/swapfile.c index cc2cf04d9018..78a015fcec3b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -3112,6 +3112,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 3112 | unsigned long *frontswap_map = NULL; | 3112 | unsigned long *frontswap_map = NULL; |
| 3113 | struct page *page = NULL; | 3113 | struct page *page = NULL; |
| 3114 | struct inode *inode = NULL; | 3114 | struct inode *inode = NULL; |
| 3115 | bool inced_nr_rotate_swap = false; | ||
| 3115 | 3116 | ||
| 3116 | if (swap_flags & ~SWAP_FLAGS_VALID) | 3117 | if (swap_flags & ~SWAP_FLAGS_VALID) |
| 3117 | return -EINVAL; | 3118 | return -EINVAL; |
| @@ -3215,8 +3216,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 3215 | cluster = per_cpu_ptr(p->percpu_cluster, cpu); | 3216 | cluster = per_cpu_ptr(p->percpu_cluster, cpu); |
| 3216 | cluster_set_null(&cluster->index); | 3217 | cluster_set_null(&cluster->index); |
| 3217 | } | 3218 | } |
| 3218 | } else | 3219 | } else { |
| 3219 | atomic_inc(&nr_rotate_swap); | 3220 | atomic_inc(&nr_rotate_swap); |
| 3221 | inced_nr_rotate_swap = true; | ||
| 3222 | } | ||
| 3220 | 3223 | ||
| 3221 | error = swap_cgroup_swapon(p->type, maxpages); | 3224 | error = swap_cgroup_swapon(p->type, maxpages); |
| 3222 | if (error) | 3225 | if (error) |
| @@ -3307,6 +3310,8 @@ bad_swap: | |||
| 3307 | vfree(swap_map); | 3310 | vfree(swap_map); |
| 3308 | kvfree(cluster_info); | 3311 | kvfree(cluster_info); |
| 3309 | kvfree(frontswap_map); | 3312 | kvfree(frontswap_map); |
| 3313 | if (inced_nr_rotate_swap) | ||
| 3314 | atomic_dec(&nr_rotate_swap); | ||
| 3310 | if (swap_file) { | 3315 | if (swap_file) { |
| 3311 | if (inode && S_ISREG(inode->i_mode)) { | 3316 | if (inode && S_ISREG(inode->i_mode)) { |
| 3312 | inode_unlock(inode); | 3317 | inode_unlock(inode); |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e16d6713f236..2d42eb9cd1a5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -5041,7 +5041,7 @@ sub process { | |||
| 5041 | $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; | 5041 | $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; |
| 5042 | $tmp_stmt =~ s/\#+\s*$arg\b//g; | 5042 | $tmp_stmt =~ s/\#+\s*$arg\b//g; |
| 5043 | $tmp_stmt =~ s/\b$arg\s*\#\#//g; | 5043 | $tmp_stmt =~ s/\b$arg\s*\#\#//g; |
| 5044 | my $use_cnt = $tmp_stmt =~ s/\b$arg\b//g; | 5044 | my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g; |
| 5045 | if ($use_cnt > 1) { | 5045 | if ($use_cnt > 1) { |
| 5046 | CHK("MACRO_ARG_REUSE", | 5046 | CHK("MACRO_ARG_REUSE", |
| 5047 | "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); | 5047 | "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); |
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 6c645eb77d42..ee820fcc29b0 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c | |||
| @@ -252,6 +252,13 @@ void idr_checks(void) | |||
| 252 | idr_remove(&idr, 3); | 252 | idr_remove(&idr, 3); |
| 253 | idr_remove(&idr, 0); | 253 | idr_remove(&idr, 0); |
| 254 | 254 | ||
| 255 | assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0); | ||
| 256 | idr_remove(&idr, 1); | ||
| 257 | for (i = 1; i < RADIX_TREE_MAP_SIZE; i++) | ||
| 258 | assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i); | ||
| 259 | idr_remove(&idr, 1 << 30); | ||
| 260 | idr_destroy(&idr); | ||
| 261 | |||
| 255 | for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { | 262 | for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { |
| 256 | struct item *item = item_create(i, 0); | 263 | struct item *item = item_create(i, 0); |
| 257 | assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); | 264 | assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); |
