author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-15 03:00:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-15 03:00:18 -0400
commit     fa3a9f5744a92c0d7856d4e326c8d920d1d31116 (patch)
tree       e04ed516a5ff438d5740a97a16fa945cfbd44318
parent     0a9cb4815b91378bc7e8a7cda781ee50325bdd54 (diff)
parent     9babed6a66b5577628d9e76e5a6cb6104d7ddd4c (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"20 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
m32r: fix build warning about putc
mm: workingset: printk missing log level, use pr_info()
mm: thp: refix false positive BUG in page_move_anon_rmap()
mm: rmap: call page_check_address() with sync enabled to avoid racy check
mm: thp: move pmd check inside ptl for freeze_page()
vmlinux.lds: account for destructor sections
gcov: add support for gcc version >= 6
mm, meminit: ensure node is online before checking whether pages are uninitialised
mm, meminit: always return a valid node from early_pfn_to_nid
kasan/quarantine: fix bugs on qlist_move_cache()
uapi: export lirc.h header
madvise_free, thp: fix madvise_free_huge_pmd return value after splitting
Revert "scripts/gdb: add documentation example for radix tree"
Revert "scripts/gdb: add a Radix Tree Parser"
scripts/gdb: Perform path expansion to lx-symbol's arguments
scripts/gdb: add constants.py to .gitignore
scripts/gdb: rebuild constants.py on dependancy change
scripts/gdb: silence 'nothing to do' message
kasan: add newline to messages
mm, compaction: prevent VM_BUG_ON when terminating freeing scanner
-rw-r--r--  Documentation/gdb-kernel-debugging.txt   21
-rw-r--r--  arch/m32r/boot/compressed/m32r_sio.c      9
-rw-r--r--  arch/x86/mm/kasan_init_64.c               4
-rw-r--r--  include/asm-generic/vmlinux.lds.h         4
-rw-r--r--  include/linux/huge_mm.h                   4
-rw-r--r--  include/linux/rmap.h                      2
-rw-r--r--  include/uapi/linux/Kbuild                 1
-rw-r--r--  kernel/gcov/gcc_4_7.c                     2
-rw-r--r--  mm/compaction.c                          36
-rw-r--r--  mm/huge_memory.c                         38
-rw-r--r--  mm/hugetlb.c                              2
-rw-r--r--  mm/kasan/quarantine.c                    29
-rw-r--r--  mm/memory.c                               3
-rw-r--r--  mm/page_alloc.c                           6
-rw-r--r--  mm/rmap.c                                12
-rw-r--r--  mm/workingset.c                           2
-rw-r--r--  scripts/gdb/linux/.gitignore              1
-rw-r--r--  scripts/gdb/linux/Makefile                6
-rw-r--r--  scripts/gdb/linux/constants.py.in         7
-rw-r--r--  scripts/gdb/linux/radixtree.py           97
-rw-r--r--  scripts/gdb/linux/symbols.py              2
-rw-r--r--  scripts/gdb/vmlinux-gdb.py                1
22 files changed, 72 insertions(+), 217 deletions(-)
diff --git a/Documentation/gdb-kernel-debugging.txt b/Documentation/gdb-kernel-debugging.txt
index 4ab7d43d0754..7050ce8794b9 100644
--- a/Documentation/gdb-kernel-debugging.txt
+++ b/Documentation/gdb-kernel-debugging.txt
@@ -139,27 +139,6 @@ Examples of using the Linux-provided gdb helpers
     start_comm = "swapper/2\000\000\000\000\000\000"
   }
 
- o Dig into a radix tree data structure, such as the IRQ descriptors:
-  (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
-  $6 = {
-    irq_common_data = {
-      state_use_accessors = 67584,
-      handler_data = 0x0 <__vectors_start>,
-      msi_desc = 0x0 <__vectors_start>,
-      affinity = {{
-        bits = {65535}
-      }}
-    },
-    irq_data = {
-      mask = 0,
-      irq = 18,
-      hwirq = 27,
-      common = 0xee803d80,
-      chip = 0xc0eb0854 <gic_data>,
-      domain = 0xee808000,
-      parent_data = 0x0 <__vectors_start>,
-      chip_data = 0xc0eb0854 <gic_data>
-    } <... trimmed ...>
 
 List of commands and functions
 ------------------------------
diff --git a/arch/m32r/boot/compressed/m32r_sio.c b/arch/m32r/boot/compressed/m32r_sio.c
index 01d877c6868f..cf3023dced49 100644
--- a/arch/m32r/boot/compressed/m32r_sio.c
+++ b/arch/m32r/boot/compressed/m32r_sio.c
@@ -8,12 +8,13 @@
 
 #include <asm/processor.h>
 
-static void putc(char c);
+static void m32r_putc(char c);
 
 static int puts(const char *s)
 {
 	char c;
-	while ((c = *s++)) putc(c);
+	while ((c = *s++))
+		m32r_putc(c);
 	return 0;
 }
 
@@ -41,7 +42,7 @@ static int puts(const char *s)
 #define BOOT_SIO0TXB PLD_ESIO0TXB
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
 	while ((*BOOT_SIO0STS & 0x3) != 0x3)
 		cpu_relax();
@@ -61,7 +62,7 @@ static void putc(char c)
 #define SIO0TXB (volatile unsigned short *)(0x00efd000 + 30)
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
 	while ((*SIO0STS & 0x1) == 0)
 		cpu_relax();
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 1b1110fa0057..0493c17b8a51 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -54,8 +54,8 @@ static int kasan_die_handler(struct notifier_block *self,
 			     void *data)
 {
 	if (val == DIE_GPF) {
-		pr_emerg("CONFIG_KASAN_INLINE enabled");
-		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
 	}
 	return NOTIFY_OK;
 }
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6a67ab94b553..081d0f258d4c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -542,15 +542,19 @@
 
 #define INIT_TEXT \
 	*(.init.text) \
+	*(.text.startup) \
 	MEM_DISCARD(init.text)
 
 #define EXIT_DATA \
 	*(.exit.data) \
+	*(.fini_array) \
+	*(.dtors) \
 	MEM_DISCARD(exit.data) \
 	MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT \
 	*(.exit.text) \
+	*(.text.exit) \
 	MEM_DISCARD(exit.text)
 
 #define EXIT_CALL \
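The input sections added above are generated by the compiler rather than by kernel code: GCC places pointers to constructor/destructor functions into sections such as .init_array/.ctors and .fini_array/.dtors, and can split startup-only and exit-only code into .text.startup and .text.exit, which newer gcc versions can emit for kernel objects. A standalone userspace illustration (not kernel code, names are mine) of where such sections come from; build it and inspect the object with objdump -h:

/*
 * Illustrative only: constructor/destructor attributes make GCC emit
 * entries that end up in sections like .init_array/.ctors and
 * .fini_array/.dtors. The linker-script hunks above make the kernel
 * image account for such sections instead of leaving them orphaned.
 * Build and inspect with: gcc -c ctors.c && objdump -h ctors.o
 */
#include <stdio.h>

__attribute__((constructor))
static void example_setup(void)
{
	puts("runs before main()");
}

__attribute__((destructor))
static void example_teardown(void)
{
	puts("runs after main() returns");
}

int main(void)
{
	puts("main()");
	return 0;
}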
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 419fb9e03447..f0a7a0320300 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze);
+		unsigned long address, bool freeze, struct page *page);
 
 #define split_huge_pmd(__vma, __pmd, __address) \
 	do { \
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		if (pmd_trans_huge(*____pmd) \
 				|| pmd_devmap(*____pmd)) \
 			__split_huge_pmd(__vma, __pmd, __address, \
-					false); \
+					false, NULL); \
 	} while (0)
 
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 49eb4f8ebac9..2b0fad83683f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
 /*
  * rmap interfaces called when adding or removing pte of page
  */
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8bdae34d1f9a..ec10cfef166a 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -245,6 +245,7 @@ endif
 header-y += hw_breakpoint.h
 header-y += l2tp.h
 header-y += libc-compat.h
+header-y += lirc.h
 header-y += limits.h
 header-y += llc.h
 header-y += loop.h
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index e25e92fb44fa..6a5c239c7669 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS 10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS 9
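The old test matched only gcc 5.x, so gcc 6 fell through to a stale counter layout. The corrected condition can be exercised outside the kernel with a standalone program like the sketch below; the 10 and 9 values come from the hunk above, while the final fallback of 8 mirrors the rest of kernel/gcov/gcc_4_7.c and is an assumption here, not part of this hunk:

/* Prints which GCOV_COUNTERS value the preprocessor test selects for the
 * compiling gcc. Build with: gcc gcov_counters.c -o gcov_counters */
#include <stdio.h>

#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10	/* gcc 5.1+ and, after this fix, gcc 6+ */
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
#else
#define GCOV_COUNTERS 8		/* assumed fallback for older gcc */
#endif

int main(void)
{
	printf("gcc %d.%d selects GCOV_COUNTERS = %d\n",
	       __GNUC__, __GNUC_MINOR__, GCOV_COUNTERS);
	return 0;
}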
diff --git a/mm/compaction.c b/mm/compaction.c
index 79bfe0e06907..7bc04778f84d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1009,8 +1009,6 @@ static void isolate_freepages(struct compact_control *cc)
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
-		unsigned long isolated;
-
 		/*
 		 * This can iterate a massively long zone without finding any
 		 * suitable migration targets, so periodically check if we need
@@ -1034,36 +1032,30 @@ static void isolate_freepages(struct compact_control *cc)
 			continue;
 
 		/* Found a block suitable for isolating free pages from. */
-		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-					block_end_pfn, freelist, false);
-		/* If isolation failed early, do not continue needlessly */
-		if (!isolated && isolate_start_pfn < block_end_pfn &&
-		    cc->nr_migratepages > cc->nr_freepages)
-			break;
+		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+					freelist, false);
 
 		/*
-		 * If we isolated enough freepages, or aborted due to async
-		 * compaction being contended, terminate the loop.
-		 * Remember where the free scanner should restart next time,
-		 * which is where isolate_freepages_block() left off.
-		 * But if it scanned the whole pageblock, isolate_start_pfn
-		 * now points at block_end_pfn, which is the start of the next
-		 * pageblock.
-		 * In that case we will however want to restart at the start
-		 * of the previous pageblock.
+		 * If we isolated enough freepages, or aborted due to lock
+		 * contention, terminate.
 		 */
 		if ((cc->nr_freepages >= cc->nr_migratepages)
 							|| cc->contended) {
-			if (isolate_start_pfn >= block_end_pfn)
+			if (isolate_start_pfn >= block_end_pfn) {
+				/*
+				 * Restart at previous pageblock if more
+				 * freepages can be isolated next time.
+				 */
 				isolate_start_pfn =
 					block_start_pfn - pageblock_nr_pages;
+			}
 			break;
-		} else {
+		} else if (isolate_start_pfn < block_end_pfn) {
 			/*
-			 * isolate_freepages_block() should not terminate
-			 * prematurely unless contended, or isolated enough
+			 * If isolation failed early, do not continue
+			 * needlessly.
 			 */
-			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+			break;
 		}
 	}
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9ed58530f695..343a2b7e57aa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (next - addr != HPAGE_PMD_SIZE) {
 		get_page(page);
 		spin_unlock(ptl);
-		if (split_huge_page(page)) {
-			put_page(page);
-			unlock_page(page);
-			goto out_unlocked;
-		}
+		split_huge_page(page);
 		put_page(page);
 		unlock_page(page);
-		ret = 1;
 		goto out_unlocked;
 	}
 
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze)
+		unsigned long address, bool freeze, struct page *page)
 {
 	spinlock_t *ptl;
 	struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
 	ptl = pmd_lock(mm, pmd);
+
+	/*
+	 * If caller asks to setup a migration entries, we need a page to check
+	 * pmd against. Otherwise we can end up replacing wrong page.
+	 */
+	VM_BUG_ON(freeze && !page);
+	if (page && page != pmd_page(*pmd))
+		goto out;
+
 	if (pmd_trans_huge(*pmd)) {
-		struct page *page = pmd_page(*pmd);
+		page = pmd_page(*pmd);
 		if (PageMlocked(page))
 			clear_page_mlock(page);
 	} else if (!pmd_devmap(*pmd))
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 		return;
 
 	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-		return;
 
-	/*
-	 * If caller asks to setup a migration entries, we need a page to check
-	 * pmd against. Otherwise we can end up replacing wrong page.
-	 */
-	VM_BUG_ON(freeze && !page);
-	if (page && page != pmd_page(*pmd))
-		return;
-
-	/*
-	 * Caller holds the mmap_sem write mode or the anon_vma lock,
-	 * so a huge pmd cannot materialize from under us (khugepaged
-	 * holds both the mmap_sem write mode and the anon_vma lock
-	 * write mode).
-	 */
-	__split_huge_pmd(vma, pmd, address, freeze);
+	__split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
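The split_huge_pmd_address() hunk is the core of the "move pmd check inside ptl" fix: the pmd-vs-page comparison used to run before the page-table lock was taken, so the pmd could change underneath it; it now runs in __split_huge_pmd() after pmd_lock(). A generic userspace sketch of the same recheck-under-the-lock pattern (pthreads, purely illustrative, not kernel code):

/* Any check that can race with a concurrent writer must be (re)done after
 * the lock is taken, not before. Build with: gcc -pthread recheck.c */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	pthread_mutex_t lock;
	void *owner;
};

/* Returns true only if 'me' still owns obj once the lock is held. */
static bool claim_if_still_mine(struct obj *o, void *me)
{
	pthread_mutex_lock(&o->lock);
	if (o->owner != me) {		/* recheck under the lock */
		pthread_mutex_unlock(&o->lock);
		return false;
	}
	/* ... safe to operate on o here ... */
	pthread_mutex_unlock(&o->lock);
	return true;
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .owner = NULL };
	void *me = &o;

	o.owner = me;
	return claim_if_still_mine(&o, me) ? 0 : 1;
}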
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c1f3c0be150a..addfe4accc07 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3383,7 +3383,7 @@ retry_avoidcopy:
 	/* If no-one else is actually using this page, avoid the copy
 	 * and just make the page writable */
 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
-		page_move_anon_rmap(old_page, vma, address);
+		page_move_anon_rmap(old_page, vma);
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 4973505a9bdd..65793f150d1f 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -238,30 +238,23 @@ static void qlist_move_cache(struct qlist_head *from,
 				   struct qlist_head *to,
 				   struct kmem_cache *cache)
 {
-	struct qlist_node *prev = NULL, *curr;
+	struct qlist_node *curr;
 
 	if (unlikely(qlist_empty(from)))
 		return;
 
 	curr = from->head;
+	qlist_init(from);
 	while (curr) {
-		struct qlist_node *qlink = curr;
-		struct kmem_cache *obj_cache = qlink_to_cache(qlink);
+		struct qlist_node *next = curr->next;
+		struct kmem_cache *obj_cache = qlink_to_cache(curr);
 
-		if (obj_cache == cache) {
-			if (unlikely(from->head == qlink)) {
-				from->head = curr->next;
-				prev = curr;
-			} else
-				prev->next = curr->next;
-			if (unlikely(from->tail == qlink))
-				from->tail = curr->next;
-			from->bytes -= cache->size;
-			qlist_put(to, qlink, cache->size);
-		} else {
-			prev = curr;
-		}
-		curr = curr->next;
+		if (obj_cache == cache)
+			qlist_put(to, curr, obj_cache->size);
+		else
+			qlist_put(from, curr, obj_cache->size);
+
+		curr = next;
 	}
 }
 
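The old loop unlinked matching nodes in place while walking, which made the head/tail/bytes bookkeeping easy to get wrong; the rewrite drains the source list up front and re-inserts every node through qlist_put(), either into the destination or back into the source. A standalone sketch of that drain-and-re-put pattern (the list type and helpers below are illustrative stand-ins, not the kernel's kasan API):

#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int tag;			/* stands in for the object's kmem_cache */
};

struct list {
	struct node *head;
	struct node *tail;
};

static void list_put(struct list *l, struct node *n)
{
	n->next = NULL;
	if (!l->head)
		l->head = n;
	else
		l->tail->next = n;
	l->tail = n;
}

/* Move every node whose tag matches into 'to'; keep the rest in 'from'. */
static void list_move_tag(struct list *from, struct list *to, int tag)
{
	struct node *curr = from->head;

	from->head = from->tail = NULL;		/* drain the source list first */
	while (curr) {
		struct node *next = curr->next;	/* save before re-linking */

		if (curr->tag == tag)
			list_put(to, curr);
		else
			list_put(from, curr);	/* goes back to the source */
		curr = next;
	}
}

int main(void)
{
	struct node n[4] = { {NULL, 1}, {NULL, 2}, {NULL, 1}, {NULL, 2} };
	struct list from = {0}, to = {0};
	int i;

	for (i = 0; i < 4; i++)
		list_put(&from, &n[i]);
	list_move_tag(&from, &to, 1);
	for (struct node *c = to.head; c; c = c->next)
		printf("moved node with tag=%d\n", c->tag);
	return 0;
}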
diff --git a/mm/memory.c b/mm/memory.c
index cd1f29e4897e..9e046819e619 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				 * Protected against the rmap code by
 				 * the page lock.
 				 */
-				page_move_anon_rmap(compound_head(old_page),
-						    vma, address);
+				page_move_anon_rmap(old_page, vma);
 			}
 			unlock_page(old_page);
 			return wp_page_reuse(mm, vma, address, page_table, ptl,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6903b695ebae..8b3e1341b754 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-	if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+	int nid = early_pfn_to_nid(pfn);
+
+	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 		return true;
 
 	return false;
@@ -1273,7 +1275,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 	spin_lock(&early_pfn_lock);
 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
 	if (nid < 0)
-		nid = 0;
+		nid = first_online_node;
 	spin_unlock(&early_pfn_lock);
 
 	return nid;
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
  * page_move_anon_rmap - move a page to our anon_vma
  * @page: the page to move to our anon_vma
  * @vma: the vma the page belongs to
- * @address: the user virtual address mapped
  *
  * When a page belongs exclusively to one process after a COW event,
  * that page can be moved into the anon_vma that belongs to just that
  * process, so the rmap code will not search the parent or sibling
  * processes.
  */
-void page_move_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
+	page = compound_head(page);
+
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_VMA(!anon_vma, vma);
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
-		address &= HPAGE_PMD_MASK;
-	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	/*
@@ -1427,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		goto out;
 	}
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
+	pte = page_check_address(page, mm, address, &ptl,
+				 PageTransCompound(page));
 	if (!pte)
 		goto out;
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 8a75f8d2916a..577277546d98 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -491,7 +491,7 @@ static int __init workingset_init(void)
 	max_order = fls_long(totalram_pages - 1);
 	if (max_order > timestamp_bits)
 		bucket_order = max_order - timestamp_bits;
-	printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
 	       timestamp_bits, max_order, bucket_order);
 
 	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
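The bare printk() above carried no log level, so the line went out at the default level; pr_info() prepends KERN_INFO (the kernel's definition lives in include/linux/printk.h and also applies pr_fmt()). A simplified userspace model of what the macro adds (the real kernel prefix is a SOH-encoded byte, not the literal "<6>"):

#include <stdio.h>

/* Simplified stand-ins for the kernel macros, for illustration only. */
#define KERN_INFO "<6>"		/* loglevel 6; the kernel encodes this with a SOH byte */
#define pr_info(fmt, ...) printf(KERN_INFO fmt, ##__VA_ARGS__)

int main(void)
{
	/* The level prefix is added automatically, unlike a bare printk/printf. */
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		46, 17, 0u);
	return 0;
}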
diff --git a/scripts/gdb/linux/.gitignore b/scripts/gdb/linux/.gitignore
index 52e4e61140d1..2573543842d0 100644
--- a/scripts/gdb/linux/.gitignore
+++ b/scripts/gdb/linux/.gitignore
@@ -1,2 +1,3 @@
 *.pyc
 *.pyo
+constants.py
diff --git a/scripts/gdb/linux/Makefile b/scripts/gdb/linux/Makefile
index cd129e65d1ff..8b00031f5349 100644
--- a/scripts/gdb/linux/Makefile
+++ b/scripts/gdb/linux/Makefile
@@ -13,9 +13,11 @@ quiet_cmd_gen_constants_py = GEN $@
 	$(CPP) -E -x c -P $(c_flags) $< > $@ ;\
 	sed -i '1,/<!-- end-c-headers -->/d;' $@
 
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
-	$(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+	$(call if_changed_dep,gen_constants_py)
 
 build_constants_py: $(obj)/constants.py
+	@:
 
 clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 07e6c2befe36..7986f4e0da12 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -14,7 +14,6 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
-#include <linux/radix-tree.h>
 
 /* We need to stringify expanded macros so that they can be parsed */
 
@@ -51,9 +50,3 @@ LX_VALUE(MNT_NOEXEC)
 LX_VALUE(MNT_NOATIME)
 LX_VALUE(MNT_NODIRATIME)
 LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
diff --git a/scripts/gdb/linux/radixtree.py b/scripts/gdb/linux/radixtree.py
deleted file mode 100644
index 0fdef4e2971a..000000000000
--- a/scripts/gdb/linux/radixtree.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-# Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-#  Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
-    long_type = utils.get_long_type()
-    return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
-    long_type = utils.get_long_type()
-    node_type = node.type
-    indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
-    return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
-    height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
-    return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
-    if root.type == radix_tree_root_type.get_type().pointer():
-        root = root.dereference()
-    elif root.type != radix_tree_root_type.get_type():
-        raise gdb.GdbError("Must be struct radix_tree_root not {}"
-                           .format(root.type))
-
-    node = root['rnode']
-    if node is 0:
-        return None
-
-    if not (is_indirect_ptr(node)):
-        if (index > 0):
-            return None
-        return node
-
-    node = indirect_to_ptr(node)
-
-    height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
-    if (index > maxindex(height)):
-        return None
-
-    shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
-    while True:
-        new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
-        slot = node['slots'][new_index]
-
-        node = slot.cast(node.type.pointer()).dereference()
-        if node is 0:
-            return None
-
-        shift -= constants.LX_RADIX_TREE_MAP_SHIFT
-        height -= 1
-
-        if (height <= 0):
-            break
-
-    return node
-
-
-class LxRadixTree(gdb.Function):
-    """ Lookup and return a node from a RadixTree.
-
-    $lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-    If index is omitted, the root node is dereferenced and returned."""
-
-    def __init__(self):
-        super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
-    def invoke(self, root, index=0):
-        result = lookup(root, index)
-        if result is None:
-            raise gdb.GdbError("No entry in tree at index {}".format(index))
-
-        return result
-
-LxRadixTree()
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 9a0f8923f67c..004b0ac7fa72 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -153,7 +153,7 @@ lx-symbols command."""
             saved_state['breakpoint'].enabled = saved_state['enabled']
 
     def invoke(self, arg, from_tty):
-        self.module_paths = arg.split()
+        self.module_paths = [os.path.expanduser(p) for p in arg.split()]
         self.module_paths.append(os.getcwd())
 
         # enforce update
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index 3a80ad6eecad..6e0b0afd888a 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -31,4 +31,3 @@ else:
     import linux.lists
     import linux.proc
     import linux.constants
-    import linux.radixtree