-rw-r--r--  Documentation/kdump/gdbmacros.txt  |  93
-rw-r--r--  include/linux/page_idle.h          |  43
-rw-r--r--  mm/memcontrol.c                    |   3
-rw-r--r--  mm/oom_kill.c                      |   7
-rw-r--r--  mm/page_alloc.c                    |  39
-rw-r--r--  mm/page_owner.c                    |  26
-rw-r--r--  mm/page_poison.c                   |   8
-rw-r--r--  mm/vmalloc.c                       |   9
-rw-r--r--  mm/vmstat.c                        |   2
-rw-r--r--  mm/z3fold.c                        |  24
-rwxr-xr-x  scripts/checkpatch.pl              |   1
11 files changed, 205 insertions(+), 50 deletions(-)
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index 35f6a982a0d5..220d0a80ca2c 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -170,21 +170,92 @@ document trapinfo
 	address the kernel panicked.
 end
 
+define dump_log_idx
+	set $idx = $arg0
+	if ($argc > 1)
+		set $prev_flags = $arg1
+	else
+		set $prev_flags = 0
+	end
+	set $msg = ((struct printk_log *) (log_buf + $idx))
+	set $prefix = 1
+	set $newline = 1
+	set $log = log_buf + $idx + sizeof(*$msg)
 
-define dmesg
-	set $i = 0
-	set $end_idx = (log_end - 1) & (log_buf_len - 1)
+	# prev & LOG_CONT && !(msg->flags & LOG_PREIX)
+	if (($prev_flags & 8) && !($msg->flags & 4))
+		set $prefix = 0
+	end
+
+	# msg->flags & LOG_CONT
+	if ($msg->flags & 8)
+		# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+		if (($prev_flags & 8) && !($prev_flags & 2))
+			set $prefix = 0
+		end
+		# (!(msg->flags & LOG_NEWLINE))
+		if (!($msg->flags & 2))
+			set $newline = 0
+		end
+	end
+
+	if ($prefix)
+		printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+	end
+	if ($msg->text_len != 0)
+		eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+	end
+	if ($newline)
+		printf "\n"
+	end
+	if ($msg->dict_len > 0)
+		set $dict = $log + $msg->text_len
+		set $idx = 0
+		set $line = 1
+		while ($idx < $msg->dict_len)
+			if ($line)
+				printf " "
+				set $line = 0
+			end
+			set $c = $dict[$idx]
+			if ($c == '\0')
+				printf "\n"
+				set $line = 1
+			else
+				if ($c < ' ' || $c >= 127 || $c == '\\')
+					printf "\\x%02x", $c
+				else
+					printf "%c", $c
+				end
+			end
+			set $idx = $idx + 1
+		end
+		printf "\n"
+	end
+end
+document dump_log_idx
+	Dump a single log given its index in the log buffer. The first
+	parameter is the index into log_buf, the second is optional and
+	specified the previous log buffer's flags, used for properly
+	formatting continued lines.
+end
 
-	while ($i < logged_chars)
-		set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+	set $i = log_first_idx
+	set $end_idx = log_first_idx
+	set $prev_flags = 0
 
-		if ($idx + 100 <= $end_idx) || \
-		   ($end_idx <= $idx && $idx + 100 < log_buf_len)
-			printf "%.100s", &log_buf[$idx]
-			set $i = $i + 100
+	while (1)
+		set $msg = ((struct printk_log *) (log_buf + $i))
+		if ($msg->len == 0)
+			set $i = 0
 		else
-			printf "%c", log_buf[$idx]
-			set $i = $i + 1
+			dump_log_idx $i $prev_flags
+			set $i = $i + $msg->len
+			set $prev_flags = $msg->flags
+		end
+		if ($i == $end_idx)
+			loop_break
 		end
 	end
 end
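
The new dmesg macro no longer copies raw bytes out of a flat ring; it walks variable-length struct printk_log records, wrapping to offset 0 when it hits a record whose len is 0 and stopping once it is back at log_first_idx. The C sketch below models that walk in userspace so the loop shape is easy to follow. The record struct is a simplified stand-in carrying only the fields the macro touches, and the buffer contents are invented for illustration; it is not the kernel's printk_log definition.

/* Toy model of the record walk done by the new dmesg macro.
 * "struct rec" is a simplified stand-in for struct printk_log. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec {
	uint64_t ts_nsec;	/* timestamp in nanoseconds */
	uint16_t len;		/* total record length; 0 means "wrap to start" */
	uint16_t text_len;	/* length of the message text that follows */
};

static unsigned char log_buf[256];

/* Append one record at idx and return the next free idx. */
static size_t put(size_t idx, uint64_t ts, const char *text)
{
	struct rec r = { ts, (uint16_t)(sizeof(r) + strlen(text)),
			 (uint16_t)strlen(text) };

	memcpy(log_buf + idx, &r, sizeof(r));
	memcpy(log_buf + idx + sizeof(r), text, r.text_len);
	return idx + r.len;
}

int main(void)
{
	size_t first_idx = 0, idx, next;

	next = put(0, 1000000000ULL, "first line");
	next = put(next, 2500000000ULL, "second line");
	/* a zero len field after the last record marks the wrap point */
	memset(log_buf + next, 0, sizeof(struct rec));

	/* Same loop shape as the macro: wrap on len == 0, stop at first_idx. */
	idx = first_idx;
	while (1) {
		struct rec r;

		memcpy(&r, log_buf + idx, sizeof(r));
		if (r.len == 0) {
			idx = 0;
		} else {
			/* mirrors the macro's "[%5lu.%06lu] " prefix format */
			printf("[%5llu.%06llu] %.*s\n",
			       (unsigned long long)(r.ts_nsec / 1000000000),
			       (unsigned long long)(r.ts_nsec % 1000000000),
			       r.text_len, (char *)(log_buf + idx + sizeof(r)));
			idx += r.len;
		}
		if (idx == first_idx)
			break;
	}
	return 0;
}

Running it prints the two fabricated records with their timestamp prefixes, which is all the macro itself does per record via dump_log_idx.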
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-	return test_and_clear_bit(PAGE_EXT_YOUNG,
-				  &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
 
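
All of the page_idle.h hunks, and the similar ones in the mm/ files below, apply a single defensive pattern: lookup_page_ext() can return NULL (for example when the page_ext array was never allocated), so callers must bail out instead of dereferencing the result. Here is a minimal userspace sketch of that pattern; the types, the unlikely() macro and the always-failing lookup are simplified stand-ins used only to exercise the guard, not the kernel's definitions.

/* Minimal sketch of the NULL-check pattern added in these hunks. */
#include <stdbool.h>
#include <stdio.h>

#define unlikely(x) (x)		/* kernel branch hint, a no-op here */
#define PAGE_EXT_YOUNG 0

struct page { int dummy; };
struct page_ext { unsigned long flags; };

static bool test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/* May return NULL; always failing here simulates a missing page_ext array. */
static struct page_ext *lookup_page_ext(struct page *page)
{
	(void)page;
	return NULL;
}

static bool page_is_young(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))	/* the new guard: fail closed, don't oops */
		return false;

	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}

int main(void)
{
	struct page p = { 0 };

	printf("page_is_young: %d\n", page_is_young(&p));
	return 0;
}

Predicates fail closed (return false) and setters simply return, which is exactly the behaviour the hunks above introduce.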
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 925b431f3f03..58c69c94402a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	 * ordering is imposed by list_lru_node->lock taken by
 	 * memcg_drain_all_list_lrus().
 	 */
+	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
 	css_for_each_descendant_pre(css, &memcg->css) {
 		child = mem_cgroup_from_css(css);
 		BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 		if (!memcg->use_hierarchy)
 			break;
 	}
+	rcu_read_unlock();
+
 	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
 	memcg_free_cache_id(kmemcg_id);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dfb1ab61fb23..acbc432d1a52 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk)
 	if (atomic_read(&mm->mm_users) > 1) {
 		rcu_read_lock();
 		for_each_process(p) {
-			bool exiting;
-
 			if (!process_shares_mm(p, mm))
 				continue;
 			if (fatal_signal_pending(p))
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk)
 			 * If the task is exiting make sure the whole thread group
 			 * is exiting and cannot acces mm anymore.
 			 */
-			spin_lock_irq(&p->sighand->siglock);
-			exiting = signal_group_exit(p->signal);
-			spin_unlock_irq(&p->sighand->siglock);
-			if (exiting)
+			if (signal_group_exit(p->signal))
 				continue;
 
 			/* Give up */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc435ee..6903b695ebae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 				page = list_last_entry(list, struct page, lru);
 			else
 				page = list_first_entry(list, struct page, lru);
-		} while (page && check_new_pcp(page));
 
-		__dec_zone_state(zone, NR_ALLOC_BATCH);
-		list_del(&page->lru);
-		pcp->count--;
+			__dec_zone_state(zone, NR_ALLOC_BATCH);
+			list_del(&page->lru);
+			pcp->count--;
+
+		} while (check_new_pcp(page));
 	} else {
 		/*
 		 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ reset_fair:
 		apply_fair = false;
 		fair_skipped = false;
 		reset_alloc_batches(ac->preferred_zoneref->zone);
+		z = ac->preferred_zoneref;
 		goto zonelist_scan;
 	}
 
@@ -3596,6 +3604,17 @@ retry:
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Reset the zonelist iterators if memory policies can be ignored.
+	 * These allocations are high priority and system rather than user
+	 * orientated.
+	 */
+	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	}
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ retry:
 
 	/* Allocate without watermarks if the context allows */
 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
-		/*
-		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-		 * the allocation is high priority and these type of
-		 * allocations are system rather than user orientated
-		 */
-		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)
@@ -3808,7 +3821,11 @@ retry_cpuset:
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-	/* The preferred zone is used for statistics later */
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
 	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 					ac.high_zoneidx, ac.nodemask);
 	if (!ac.preferred_zoneref) {
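
The page_alloc.c changes revolve around the zonelist iterator: the preferred zoneref is both a statistics anchor and the iterator's starting point, so the retry path at the reset_fair: label has to put z back to it before jumping to zonelist_scan, and the no-watermark path recomputes it after switching zonelists. The toy program below models why the reset matters; the zone names and the scan loop are invented stand-ins for the kernel's zoneref machinery, not its API.

/* Toy model of the "z = ac->preferred_zoneref;" reset before a rescan. */
#include <stdio.h>

struct zone { const char *name; };

int main(void)
{
	struct zone zonelist[] = { { "Normal" }, { "DMA32" } };
	struct zone *preferred = &zonelist[0];
	struct zone *end = zonelist + 2;
	struct zone *z = preferred;
	int retried = 0;

zonelist_scan:
	for (; z < end; z++)
		printf("%s pass: trying zone %s\n",
		       retried ? "second" : "first", z->name);

	if (!retried) {
		retried = 1;
		/* Without this reset the retry would start with z already
		 * past the end of the list and scan nothing; restoring the
		 * preferred starting point is what the added line does
		 * before "goto zonelist_scan". */
		z = preferred;
		goto zonelist_scan;
	}
	return 0;
}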
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 792b56da13d8..c6cda3e36212 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 
 	page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp
+		 * So return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	struct page_ext *new_ext = lookup_page_ext(newpage);
 	int i;
 
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
 	gfp_t gfp_mask = page_ext->gfp_mask;
 	int mt = gfpflags_to_migratetype(gfp_mask);
 
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/* Maybe overraping zone */
 		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 1eae5fad2446..2e647c65916b 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
-	if (!page_ext)
+	if (unlikely(!page_ext))
 		return false;
 
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cf7ad1a53be0..e11475cdeb7a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
 
 	BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr;
 	void *mem;
 
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count,
 		unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long size;		/* In bytes */
 
 	might_sleep();
 
 	if (count > totalram_pages)
 		return NULL;
 
-	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-					__builtin_return_address(0));
+	size = (unsigned long)count << PAGE_SHIFT;
+	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
 	if (!area)
 		return NULL;
 
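
The three vmalloc.c hunks are the same class of fix: count is an unsigned int, so count << PAGE_SHIFT is evaluated in 32 bits and can wrap before it is ever widened to unsigned long. A small demonstration, assuming 4 KiB pages and a 64-bit userspace build:

/* Why the (unsigned long) casts matter: widen before shifting. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned int count = 1u << 20;	/* 1 Mi pages = 4 GiB */

	unsigned long bad  = count << PAGE_SHIFT;		/* 32-bit shift: wraps to 0 */
	unsigned long good = (unsigned long)count << PAGE_SHIFT; /* widened first: 4 GiB */

	printf("without cast: %lu bytes\n", bad);
	printf("with cast:    %lu bytes\n", good);
	return 0;
}

With one million pages the uncast shift wraps to zero, which is the overflow the casts in vm_unmap_ram(), vm_map_ram() and vmap() close off.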
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77e42ef388c2..cb2a67bb4158 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 34917d55d311..8f9e89ca1d31 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
-		bud = (handle - zhdr->first_num) & BUDDY_MASK;
+		bud = handle_to_buddy(handle);
 
 		switch (bud) {
 		case FIRST:
@@ -572,15 +572,19 @@ next:
 			pool->pages_nr--;
 			spin_unlock(&pool->lock);
 			return 0;
-		} else if (zhdr->first_chunks != 0 &&
-			   zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-			/* Full, add to buddied list */
-			list_add(&zhdr->buddy, &pool->buddied);
-		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-			z3fold_compact_page(zhdr);
-			/* add to unbuddied list */
-			freechunks = num_free_chunks(zhdr);
-			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+			if (zhdr->first_chunks != 0 &&
+			    zhdr->last_chunks != 0 &&
+			    zhdr->middle_chunks != 0) {
+				/* Full, add to buddied list */
+				list_add(&zhdr->buddy, &pool->buddied);
+			} else {
+				z3fold_compact_page(zhdr);
+				/* add to unbuddied list */
+				freechunks = num_free_chunks(zhdr);
+				list_add(&zhdr->buddy,
+					 &pool->unbuddied[freechunks]);
+			}
 		}
 
 		/* add to beginning of LRU */
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6750595bd7b8..4904ced676d4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2454,6 +2454,7 @@ sub process {
 
 # Check for git id commit length and improperly formed commit descriptions
 		if ($in_commit_log && !$commit_log_possible_stack_dump &&
+		    $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
 		    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
 		     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
 		      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&