diff options
author | Yang Shi <yang.shi@linaro.org> | 2016-06-03 17:55:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-06-03 18:06:22 -0400 |
commit | f86e4271978bd93db466d6a95dad4b0fdcdb04f6 (patch) | |
tree | 26ef29934495da598a11c3a177d0294a71c5eae8 /mm | |
parent | d8bae33dddc03dc652e1d8cfceebf4f753939de7 (diff) |
mm: check the return value of lookup_page_ext for all call sites
Per the discussion with Joonsoo Kim [1], we need to check the return value
of lookup_page_ext() for all call sites, since it might return NULL in
some cases — although it is unlikely, e.g. during memory hotplug.
Tested with ltp with "page_owner=0".
[1] http://lkml.kernel.org/r/20160519002809.GA10245@js1304-P5Q-DELUXE
[akpm@linux-foundation.org: fix build-breaking typos]
[arnd@arndb.de: fix build problems from lookup_page_ext]
Link: http://lkml.kernel.org/r/6285269.2CksypHdYp@wuerfel
[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/1464023768-31025-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 6 | ||||
-rw-r--r-- | mm/page_owner.c | 26 | ||||
-rw-r--r-- | mm/page_poison.c | 8 | ||||
-rw-r--r-- | mm/vmstat.c | 2 |
4 files changed, 41 insertions, 1 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f8f3bfc435ee..d27e8b968ac3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page, | |||
656 | return; | 656 | return; |
657 | 657 | ||
658 | page_ext = lookup_page_ext(page); | 658 | page_ext = lookup_page_ext(page); |
659 | if (unlikely(!page_ext)) | ||
660 | return; | ||
661 | |||
659 | __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | 662 | __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); |
660 | 663 | ||
661 | INIT_LIST_HEAD(&page->lru); | 664 | INIT_LIST_HEAD(&page->lru); |
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, | |||
673 | return; | 676 | return; |
674 | 677 | ||
675 | page_ext = lookup_page_ext(page); | 678 | page_ext = lookup_page_ext(page); |
679 | if (unlikely(!page_ext)) | ||
680 | return; | ||
681 | |||
676 | __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | 682 | __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); |
677 | 683 | ||
678 | set_page_private(page, 0); | 684 | set_page_private(page, 0); |
diff --git a/mm/page_owner.c b/mm/page_owner.c index 792b56da13d8..c6cda3e36212 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c | |||
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order) | |||
55 | 55 | ||
56 | for (i = 0; i < (1 << order); i++) { | 56 | for (i = 0; i < (1 << order); i++) { |
57 | page_ext = lookup_page_ext(page + i); | 57 | page_ext = lookup_page_ext(page + i); |
58 | if (unlikely(!page_ext)) | ||
59 | continue; | ||
58 | __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); | 60 | __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); |
59 | } | 61 | } |
60 | } | 62 | } |
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order) | |||
62 | void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | 64 | void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) |
63 | { | 65 | { |
64 | struct page_ext *page_ext = lookup_page_ext(page); | 66 | struct page_ext *page_ext = lookup_page_ext(page); |
67 | |||
65 | struct stack_trace trace = { | 68 | struct stack_trace trace = { |
66 | .nr_entries = 0, | 69 | .nr_entries = 0, |
67 | .max_entries = ARRAY_SIZE(page_ext->trace_entries), | 70 | .max_entries = ARRAY_SIZE(page_ext->trace_entries), |
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | |||
69 | .skip = 3, | 72 | .skip = 3, |
70 | }; | 73 | }; |
71 | 74 | ||
75 | if (unlikely(!page_ext)) | ||
76 | return; | ||
77 | |||
72 | save_stack_trace(&trace); | 78 | save_stack_trace(&trace); |
73 | 79 | ||
74 | page_ext->order = order; | 80 | page_ext->order = order; |
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | |||
82 | void __set_page_owner_migrate_reason(struct page *page, int reason) | 88 | void __set_page_owner_migrate_reason(struct page *page, int reason) |
83 | { | 89 | { |
84 | struct page_ext *page_ext = lookup_page_ext(page); | 90 | struct page_ext *page_ext = lookup_page_ext(page); |
91 | if (unlikely(!page_ext)) | ||
92 | return; | ||
85 | 93 | ||
86 | page_ext->last_migrate_reason = reason; | 94 | page_ext->last_migrate_reason = reason; |
87 | } | 95 | } |
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) | |||
89 | gfp_t __get_page_owner_gfp(struct page *page) | 97 | gfp_t __get_page_owner_gfp(struct page *page) |
90 | { | 98 | { |
91 | struct page_ext *page_ext = lookup_page_ext(page); | 99 | struct page_ext *page_ext = lookup_page_ext(page); |
100 | if (unlikely(!page_ext)) | ||
101 | /* | ||
102 | * The caller just returns 0 if no valid gfp | ||
103 | * So return 0 here too. | ||
104 | */ | ||
105 | return 0; | ||
92 | 106 | ||
93 | return page_ext->gfp_mask; | 107 | return page_ext->gfp_mask; |
94 | } | 108 | } |
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) | |||
99 | struct page_ext *new_ext = lookup_page_ext(newpage); | 113 | struct page_ext *new_ext = lookup_page_ext(newpage); |
100 | int i; | 114 | int i; |
101 | 115 | ||
116 | if (unlikely(!old_ext || !new_ext)) | ||
117 | return; | ||
118 | |||
102 | new_ext->order = old_ext->order; | 119 | new_ext->order = old_ext->order; |
103 | new_ext->gfp_mask = old_ext->gfp_mask; | 120 | new_ext->gfp_mask = old_ext->gfp_mask; |
104 | new_ext->nr_entries = old_ext->nr_entries; | 121 | new_ext->nr_entries = old_ext->nr_entries; |
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page) | |||
193 | gfp_t gfp_mask = page_ext->gfp_mask; | 210 | gfp_t gfp_mask = page_ext->gfp_mask; |
194 | int mt = gfpflags_to_migratetype(gfp_mask); | 211 | int mt = gfpflags_to_migratetype(gfp_mask); |
195 | 212 | ||
213 | if (unlikely(!page_ext)) { | ||
214 | pr_alert("There is not page extension available.\n"); | ||
215 | return; | ||
216 | } | ||
217 | |||
196 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { | 218 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { |
197 | pr_alert("page_owner info is not active (free page?)\n"); | 219 | pr_alert("page_owner info is not active (free page?)\n"); |
198 | return; | 220 | return; |
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
251 | } | 273 | } |
252 | 274 | ||
253 | page_ext = lookup_page_ext(page); | 275 | page_ext = lookup_page_ext(page); |
276 | if (unlikely(!page_ext)) | ||
277 | continue; | ||
254 | 278 | ||
255 | /* | 279 | /* |
256 | * Some pages could be missed by concurrent allocation or free, | 280 | * Some pages could be missed by concurrent allocation or free, |
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) | |||
317 | continue; | 341 | continue; |
318 | 342 | ||
319 | page_ext = lookup_page_ext(page); | 343 | page_ext = lookup_page_ext(page); |
344 | if (unlikely(!page_ext)) | ||
345 | continue; | ||
320 | 346 | ||
321 | /* Maybe overraping zone */ | 347 | /* Maybe overraping zone */ |
322 | if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) | 348 | if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) |
diff --git a/mm/page_poison.c b/mm/page_poison.c index 1eae5fad2446..2e647c65916b 100644 --- a/mm/page_poison.c +++ b/mm/page_poison.c | |||
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page) | |||
54 | struct page_ext *page_ext; | 54 | struct page_ext *page_ext; |
55 | 55 | ||
56 | page_ext = lookup_page_ext(page); | 56 | page_ext = lookup_page_ext(page); |
57 | if (unlikely(!page_ext)) | ||
58 | return; | ||
59 | |||
57 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 60 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
58 | } | 61 | } |
59 | 62 | ||
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page) | |||
62 | struct page_ext *page_ext; | 65 | struct page_ext *page_ext; |
63 | 66 | ||
64 | page_ext = lookup_page_ext(page); | 67 | page_ext = lookup_page_ext(page); |
68 | if (unlikely(!page_ext)) | ||
69 | return; | ||
70 | |||
65 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 71 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
66 | } | 72 | } |
67 | 73 | ||
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page) | |||
70 | struct page_ext *page_ext; | 76 | struct page_ext *page_ext; |
71 | 77 | ||
72 | page_ext = lookup_page_ext(page); | 78 | page_ext = lookup_page_ext(page); |
73 | if (!page_ext) | 79 | if (unlikely(!page_ext)) |
74 | return false; | 80 | return false; |
75 | 81 | ||
76 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 82 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 77e42ef388c2..cb2a67bb4158 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m, | |||
1061 | continue; | 1061 | continue; |
1062 | 1062 | ||
1063 | page_ext = lookup_page_ext(page); | 1063 | page_ext = lookup_page_ext(page); |
1064 | if (unlikely(!page_ext)) | ||
1065 | continue; | ||
1064 | 1066 | ||
1065 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) | 1067 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) |
1066 | continue; | 1068 | continue; |