author	Ingo Molnar <mingo@kernel.org>	2016-09-15 02:24:53 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-09-15 02:24:53 -0400
commit	d4b80afbba49e968623330f1336da8c724da8aad (patch)
tree	a9478bd77d8b001a6a7119328d34e9666d7bfe93	/mm/usercopy.c
parent	fcd709ef20a9d83bdb7524d27cd6719dac8690a0 (diff)
parent	4cea8776571b18db7485930cb422faa739580c8c (diff)
Merge branch 'linus' into x86/asm, to pick up recent fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/usercopy.c')
-rw-r--r--	mm/usercopy.c	65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 8ebae91a6b55..089328f2b920 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -83,7 +83,7 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
 	unsigned long check_high = check_low + n;
 
 	/* Does not overlap if entirely above or entirely below. */
-	if (check_low >= high || check_high < low)
+	if (check_low >= high || check_high <= low)
 		return false;
 
 	return true;
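Note on the hunk above: overlaps() treats [check_low, check_high) as a half-open range, so a copy that ends exactly at low does not overlap the region; the old "<" test reported a false overlap in that case. The following is a minimal userspace sketch of the corrected check (not the kernel code itself; the test values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Half-open interval test: [check_low, check_low + n) vs. [low, high).
 * The ranges are disjoint iff one starts at or after the other ends. */
static bool overlaps(unsigned long check_low, unsigned long n,
		     unsigned long low, unsigned long high)
{
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

int main(void)
{
	/* Hypothetical region [0x1000, 0x2000): a copy ending exactly at
	 * 0x1000 does not overlap it; with the old '<' test it did. */
	printf("%d\n", overlaps(0x0f00, 0x100, 0x1000, 0x2000));	/* 0 */
	printf("%d\n", overlaps(0x0f00, 0x101, 0x1000, 0x2000));	/* 1 */
	return 0;
}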
@@ -124,7 +124,7 @@ static inline const char *check_kernel_text_object(const void *ptr,
 static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 {
 	/* Reject if object wraps past end of memory. */
-	if (ptr + n < ptr)
+	if ((unsigned long)ptr + n < (unsigned long)ptr)
 		return "<wrapped address>";
 
 	/* Reject if NULL or ZERO-allocation. */
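Note on the hunk above: pointer arithmetic that overflows is undefined behavior in C, so a compiler may assume ptr + n can never be smaller than ptr and delete the check entirely; doing the comparison on unsigned long values keeps the wrap-around test well defined. A small standalone sketch of the same idea (illustrative values only, not the kernel code):

#include <limits.h>
#include <stdio.h>

/* Wrap check performed in unsigned integer arithmetic, which wraps
 * modulo 2^N by definition, rather than in pointer arithmetic, where
 * overflow is undefined and the test may be optimized away. */
static const char *check_wrap(const void *ptr, unsigned long n)
{
	if ((unsigned long)ptr + n < (unsigned long)ptr)
		return "<wrapped address>";
	return NULL;
}

int main(void)
{
	/* Hypothetical object 0x100 bytes below the top of the address
	 * space: a 0x200-byte copy wraps past the end and is rejected. */
	const void *ptr = (const void *)(ULONG_MAX - 0xff);

	printf("%s\n", check_wrap(ptr, 0x200) ? "rejected" : "ok");	/* rejected */
	printf("%s\n", check_wrap(ptr, 0x80) ? "rejected" : "ok");	/* ok */
	return 0;
}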
@@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
 	 * rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
 
-reject:
-	return "<spans multiple pages>";
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
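Note on the restructuring in the last two hunks: check_heap_object() now keeps only the unconditional checks (vmalloc, virt_addr_valid(), slab) and delegates the cross-page heuristics to check_page_span(), whose body is compiled out when CONFIG_HARDENED_USERCOPY_PAGESPAN is not set. The sketch below shows the same compile-time-gated helper pattern with hypothetical names (do_mandatory_checks, do_optional_check, MY_OPTIONAL_CHECK), not the kernel identifiers:

#include <stddef.h>

/* Optional checks live in their own helper whose body is guarded by the
 * config symbol, so the caller stays free of #ifdefs and the helper
 * collapses to "return NULL" when the option is disabled. */
static inline const char *do_optional_check(const void *ptr, unsigned long n)
{
#ifdef MY_OPTIONAL_CHECK
	/* Stand-in heuristic: reject an object that crosses a 4096-byte
	 * boundary, loosely analogous to the page-span test. */
	if (((unsigned long)ptr & 4095) + n > 4096)
		return "<spans multiple pages>";
#endif
	return NULL;
}

static inline const char *do_mandatory_checks(const void *ptr, unsigned long n)
{
	if (ptr == NULL)
		return "<null address>";

	/* ...further unconditional checks would go here... */

	return do_optional_check(ptr, n);
}

Keeping the #ifdef inside the helper confines the configuration conditional to one place, which is why the caller in the new code needs no #ifdef of its own.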