aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKees Cook <keescook@chromium.org>2017-04-05 12:39:08 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-04-21 03:31:24 -0400
commit2c0ad235ac77f2fc2eee593bf06822cad772e0e2 (patch)
tree0ca5b8a057ee9182efe7c0a50524bdba24751645
parentef793e6e113473f7cb08edf1ca4a8737c57ce51c (diff)
mm: Tighten x86 /dev/mem with zeroing reads
commit a4866aa812518ed1a37d8ea0c881dc946409de94 upstream. Under CONFIG_STRICT_DEVMEM, reading System RAM through /dev/mem is disallowed. However, on x86, the first 1MB was always allowed for BIOS and similar things, regardless of it actually being System RAM. It was possible for heap to end up getting allocated in low 1MB RAM, and then read by things like x86info or dd, which would trip hardened usercopy: usercopy: kernel memory exposure attempt detected from ffff880000090000 (dma-kmalloc-256) (4096 bytes) This changes the x86 exception for the low 1MB by reading back zeros for System RAM areas instead of blindly allowing them. More work is needed to extend this to mmap, but currently mmap doesn't go through usercopy, so hardened usercopy won't Oops the kernel. Reported-by: Tommi Rantala <tommi.t.rantala@nokia.com> Tested-by: Tommi Rantala <tommi.t.rantala@nokia.com> Signed-off-by: Kees Cook <keescook@chromium.org> Cc: Brad Spengler <spender@grsecurity.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--arch/x86/mm/init.c41
-rw-r--r--drivers/char/mem.c82
2 files changed, 82 insertions, 41 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912d66d2..889e7619a091 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
+		return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d39d22..7e4a9d1296bb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
+			remaining = copy_to_user(buf, ptr, sz);
+
+			unxlate_dev_mem_ptr(p, ptr);
+		}
 
-		remaining = copy_to_user(buf, ptr, sz);
-		unxlate_dev_mem_ptr(p, ptr);
 		if (remaining)
 			return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;