aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
author    Andrey Konovalov <andreyknvl@google.com>  2018-12-28 03:30:57 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 15:11:44 -0500
commit2813b9c0296259fb11e75c839bab2d958ba4f96c (patch)
tree4f174c60d6f189977d74053b118f8f820ab689f1 /include/linux/mm.h
parent41eea9cd239c5b3fff726894f85c97f60e5799a3 (diff)
kasan, mm, arm64: tag non slab memory allocated via pagealloc
Tag-based KASAN doesn't check memory accesses through pointers tagged with 0xff. When page_address is used to get pointer to memory that corresponds to some page, the tag of the resulting pointer gets set to 0xff, even though the allocated memory might have been tagged differently.

For slab pages it's impossible to recover the correct tag to return from page_address, since the page might contain multiple slab objects tagged with different values, and we can't know in advance which one of them is going to get accessed.

For non slab pages however, we can recover the tag in page_address, since the whole page was marked with the same tag.

This patch adds tagging to non slab memory allocated with pagealloc. To set the tag of the pointer returned from page_address, the tag gets stored to page->flags when the memory gets allocated.

Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  |  29 +
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5411de93a363..b4d01969e700 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -804,6 +804,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
804  804   #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
805  805   #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
806  806   #define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
     807 + #define KASAN_TAG_PGOFF	(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
807  808
808  809   /*
809  810    * Define the bit shifts to access each section.  For non-existent
@@ -814,6 +815,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
814  815   #define NODES_PGSHIFT	(NODES_PGOFF * (NODES_WIDTH != 0))
815  816   #define ZONES_PGSHIFT	(ZONES_PGOFF * (ZONES_WIDTH != 0))
816  817   #define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
     818 + #define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
817  819
818  820   /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
819  821   #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -836,6 +838,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
836  838   #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
837  839   #define SECTIONS_MASK	((1UL << SECTIONS_WIDTH) - 1)
838  840   #define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
     841 + #define KASAN_TAG_MASK	((1UL << KASAN_TAG_WIDTH) - 1)
839  842   #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
840  843
841  844   static inline enum zone_type page_zonenum(const struct page *page)
@@ -1101,6 +1104,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1101 1104  }
1102 1105  #endif /* CONFIG_NUMA_BALANCING */
1103 1106
1107#ifdef CONFIG_KASAN_SW_TAGS
/*
 * With software tag-based KASAN, the pointer tag of a non-slab page is
 * recorded in page->flags (field defined by KASAN_TAG_PGSHIFT and
 * KASAN_TAG_MASK above) so that page_address() can recover it.
 */
/* Read the KASAN tag stored for @page in page->flags. */
1108static inline u8 page_kasan_tag(const struct page *page)
1109{
1110	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1111}
1112
/* Replace the KASAN tag field of page->flags with @tag (low byte only). */
1113static inline void page_kasan_tag_set(struct page *page, u8 tag)
1114{
1115	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1116	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1117}
1118
/*
 * Reset the tag to 0xff, the match-all value that tag-based KASAN does not
 * check accesses against (per the commit message above).
 */
1119static inline void page_kasan_tag_reset(struct page *page)
1120{
1121	page_kasan_tag_set(page, 0xff);
1122}
/* !CONFIG_KASAN_SW_TAGS stubs: every page reads as untagged (0xff). */
1123#else
1124static inline u8 page_kasan_tag(const struct page *page)
1125{
1126	return 0xff;
1127}
1128
1129static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1130static inline void page_kasan_tag_reset(struct page *page) { }
1131#endif
     1132 +
1104 1133  static inline struct zone *page_zone(const struct page *page)
1105 1134  {
1106 1135  	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];