 arch/arm64/kernel/setup.c  |  3
 arch/arm64/mm/kasan_init.c |  2
 fs/proc/base.c             |  4
 init/initramfs.c           |  6
 kernel/sched/psi.c         |  2
 mm/debug.c                 |  4
 mm/kasan/Makefile          |  2
 mm/kasan/common.c          | 29
 mm/kasan/tags.c            |  2
 mm/kmemleak.c              | 10
 mm/memory_hotplug.c        | 27
 mm/mempolicy.c             |  6
 mm/page_alloc.c            | 12
 mm/shmem.c                 | 10
 mm/slab.c                  | 15
 mm/slab.h                  |  7
 mm/slab_common.c           |  3
 mm/slub.c                  | 59
 mm/swap.c                  | 17
 mm/util.c                  |  2
 20 files changed, 140 insertions(+), 82 deletions(-)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d09ec76f08cf..009849328289 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -339,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
+	/* Init percpu seeds for random tags after cpus are set up. */
+	kasan_init_tags();
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
 	 * Make sure init_thread_info.ttbr0 always generates translation
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 4b55b15707a3..f37a86d2a69d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -252,8 +252,6 @@ void __init kasan_init(void)
 	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
-	kasan_init_tags();
-
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 633a63462573..f5ed9512d193 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 
 			task_lock(p);
 			if (!p->vfork_done && process_shares_mm(p, mm)) {
-				pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
-					task_pid_nr(p), p->comm,
-					p->signal->oom_score_adj, oom_adj,
-					task_pid_nr(task), task->comm);
 				p->signal->oom_score_adj = oom_adj;
 				if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
 					p->signal->oom_score_adj_min = (short)oom_adj;
diff --git a/init/initramfs.c b/init/initramfs.c
index 7cea802d00ef..fca899622937 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -550,6 +550,7 @@ skip:
 	initrd_end = 0;
 }
 
+#ifdef CONFIG_BLK_DEV_RAM
 #define BUF_SIZE 1024
 static void __init clean_rootfs(void)
 {
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
 	ksys_close(fd);
 	kfree(buf);
 }
+#endif
 
 static int __init populate_rootfs(void)
 {
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
 		printk(KERN_INFO "Unpacking initramfs...\n");
 		err = unpack_to_rootfs((char *)initrd_start,
 			initrd_end - initrd_start);
-		if (err) {
+		if (err)
 			printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
-			clean_rootfs();
-		}
 		free_initrd();
 #endif
 	}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index c3484785b179..0e97ca9306ef 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
 	expires = group->next_update;
 	if (now < expires)
 		goto out;
-	if (now - expires > psi_period)
+	if (now - expires >= psi_period)
 		missed_periods = div_u64(now - expires, psi_period);
 
 	/*
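
The psi.c change above only widens a comparison from '>' to '>='. A standalone sketch of the boundary case it covers, with made-up values (this is not kernel code; psi_period and the timestamps are invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t psi_period = 2000000000ULL;	/* 2s in ns, made up */
	uint64_t expires = 40ULL * psi_period;
	uint64_t now = expires + psi_period;	/* exactly one period late */
	uint64_t missed_periods = 0;

	/* the old check used '>', which leaves missed_periods at 0 here */
	if (now - expires >= psi_period)
		missed_periods = (now - expires) / psi_period;

	printf("missed_periods = %llu\n", (unsigned long long)missed_periods);
	return 0;
}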
diff --git a/mm/debug.c b/mm/debug.c
index 0abb987dad9b..1611cf00a137 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
 	bool page_poisoned = PagePoisoned(page);
 	int mapcount;
 
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
 		goto hex_only;
 	}
 
+	mapping = page_mapping(page);
+
 	/*
 	 * Avoid VM_BUG_ON() in page_mapcount().
 	 * page->_mapcount space in struct page is used by sl[aou]b pages to
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index e2bb06c1b45e..5d1065efbd47 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_tags.o = -pg
+
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 73c9cbfdedf4..09b534fbba17 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
  * get different tags.
  */
 static u8 assign_tag(struct kmem_cache *cache, const void *object,
-			bool init, bool krealloc)
+			bool init, bool keep_tag)
 {
-	/* Reuse the same tag for krealloc'ed objects. */
-	if (krealloc)
+	/*
+	 * 1. When an object is kmalloc()'ed, two hooks are called:
+	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
+	 *    tag only in the first one.
+	 * 2. We reuse the same tag for krealloc'ed objects.
+	 */
+	if (keep_tag)
 		return get_tag(object);
 
 	/*
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
 	return (void *)object;
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-					gfp_t flags)
-{
-	return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 }
 
 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
-				size_t size, gfp_t flags, bool krealloc)
+				size_t size, gfp_t flags, bool keep_tag)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 		KASAN_SHADOW_SCALE_SIZE);
 
 	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		tag = assign_tag(cache, object, false, krealloc);
+		tag = assign_tag(cache, object, false, keep_tag);
 
 	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
 	kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 	return set_tag(object, tag);
 }
 
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+					gfp_t flags)
+{
+	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
 				size_t size, gfp_t flags)
 {
-	return __kasan_kmalloc(cache, object, size, flags, false);
+	return __kasan_kmalloc(cache, object, size, flags, true);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 0777649e07c4..63fca3172659 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		per_cpu(prng_state, cpu) = get_random_u32();
+		per_cpu(prng_state, cpu) = (u32)get_cycles();
 }
 
 /*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f9d9dc250428..707fa5579f66 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	unsigned long flags;
 	struct kmemleak_object *object, *parent;
 	struct rb_node **link, *rb_parent;
+	unsigned long untagged_ptr;
 
 	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 	if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 
 	write_lock_irqsave(&kmemleak_lock, flags);
 
-	min_addr = min(min_addr, ptr);
-	max_addr = max(max_addr, ptr + size);
+	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+	min_addr = min(min_addr, untagged_ptr);
+	max_addr = max(max_addr, untagged_ptr + size);
 	link = &object_tree_root.rb_node;
 	rb_parent = NULL;
 	while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
+	unsigned long untagged_ptr;
 
 	read_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@
 		pointer = *ptr;
 		kasan_enable_current();
 
-		if (pointer < min_addr || pointer >= max_addr)
+		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
 			continue;
 
 		/*
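
The kmemleak.c hunks strip the KASAN software tag (kasan_reset_tag()) before updating min_addr/max_addr and before the range check in scan_block(). A rough userspace sketch of why raw tagged values break that bookkeeping; the tag layout (top byte of a 64-bit pointer) mirrors arm64 software tags, but every address and the helper below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define TAG_MASK	(0xffULL << 56)

/* stand-in for kasan_reset_tag(): clear the top-byte tag */
static uint64_t reset_tag(uint64_t ptr)
{
	return ptr & ~TAG_MASK;
}

int main(void)
{
	uint64_t obj_a = 0x0000400000001000ULL;				/* untagged */
	uint64_t obj_b = 0x0000400000002000ULL | (0x5aULL << 56);	/* tag 0x5a */
	uint64_t min_addr, max_addr;

	/* bookkeeping on raw values: the tag inflates the window enormously */
	min_addr = obj_a < obj_b ? obj_a : obj_b;
	max_addr = obj_a > obj_b ? obj_a : obj_b;
	printf("raw window:      %#llx bytes\n",
	       (unsigned long long)(max_addr - min_addr));

	/* untagged bookkeeping keeps the window tight */
	min_addr = reset_tag(obj_a) < reset_tag(obj_b) ? reset_tag(obj_a) : reset_tag(obj_b);
	max_addr = reset_tag(obj_a) > reset_tag(obj_b) ? reset_tag(obj_a) : reset_tag(obj_b);
	printf("untagged window: %#llx bytes\n",
	       (unsigned long long)(max_addr - min_addr));
	return 0;
}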
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 124e794867c5..1ad28323fb9f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
-	struct page *end_page = pfn_to_page(end_pfn);
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
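
The memory_hotplug.c rework walks the range by pfn and only converts to struct page per block. One plausible reading of why pfn arithmetic is preferred: with a sparse memory map the struct page arrays for different sections need not be contiguous, so pointer comparisons like page < end_page are unreliable, while pfn arithmetic plus a lookup stays well defined. A toy model of that idea, with an invented two-section memmap rather than the kernel's SPARSEMEM code:

#include <stdio.h>
#include <stdlib.h>

#define SECTION_PAGES	1024UL

struct page { int dummy; };

/* two separately allocated (non-contiguous) memmap chunks */
static struct page *section_memmap[2];

static struct page *pfn_to_page(unsigned long pfn)
{
	return &section_memmap[pfn / SECTION_PAGES][pfn % SECTION_PAGES];
}

int main(void)
{
	unsigned long pfn;

	section_memmap[0] = calloc(SECTION_PAGES, sizeof(struct page));
	section_memmap[1] = calloc(SECTION_PAGES, sizeof(struct page));

	/* index (pfn) arithmetic stays valid across the section boundary */
	for (pfn = 0; pfn < 2 * SECTION_PAGES; pfn += SECTION_PAGES / 2)
		printf("pfn %4lu -> struct page at %p\n", pfn,
		       (void *)pfn_to_page(pfn));

	free(section_memmap[0]);
	free(section_memmap[1]);
	return 0;
}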
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d4496d9d34f5..ee2bce59d2bf 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
 			      nodemask_t *nodes)
 {
 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
-	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
 
 	if (copy > nbytes) {
 		if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
 	int uninitialized_var(pval);
 	nodemask_t nodes;
 
-	if (nmask != NULL && maxnode < MAX_NUMNODES)
+	if (nmask != NULL && maxnode < nr_node_ids)
 		return -EINVAL;
 
 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
 
-	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask)
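
The mempolicy.c change sizes nodemask handling by nr_node_ids (node IDs actually possible on the running system) instead of the compile-time MAX_NUMNODES. A small sketch of what that relaxes, with made-up constants; check_maxnode() below is only a stand-in for the -EINVAL test in kernel_get_mempolicy(), not kernel code:

#include <stdio.h>

#define MAX_NUMNODES	1024	/* e.g. a distro kernel with a large NODES_SHIFT */

static int check_maxnode(unsigned long maxnode, unsigned long limit)
{
	return maxnode < limit ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
	unsigned long nr_node_ids = 4;	/* nodes actually possible on this box */
	unsigned long maxnode = 64;	/* caller passed a single-long nodemask */

	printf("old check (MAX_NUMNODES): %d\n", check_maxnode(maxnode, MAX_NUMNODES));
	printf("new check (nr_node_ids):  %d\n", check_maxnode(maxnode, nr_node_ids));
	return 0;
}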
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7f79b78bc829..0b9f577b1a2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
 
 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
 			watermark_boost_factor, 10000);
+
+	/*
+	 * high watermark may be uninitialised if fragmentation occurs
+	 * very early in boot so do not boost. We do not fall
+	 * through and boost by pageblock_nr_pages as failing
+	 * allocations that early means that reclaim is not going
+	 * to help and it may even be impossible to reclaim the
+	 * boosted watermark resulting in a hang.
+	 */
+	if (!max_boost)
+		return;
+
 	max_boost = max(pageblock_nr_pages, max_boost);
 
 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
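
The boost_watermark() hunk bails out when the computed boost is zero. A minimal sketch of the arithmetic with made-up numbers (the mult_frac() step is simplified to plain integer math here; this is not the kernel's implementation):

#include <stdio.h>

int main(void)
{
	unsigned long wmark_high = 0;			/* not initialised yet at early boot */
	unsigned long watermark_boost_factor = 15000;	/* 150% */
	unsigned long pageblock_nr_pages = 512;
	unsigned long max_boost = wmark_high * watermark_boost_factor / 10000;

	if (!max_boost) {
		printf("watermark not initialised, skip boosting\n");
		return 0;
	}

	if (max_boost < pageblock_nr_pages)	/* i.e. max(pageblock_nr_pages, max_boost) */
		max_boost = pageblock_nr_pages;
	printf("max_boost = %lu pages\n", max_boost);
	return 0;
}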
diff --git a/mm/shmem.c b/mm/shmem.c
index 6ece1e2fe76e..0905215fb016 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2854,10 +2854,14 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
 	 * No ordinary (disk based) filesystem counts links as inodes;
 	 * but each new link needs a new dentry, pinning lowmem, and
 	 * tmpfs dentries cannot be pruned until they are unlinked.
+	 * But if an O_TMPFILE file is linked into the tmpfs, the
+	 * first link must skip that, to get the accounting right.
 	 */
-	ret = shmem_reserve_inode(inode->i_sb);
-	if (ret)
-		goto out;
+	if (inode->i_nlink) {
+		ret = shmem_reserve_inode(inode->i_sb);
+		if (ret)
+			goto out;
+	}
 
 	dir->i_size += BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	void *freelist;
 	void *addr = page_address(page);
 
-	page->s_mem = kasan_reset_tag(addr) + colour_off;
+	page->s_mem = addr + colour_off;
 	page->active = 0;
 
 	if (OBJFREELIST_SLAB(cachep))
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 		/* Slab management obj is off-slab. */
 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
 						local_flags, nodeid);
+		freelist = kasan_reset_tag(freelist);
 		if (!freelist)
 			return NULL;
 	} else {
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
 	offset *= cachep->colour_off;
 
+	/*
+	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
+	 * page_address() in the latter returns a non-tagged pointer,
+	 * as it should be for slab pages.
+	 */
+	kasan_poison_slab(page);
+
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, page, offset,
 			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
 	slab_map_pages(cachep, page, freelist);
 
-	kasan_poison_slab(page);
 	cache_init_objs(cachep, page);
 
 	if (gfpflags_allow_blocking(local_flags))
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	ret = kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	ret = kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	unsigned int objnr;
 	unsigned long offset;
 
+	ptr = kasan_reset_tag(ptr);
+
 	/* Find and validate object. */
 	cachep = page->slab_cache;
 	objnr = obj_to_index(cachep, page, (void *)ptr);
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 	flags &= gfp_allowed_mask;
 	for (i = 0; i < size; i++) {
-		void *object = p[i];
-
-		kmemleak_alloc_recursive(object, s->object_size, 1,
-					 s->flags, flags);
-		p[i] = kasan_slab_alloc(s, object, flags);
+		p[i] = kasan_slab_alloc(s, p[i], flags);
+		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
+		kmemleak_alloc_recursive(p[i], s->object_size, 1,
+					 s->flags, flags);
 	}
 
 	if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81732d05e74a..f9d89c1b5977 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
-	kmemleak_alloc(ret, size, 1, flags);
 	ret = kasan_kmalloc_large(ret, size, flags);
+	/* As ret might get tagged, call kmemleak hook after KASAN. */
+	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 					 unsigned long ptr_addr)
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
-	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+	/*
+	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+	 * Normally, this doesn't cause any issues, as both set_freepointer()
+	 * and get_freepointer() are called with a pointer with the same tag.
+	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
+	 * example, when __free_slub() iterates over objects in a cache, it
+	 * passes untagged pointers to check_object(). check_object() in turns
+	 * calls get_freepointer() with an untagged pointer, which causes the
+	 * freepointer to be restored incorrectly.
+	 */
+	return (void *)((unsigned long)ptr ^ s->random ^
+			(unsigned long)kasan_reset_tag((void *)ptr_addr));
 #else
 	return ptr;
 #endif
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 		__p < (__addr) + (__objects) * (__s)->size; \
 		__p += (__s)->size)
 
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-	for (__p = fixup_red_left(__s, __addr), __idx = 1;	\
-		__idx <= __objects;	\
-		__p += (__s)->size, __idx++)
-
 /* Determine object index from a given position */
 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
-	return (p - addr) / s->size;
+	return (kasan_reset_tag(p) - addr) / s->size;
 }
 
 static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 		return 1;
 
 	base = page_address(page);
+	object = kasan_reset_tag(object);
 	object = restore_red_left(s, object);
 	if (object < base || object >= base + page->objects * s->size ||
 		(object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+	if (!(s->flags & SLAB_POISON))
+		return;
+
+	metadata_access_enable();
+	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	metadata_access_disable();
+}
+
 static inline int alloc_consistency_checks(struct kmem_cache *s,
 					struct page *page,
 					void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+			void *addr, int order) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
  */
 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
+	ptr = kasan_kmalloc_large(ptr, size, flags);
+	/* As ptr might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ptr, size, 1, flags);
-	return kasan_kmalloc_large(ptr, size, flags);
+	return ptr;
 }
 
 static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (page_is_pfmemalloc(page))
 		SetPageSlabPfmemalloc(page);
 
+	kasan_poison_slab(page);
+
 	start = page_address(page);
 
-	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << order);
-
-	kasan_poison_slab(page);
+	setup_page_debug(s, start, order);
 
 	shuffle = shuffle_freelist(s, page);
 
 	if (!shuffle) {
-		for_each_object_idx(p, idx, s, start, page->objects) {
-			if (likely(idx < page->objects)) {
-				next = p + s->size;
-				next = setup_object(s, page, next);
-				set_freepointer(s, p, next);
-			} else
-				set_freepointer(s, p, NULL);
-		}
 		start = fixup_red_left(s, start);
 		start = setup_object(s, page, start);
 		page->freelist = start;
+		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+			next = p + s->size;
+			next = setup_object(s, page, next);
+			set_freepointer(s, p, next);
+			p = next;
+		}
+		set_freepointer(s, p, NULL);
 	}
 
 	page->inuse = page->objects;
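
The freelist_ptr() hunk at the top of the mm/slub.c diff strips the KASAN tag from the storage address before XOR-ing. A rough userspace model of why the tag has to go when encode and decode may see differently tagged views of the same slot (the scrambling scheme is simplified and every constant is invented; this is not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

#define TAG_MASK	(0xffULL << 56)

/* XOR-scramble a freepointer with a secret and the address it is stored at */
static uint64_t scramble(uint64_t ptr, uint64_t secret, uint64_t ptr_addr,
			 int reset_tag)
{
	if (reset_tag)
		ptr_addr &= ~TAG_MASK;
	return ptr ^ secret ^ ptr_addr;
}

int main(void)
{
	uint64_t secret = 0x1234567890abcdefULL;
	uint64_t slot = 0x0000700012345678ULL;		/* freepointer slot, untagged */
	uint64_t tagged_slot = slot | (0x2aULL << 56);	/* same slot with tag 0x2a */
	uint64_t next = 0x00007000deadbeefULL;

	/* encode with a tagged address, decode with an untagged one */
	uint64_t naive = scramble(scramble(next, secret, tagged_slot, 0),
				  secret, slot, 0);
	uint64_t fixed = scramble(scramble(next, secret, tagged_slot, 1),
				  secret, slot, 1);

	printf("without tag reset: %s\n", naive == next ? "ok" : "corrupted");
	printf("with tag reset:    %s\n", fixed == next ? "ok" : "corrupted");
	return 0;
}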
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-static bool need_activate_page_drain(int cpu)
-{
-	return false;
-}
-
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
 
 	mutex_unlock(&lock);
 }
+#else
+void lru_add_drain_all(void)
+{
+	lru_add_drain();
+}
+#endif
 
 /**
  * release_pages - batched put_page()
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len)
 {
 	void *p;
 
-	p = kmalloc_track_caller(len, GFP_USER);
+	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
