Diffstat (limited to 'lib')

 -rw-r--r--  lib/Kconfig.debug       |  38
 -rw-r--r--  lib/debugobjects.c      |  10
 -rw-r--r--  lib/idr.c               |  11
 -rw-r--r--  lib/iov_iter.c          |   1
 -rw-r--r--  lib/kobject_uevent.c    |   6
 -rw-r--r--  lib/list_debug.c        |  99
 -rw-r--r--  lib/locking-selftest.c  |  66
 -rw-r--r--  lib/lockref.c           |   2
 -rw-r--r--  lib/mpi/mpi-pow.c       |   7
 -rw-r--r--  lib/nlattr.c            |   2
 -rw-r--r--  lib/parser.c            |  47
 -rw-r--r--  lib/percpu_counter.c    |  25
 -rw-r--r--  lib/radix-tree.c        | 322
 -rw-r--r--  lib/raid6/avx2.c        | 232
 -rw-r--r--  lib/rbtree.c            |  23
 -rw-r--r--  lib/swiotlb.c           |  81
 -rw-r--r--  lib/test_kasan.c        |  29

 17 files changed, 709 insertions(+), 292 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a6c8db1d62f6..e6327d102184 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -13,7 +13,22 @@ config PRINTK_TIME | |||
| 13 | be included, not that the timestamp is recorded. | 13 | be included, not that the timestamp is recorded. |
| 14 | 14 | ||
| 15 | The behavior is also controlled by the kernel command line | 15 | The behavior is also controlled by the kernel command line |
| 16 | parameter printk.time=1. See Documentation/kernel-parameters.txt | 16 | parameter printk.time=1. See Documentation/admin-guide/kernel-parameters.rst |
| 17 | |||
| 18 | config CONSOLE_LOGLEVEL_DEFAULT | ||
| 19 | int "Default console loglevel (1-15)" | ||
| 20 | range 1 15 | ||
| 21 | default "7" | ||
| 22 | help | ||
| 23 | Default loglevel to determine what will be printed on the console. | ||
| 24 | |||
| 25 | Setting a default here is equivalent to passing in loglevel=<x> in | ||
| 26 | the kernel bootargs. loglevel=<x> continues to override whatever | ||
| 27 | value is specified here as well. | ||
| 28 | |||
| 29 | Note: This does not affect the log level of un-prefixed printk() | ||
| 30 | usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT | ||
| 31 | option. | ||
| 17 | 32 | ||
| 18 | config MESSAGE_LOGLEVEL_DEFAULT | 33 | config MESSAGE_LOGLEVEL_DEFAULT |
| 19 | int "Default message log level (1-7)" | 34 | int "Default message log level (1-7)" |
| @@ -26,6 +41,10 @@ config MESSAGE_LOGLEVEL_DEFAULT | |||
| 26 | that are auditing their logs closely may want to set it to a lower | 41 | that are auditing their logs closely may want to set it to a lower |
| 27 | priority. | 42 | priority. |
| 28 | 43 | ||
| 44 | Note: This does not affect what message level gets printed on the console | ||
| 45 | by default. To change that, use loglevel=<x> in the kernel bootargs, | ||
| 46 | or pick a different CONSOLE_LOGLEVEL_DEFAULT configuration value. | ||
| 47 | |||
| 29 | config BOOT_PRINTK_DELAY | 48 | config BOOT_PRINTK_DELAY |
| 30 | bool "Delay each boot printk message by N milliseconds" | 49 | bool "Delay each boot printk message by N milliseconds" |
| 31 | depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY | 50 | depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY |
| @@ -1218,7 +1237,7 @@ config DEBUG_BUGVERBOSE | |||
| 1218 | 1237 | ||
| 1219 | config DEBUG_LIST | 1238 | config DEBUG_LIST |
| 1220 | bool "Debug linked list manipulation" | 1239 | bool "Debug linked list manipulation" |
| 1221 | depends on DEBUG_KERNEL | 1240 | depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION |
| 1222 | help | 1241 | help |
| 1223 | Enable this to turn on extended checks in the linked-list | 1242 | Enable this to turn on extended checks in the linked-list |
| 1224 | walking routines. | 1243 | walking routines. |
| @@ -1434,7 +1453,8 @@ config RCU_TRACE | |||
| 1434 | select TRACE_CLOCK | 1453 | select TRACE_CLOCK |
| 1435 | help | 1454 | help |
| 1436 | This option provides tracing in RCU which presents stats | 1455 | This option provides tracing in RCU which presents stats |
| 1437 | in debugfs for debugging RCU implementation. | 1456 | in debugfs for debugging RCU implementation. It also enables |
| 1457 | additional tracepoints for ftrace-style event tracing. | ||
| 1438 | 1458 | ||
| 1439 | Say Y here if you want to enable RCU tracing | 1459 | Say Y here if you want to enable RCU tracing |
| 1440 | Say N if you are unsure. | 1460 | Say N if you are unsure. |
| @@ -1964,6 +1984,16 @@ config TEST_STATIC_KEYS | |||
| 1964 | 1984 | ||
| 1965 | If unsure, say N. | 1985 | If unsure, say N. |
| 1966 | 1986 | ||
| 1987 | config BUG_ON_DATA_CORRUPTION | ||
| 1988 | bool "Trigger a BUG when data corruption is detected" | ||
| 1989 | select DEBUG_LIST | ||
| 1990 | help | ||
| 1991 | Select this option if the kernel should BUG when it encounters | ||
| 1992 | data corruption in kernel memory structures when they get checked | ||
| 1993 | for validity. | ||
| 1994 | |||
| 1995 | If unsure, say N. | ||
| 1996 | |||
| 1967 | source "samples/Kconfig" | 1997 | source "samples/Kconfig" |
| 1968 | 1998 | ||
| 1969 | source "lib/Kconfig.kgdb" | 1999 | source "lib/Kconfig.kgdb" |
| @@ -1975,7 +2005,7 @@ config ARCH_HAS_DEVMEM_IS_ALLOWED | |||
| 1975 | 2005 | ||
| 1976 | config STRICT_DEVMEM | 2006 | config STRICT_DEVMEM |
| 1977 | bool "Filter access to /dev/mem" | 2007 | bool "Filter access to /dev/mem" |
| 1978 | depends on MMU | 2008 | depends on MMU && DEVMEM |
| 1979 | depends on ARCH_HAS_DEVMEM_IS_ALLOWED | 2009 | depends on ARCH_HAS_DEVMEM_IS_ALLOWED |
| 1980 | default y if TILE || PPC | 2010 | default y if TILE || PPC |
| 1981 | ---help--- | 2011 | ---help--- |
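The new BUG_ON_DATA_CORRUPTION option is consumed through a CHECK_DATA_CORRUPTION() helper that the lib/list_debug.c rewrite further down relies on. The macro itself is not part of this diff; the following is a minimal sketch, assuming it lives in include/linux/bug.h and that the exact reporting details may differ from the real implementation.

/*
 * Hedged sketch, not part of this diff: report a corrupted structure and
 * either BUG or only WARN depending on CONFIG_BUG_ON_DATA_CORRUPTION,
 * then make the enclosing validation helper return false.
 */
#define CHECK_DATA_CORRUPTION(condition, fmt, ...)			 \
	do {								 \
		if (unlikely(condition)) {				 \
			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
				pr_err(fmt, ##__VA_ARGS__);		 \
				BUG();					 \
			} else						 \
				WARN(1, fmt, ##__VA_ARGS__);		 \
			return false;					 \
		}							 \
	} while (0)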
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a8e12601eb37..04c1ef717fe0 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
| @@ -199,7 +199,7 @@ static void free_object(struct debug_obj *obj) | |||
| 199 | * initialized: | 199 | * initialized: |
| 200 | */ | 200 | */ |
| 201 | if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) | 201 | if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) |
| 202 | sched = keventd_up(); | 202 | sched = 1; |
| 203 | hlist_add_head(&obj->node, &obj_pool); | 203 | hlist_add_head(&obj->node, &obj_pool); |
| 204 | obj_pool_free++; | 204 | obj_pool_free++; |
| 205 | obj_pool_used--; | 205 | obj_pool_used--; |
| @@ -362,6 +362,7 @@ void debug_object_init(void *addr, struct debug_obj_descr *descr) | |||
| 362 | 362 | ||
| 363 | __debug_object_init(addr, descr, 0); | 363 | __debug_object_init(addr, descr, 0); |
| 364 | } | 364 | } |
| 365 | EXPORT_SYMBOL_GPL(debug_object_init); | ||
| 365 | 366 | ||
| 366 | /** | 367 | /** |
| 367 | * debug_object_init_on_stack - debug checks when an object on stack is | 368 | * debug_object_init_on_stack - debug checks when an object on stack is |
| @@ -376,6 +377,7 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) | |||
| 376 | 377 | ||
| 377 | __debug_object_init(addr, descr, 1); | 378 | __debug_object_init(addr, descr, 1); |
| 378 | } | 379 | } |
| 380 | EXPORT_SYMBOL_GPL(debug_object_init_on_stack); | ||
| 379 | 381 | ||
| 380 | /** | 382 | /** |
| 381 | * debug_object_activate - debug checks when an object is activated | 383 | * debug_object_activate - debug checks when an object is activated |
| @@ -449,6 +451,7 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 449 | } | 451 | } |
| 450 | return 0; | 452 | return 0; |
| 451 | } | 453 | } |
| 454 | EXPORT_SYMBOL_GPL(debug_object_activate); | ||
| 452 | 455 | ||
| 453 | /** | 456 | /** |
| 454 | * debug_object_deactivate - debug checks when an object is deactivated | 457 | * debug_object_deactivate - debug checks when an object is deactivated |
| @@ -496,6 +499,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
| 496 | 499 | ||
| 497 | raw_spin_unlock_irqrestore(&db->lock, flags); | 500 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 498 | } | 501 | } |
| 502 | EXPORT_SYMBOL_GPL(debug_object_deactivate); | ||
| 499 | 503 | ||
| 500 | /** | 504 | /** |
| 501 | * debug_object_destroy - debug checks when an object is destroyed | 505 | * debug_object_destroy - debug checks when an object is destroyed |
| @@ -542,6 +546,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 542 | out_unlock: | 546 | out_unlock: |
| 543 | raw_spin_unlock_irqrestore(&db->lock, flags); | 547 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 544 | } | 548 | } |
| 549 | EXPORT_SYMBOL_GPL(debug_object_destroy); | ||
| 545 | 550 | ||
| 546 | /** | 551 | /** |
| 547 | * debug_object_free - debug checks when an object is freed | 552 | * debug_object_free - debug checks when an object is freed |
| @@ -582,6 +587,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
| 582 | out_unlock: | 587 | out_unlock: |
| 583 | raw_spin_unlock_irqrestore(&db->lock, flags); | 588 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 584 | } | 589 | } |
| 590 | EXPORT_SYMBOL_GPL(debug_object_free); | ||
| 585 | 591 | ||
| 586 | /** | 592 | /** |
| 587 | * debug_object_assert_init - debug checks when object should be init-ed | 593 | * debug_object_assert_init - debug checks when object should be init-ed |
| @@ -626,6 +632,7 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) | |||
| 626 | 632 | ||
| 627 | raw_spin_unlock_irqrestore(&db->lock, flags); | 633 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 628 | } | 634 | } |
| 635 | EXPORT_SYMBOL_GPL(debug_object_assert_init); | ||
| 629 | 636 | ||
| 630 | /** | 637 | /** |
| 631 | * debug_object_active_state - debug checks object usage state machine | 638 | * debug_object_active_state - debug checks object usage state machine |
| @@ -673,6 +680,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr, | |||
| 673 | 680 | ||
| 674 | raw_spin_unlock_irqrestore(&db->lock, flags); | 681 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 675 | } | 682 | } |
| 683 | EXPORT_SYMBOL_GPL(debug_object_active_state); | ||
| 676 | 684 | ||
| 677 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | 685 | #ifdef CONFIG_DEBUG_OBJECTS_FREE |
| 678 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) | 686 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
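The added EXPORT_SYMBOL_GPL() lines make the debugobjects tracking API callable from modules. A hedged sketch of a module-side caller follows; my_obj and my_obj_debug_descr are illustrative names, not part of this patch.

#include <linux/debugobjects.h>

struct my_obj { int state; };

/* Illustrative descriptor; only .name is needed for basic lifetime tracking. */
static struct debug_obj_descr my_obj_debug_descr = {
	.name = "my_obj",
};

static void my_obj_setup(struct my_obj *obj)
{
	debug_object_init(obj, &my_obj_debug_descr);
	debug_object_activate(obj, &my_obj_debug_descr);
}

static void my_obj_teardown(struct my_obj *obj)
{
	debug_object_deactivate(obj, &my_obj_debug_descr);
	debug_object_free(obj, &my_obj_debug_descr);
}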
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
| @@ -927,6 +927,9 @@ EXPORT_SYMBOL(ida_pre_get); | |||
| 927 | * and go back to the ida_pre_get() call. If the ida is full, it will | 927 | * and go back to the ida_pre_get() call. If the ida is full, it will |
| 928 | * return %-ENOSPC. | 928 | * return %-ENOSPC. |
| 929 | * | 929 | * |
| 930 | * Note that callers must ensure that concurrent access to @ida is not possible. | ||
| 931 | * See ida_simple_get() for a variant which takes care of locking. | ||
| 932 | * | ||
| 930 | * @p_id returns a value in the range @starting_id ... %0x7fffffff. | 933 | * @p_id returns a value in the range @starting_id ... %0x7fffffff. |
| 931 | */ | 934 | */ |
| 932 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 935 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
| @@ -1073,6 +1076,9 @@ EXPORT_SYMBOL(ida_destroy); | |||
| 1073 | * Allocates an id in the range start <= id < end, or returns -ENOSPC. | 1076 | * Allocates an id in the range start <= id < end, or returns -ENOSPC. |
| 1074 | * On memory allocation failure, returns -ENOMEM. | 1077 | * On memory allocation failure, returns -ENOMEM. |
| 1075 | * | 1078 | * |
| 1079 | * Compared to ida_get_new_above() this function does its own locking, and | ||
| 1080 | * should be used unless there are special requirements. | ||
| 1081 | * | ||
| 1076 | * Use ida_simple_remove() to get rid of an id. | 1082 | * Use ida_simple_remove() to get rid of an id. |
| 1077 | */ | 1083 | */ |
| 1078 | int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, | 1084 | int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, |
| @@ -1119,6 +1125,11 @@ EXPORT_SYMBOL(ida_simple_get); | |||
| 1119 | * ida_simple_remove - remove an allocated id. | 1125 | * ida_simple_remove - remove an allocated id. |
| 1120 | * @ida: the (initialized) ida. | 1126 | * @ida: the (initialized) ida. |
| 1121 | * @id: the id returned by ida_simple_get. | 1127 | * @id: the id returned by ida_simple_get. |
| 1128 | * | ||
| 1129 | * Use to release an id allocated with ida_simple_get(). | ||
| 1130 | * | ||
| 1131 | * Compared to ida_remove() this function does its own locking, and should be | ||
| 1132 | * used unless there are special requirements. | ||
| 1122 | */ | 1133 | */ |
| 1123 | void ida_simple_remove(struct ida *ida, unsigned int id) | 1134 | void ida_simple_remove(struct ida *ida, unsigned int id) |
| 1124 | { | 1135 | { |
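The added kernel-doc points callers at ida_simple_get()/ida_simple_remove() whenever they do not want to provide their own locking around the ida. A hedged usage sketch; my_ida and the GFP flag are illustrative choices, not part of the patch.

#include <linux/idr.h>

static DEFINE_IDA(my_ida);

static int my_alloc_id(void)
{
	/* ida_simple_get() does its own locking; no external lock is needed. */
	return ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);	/* id or -errno */
}

static void my_free_id(int id)
{
	ida_simple_remove(&my_ida, id);
}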
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f2bd21b93dfc..691a52b634fe 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
| @@ -1,4 +1,5 @@ | |||
| 1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
| 2 | #include <linux/bvec.h> | ||
| 2 | #include <linux/uio.h> | 3 | #include <linux/uio.h> |
| 3 | #include <linux/pagemap.h> | 4 | #include <linux/pagemap.h> |
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index f6c2c1e7779c..9a2b811966eb 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
| @@ -56,7 +56,7 @@ static const char *kobject_actions[] = { | |||
| 56 | * kobject_action_type - translate action string to numeric type | 56 | * kobject_action_type - translate action string to numeric type |
| 57 | * | 57 | * |
| 58 | * @buf: buffer containing the action string, newline is ignored | 58 | * @buf: buffer containing the action string, newline is ignored |
| 59 | * @len: length of buffer | 59 | * @count: length of buffer |
| 60 | * @type: pointer to the location to store the action type | 60 | * @type: pointer to the location to store the action type |
| 61 | * | 61 | * |
| 62 | * Returns 0 if the action string was recognized. | 62 | * Returns 0 if the action string was recognized. |
| @@ -154,8 +154,8 @@ static void cleanup_uevent_env(struct subprocess_info *info) | |||
| 154 | /** | 154 | /** |
| 155 | * kobject_uevent_env - send an uevent with environmental data | 155 | * kobject_uevent_env - send an uevent with environmental data |
| 156 | * | 156 | * |
| 157 | * @action: action that is happening | ||
| 158 | * @kobj: struct kobject that the action is happening to | 157 | * @kobj: struct kobject that the action is happening to |
| 158 | * @action: action that is happening | ||
| 159 | * @envp_ext: pointer to environmental data | 159 | * @envp_ext: pointer to environmental data |
| 160 | * | 160 | * |
| 161 | * Returns 0 if kobject_uevent_env() is completed with success or the | 161 | * Returns 0 if kobject_uevent_env() is completed with success or the |
| @@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env); | |||
| 363 | /** | 363 | /** |
| 364 | * kobject_uevent - notify userspace by sending an uevent | 364 | * kobject_uevent - notify userspace by sending an uevent |
| 365 | * | 365 | * |
| 366 | * @action: action that is happening | ||
| 367 | * @kobj: struct kobject that the action is happening to | 366 | * @kobj: struct kobject that the action is happening to |
| 367 | * @action: action that is happening | ||
| 368 | * | 368 | * |
| 369 | * Returns 0 if kobject_uevent() is completed with success or the | 369 | * Returns 0 if kobject_uevent() is completed with success or the |
| 370 | * corresponding error when it fails. | 370 | * corresponding error when it fails. |
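The kernel-doc reorder simply makes the documented parameter order match the real one: the kobject comes first, then the action. A hedged usage sketch; kobj stands in for a real, registered kobject and the environment string is illustrative.

/* Hedged usage sketch, argument order as per the corrected kernel-doc. */
static void my_notify_change(struct kobject *kobj)
{
	char *envp[] = { "REASON=example", NULL };

	kobject_uevent(kobj, KOBJ_CHANGE);
	kobject_uevent_env(kobj, KOBJ_ADD, envp);
}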
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3859bf63561c..7f7bfa55eb6d 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
| @@ -2,8 +2,7 @@ | |||
| 2 | * Copyright 2006, Red Hat, Inc., Dave Jones | 2 | * Copyright 2006, Red Hat, Inc., Dave Jones |
| 3 | * Released under the General Public License (GPL). | 3 | * Released under the General Public License (GPL). |
| 4 | * | 4 | * |
| 5 | * This file contains the linked list implementations for | 5 | * This file contains the linked list validation for DEBUG_LIST. |
| 6 | * DEBUG_LIST. | ||
| 7 | */ | 6 | */ |
| 8 | 7 | ||
| 9 | #include <linux/export.h> | 8 | #include <linux/export.h> |
| @@ -13,88 +12,48 @@ | |||
| 13 | #include <linux/rculist.h> | 12 | #include <linux/rculist.h> |
| 14 | 13 | ||
| 15 | /* | 14 | /* |
| 16 | * Insert a new entry between two known consecutive entries. | 15 | * Check that the data structures for the list manipulations are reasonably |
| 17 | * | 16 | * valid. Failures here indicate memory corruption (and possibly an exploit |
| 18 | * This is only for internal list manipulation where we know | 17 | * attempt). |
| 19 | * the prev/next entries already! | ||
| 20 | */ | 18 | */ |
| 21 | 19 | ||
| 22 | void __list_add(struct list_head *new, | 20 | bool __list_add_valid(struct list_head *new, struct list_head *prev, |
| 23 | struct list_head *prev, | 21 | struct list_head *next) |
| 24 | struct list_head *next) | ||
| 25 | { | 22 | { |
| 26 | WARN(next->prev != prev, | 23 | CHECK_DATA_CORRUPTION(next->prev != prev, |
| 27 | "list_add corruption. next->prev should be " | 24 | "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", |
| 28 | "prev (%p), but was %p. (next=%p).\n", | ||
| 29 | prev, next->prev, next); | 25 | prev, next->prev, next); |
| 30 | WARN(prev->next != next, | 26 | CHECK_DATA_CORRUPTION(prev->next != next, |
| 31 | "list_add corruption. prev->next should be " | 27 | "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", |
| 32 | "next (%p), but was %p. (prev=%p).\n", | ||
| 33 | next, prev->next, prev); | 28 | next, prev->next, prev); |
| 34 | WARN(new == prev || new == next, | 29 | CHECK_DATA_CORRUPTION(new == prev || new == next, |
| 35 | "list_add double add: new=%p, prev=%p, next=%p.\n", | 30 | "list_add double add: new=%p, prev=%p, next=%p.\n", |
| 36 | new, prev, next); | 31 | new, prev, next); |
| 37 | next->prev = new; | 32 | |
| 38 | new->next = next; | 33 | return true; |
| 39 | new->prev = prev; | ||
| 40 | WRITE_ONCE(prev->next, new); | ||
| 41 | } | 34 | } |
| 42 | EXPORT_SYMBOL(__list_add); | 35 | EXPORT_SYMBOL(__list_add_valid); |
| 43 | 36 | ||
| 44 | void __list_del_entry(struct list_head *entry) | 37 | bool __list_del_entry_valid(struct list_head *entry) |
| 45 | { | 38 | { |
| 46 | struct list_head *prev, *next; | 39 | struct list_head *prev, *next; |
| 47 | 40 | ||
| 48 | prev = entry->prev; | 41 | prev = entry->prev; |
| 49 | next = entry->next; | 42 | next = entry->next; |
| 50 | 43 | ||
| 51 | if (WARN(next == LIST_POISON1, | 44 | CHECK_DATA_CORRUPTION(next == LIST_POISON1, |
| 52 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", | 45 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", |
| 53 | entry, LIST_POISON1) || | 46 | entry, LIST_POISON1); |
| 54 | WARN(prev == LIST_POISON2, | 47 | CHECK_DATA_CORRUPTION(prev == LIST_POISON2, |
| 55 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", | 48 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", |
| 56 | entry, LIST_POISON2) || | 49 | entry, LIST_POISON2); |
| 57 | WARN(prev->next != entry, | 50 | CHECK_DATA_CORRUPTION(prev->next != entry, |
| 58 | "list_del corruption. prev->next should be %p, " | 51 | "list_del corruption. prev->next should be %p, but was %p\n", |
| 59 | "but was %p\n", entry, prev->next) || | 52 | entry, prev->next); |
| 60 | WARN(next->prev != entry, | 53 | CHECK_DATA_CORRUPTION(next->prev != entry, |
| 61 | "list_del corruption. next->prev should be %p, " | 54 | "list_del corruption. next->prev should be %p, but was %p\n", |
| 62 | "but was %p\n", entry, next->prev)) | 55 | entry, next->prev); |
| 63 | return; | 56 | return true; |
| 64 | |||
| 65 | __list_del(prev, next); | ||
| 66 | } | ||
| 67 | EXPORT_SYMBOL(__list_del_entry); | ||
| 68 | 57 | ||
| 69 | /** | ||
| 70 | * list_del - deletes entry from list. | ||
| 71 | * @entry: the element to delete from the list. | ||
| 72 | * Note: list_empty on entry does not return true after this, the entry is | ||
| 73 | * in an undefined state. | ||
| 74 | */ | ||
| 75 | void list_del(struct list_head *entry) | ||
| 76 | { | ||
| 77 | __list_del_entry(entry); | ||
| 78 | entry->next = LIST_POISON1; | ||
| 79 | entry->prev = LIST_POISON2; | ||
| 80 | } | ||
| 81 | EXPORT_SYMBOL(list_del); | ||
| 82 | |||
| 83 | /* | ||
| 84 | * RCU variants. | ||
| 85 | */ | ||
| 86 | void __list_add_rcu(struct list_head *new, | ||
| 87 | struct list_head *prev, struct list_head *next) | ||
| 88 | { | ||
| 89 | WARN(next->prev != prev, | ||
| 90 | "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", | ||
| 91 | prev, next->prev, next); | ||
| 92 | WARN(prev->next != next, | ||
| 93 | "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", | ||
| 94 | next, prev->next, prev); | ||
| 95 | new->next = next; | ||
| 96 | new->prev = prev; | ||
| 97 | rcu_assign_pointer(list_next_rcu(prev), new); | ||
| 98 | next->prev = new; | ||
| 99 | } | 58 | } |
| 100 | EXPORT_SYMBOL(__list_add_rcu); | 59 | EXPORT_SYMBOL(__list_del_entry_valid); |
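With this change the helpers above only validate and report; the pointer updates themselves move to the inline list helpers. A hedged sketch of the caller side in include/linux/list.h (not shown in this diff), assuming CONFIG_DEBUG_LIST wires the new validators in roughly like this.

/* Hedged sketch of the include/linux/list.h side, not part of this diff. */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	next->prev = new;
	new->next = next;
	new->prev = prev;
	WRITE_ONCE(prev->next, new);
}

static inline void __list_del_entry(struct list_head *entry)
{
	if (!__list_del_entry_valid(entry))
		return;

	__list_del(entry->prev, entry->next);
}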
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 872a15a2a637..f3a217ea0388 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
| @@ -980,23 +980,23 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) | |||
| 980 | #ifndef CONFIG_PROVE_LOCKING | 980 | #ifndef CONFIG_PROVE_LOCKING |
| 981 | if (expected == FAILURE && debug_locks) { | 981 | if (expected == FAILURE && debug_locks) { |
| 982 | expected_testcase_failures++; | 982 | expected_testcase_failures++; |
| 983 | printk("failed|"); | 983 | pr_cont("failed|"); |
| 984 | } | 984 | } |
| 985 | else | 985 | else |
| 986 | #endif | 986 | #endif |
| 987 | if (debug_locks != expected) { | 987 | if (debug_locks != expected) { |
| 988 | unexpected_testcase_failures++; | 988 | unexpected_testcase_failures++; |
| 989 | printk("FAILED|"); | 989 | pr_cont("FAILED|"); |
| 990 | 990 | ||
| 991 | dump_stack(); | 991 | dump_stack(); |
| 992 | } else { | 992 | } else { |
| 993 | testcase_successes++; | 993 | testcase_successes++; |
| 994 | printk(" ok |"); | 994 | pr_cont(" ok |"); |
| 995 | } | 995 | } |
| 996 | testcase_total++; | 996 | testcase_total++; |
| 997 | 997 | ||
| 998 | if (debug_locks_verbose) | 998 | if (debug_locks_verbose) |
| 999 | printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n", | 999 | pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n", |
| 1000 | lockclass_mask, debug_locks, expected); | 1000 | lockclass_mask, debug_locks, expected); |
| 1001 | /* | 1001 | /* |
| 1002 | * Some tests (e.g. double-unlock) might corrupt the preemption | 1002 | * Some tests (e.g. double-unlock) might corrupt the preemption |
| @@ -1021,26 +1021,26 @@ static inline void print_testname(const char *testname) | |||
| 1021 | #define DO_TESTCASE_1(desc, name, nr) \ | 1021 | #define DO_TESTCASE_1(desc, name, nr) \ |
| 1022 | print_testname(desc"/"#nr); \ | 1022 | print_testname(desc"/"#nr); \ |
| 1023 | dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ | 1023 | dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ |
| 1024 | printk("\n"); | 1024 | pr_cont("\n"); |
| 1025 | 1025 | ||
| 1026 | #define DO_TESTCASE_1B(desc, name, nr) \ | 1026 | #define DO_TESTCASE_1B(desc, name, nr) \ |
| 1027 | print_testname(desc"/"#nr); \ | 1027 | print_testname(desc"/"#nr); \ |
| 1028 | dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \ | 1028 | dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \ |
| 1029 | printk("\n"); | 1029 | pr_cont("\n"); |
| 1030 | 1030 | ||
| 1031 | #define DO_TESTCASE_3(desc, name, nr) \ | 1031 | #define DO_TESTCASE_3(desc, name, nr) \ |
| 1032 | print_testname(desc"/"#nr); \ | 1032 | print_testname(desc"/"#nr); \ |
| 1033 | dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ | 1033 | dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ |
| 1034 | dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ | 1034 | dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ |
| 1035 | dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ | 1035 | dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ |
| 1036 | printk("\n"); | 1036 | pr_cont("\n"); |
| 1037 | 1037 | ||
| 1038 | #define DO_TESTCASE_3RW(desc, name, nr) \ | 1038 | #define DO_TESTCASE_3RW(desc, name, nr) \ |
| 1039 | print_testname(desc"/"#nr); \ | 1039 | print_testname(desc"/"#nr); \ |
| 1040 | dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\ | 1040 | dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\ |
| 1041 | dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ | 1041 | dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ |
| 1042 | dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ | 1042 | dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ |
| 1043 | printk("\n"); | 1043 | pr_cont("\n"); |
| 1044 | 1044 | ||
| 1045 | #define DO_TESTCASE_6(desc, name) \ | 1045 | #define DO_TESTCASE_6(desc, name) \ |
| 1046 | print_testname(desc); \ | 1046 | print_testname(desc); \ |
| @@ -1050,7 +1050,7 @@ static inline void print_testname(const char *testname) | |||
| 1050 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ | 1050 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ |
| 1051 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ | 1051 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1052 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ | 1052 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1053 | printk("\n"); | 1053 | pr_cont("\n"); |
| 1054 | 1054 | ||
| 1055 | #define DO_TESTCASE_6_SUCCESS(desc, name) \ | 1055 | #define DO_TESTCASE_6_SUCCESS(desc, name) \ |
| 1056 | print_testname(desc); \ | 1056 | print_testname(desc); \ |
| @@ -1060,7 +1060,7 @@ static inline void print_testname(const char *testname) | |||
| 1060 | dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ | 1060 | dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ |
| 1061 | dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ | 1061 | dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ |
| 1062 | dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ | 1062 | dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ |
| 1063 | printk("\n"); | 1063 | pr_cont("\n"); |
| 1064 | 1064 | ||
| 1065 | /* | 1065 | /* |
| 1066 | * 'read' variant: rlocks must not trigger. | 1066 | * 'read' variant: rlocks must not trigger. |
| @@ -1073,7 +1073,7 @@ static inline void print_testname(const char *testname) | |||
| 1073 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ | 1073 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ |
| 1074 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ | 1074 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1075 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ | 1075 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1076 | printk("\n"); | 1076 | pr_cont("\n"); |
| 1077 | 1077 | ||
| 1078 | #define DO_TESTCASE_2I(desc, name, nr) \ | 1078 | #define DO_TESTCASE_2I(desc, name, nr) \ |
| 1079 | DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ | 1079 | DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ |
| @@ -1726,25 +1726,25 @@ static void ww_tests(void) | |||
| 1726 | dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW); | 1726 | dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW); |
| 1727 | dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW); | 1727 | dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW); |
| 1728 | dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW); | 1728 | dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW); |
| 1729 | printk("\n"); | 1729 | pr_cont("\n"); |
| 1730 | 1730 | ||
| 1731 | print_testname("ww contexts mixing"); | 1731 | print_testname("ww contexts mixing"); |
| 1732 | dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW); | 1732 | dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW); |
| 1733 | dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW); | 1733 | dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW); |
| 1734 | printk("\n"); | 1734 | pr_cont("\n"); |
| 1735 | 1735 | ||
| 1736 | print_testname("finishing ww context"); | 1736 | print_testname("finishing ww context"); |
| 1737 | dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW); | 1737 | dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW); |
| 1738 | dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW); | 1738 | dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW); |
| 1739 | dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW); | 1739 | dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW); |
| 1740 | dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW); | 1740 | dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW); |
| 1741 | printk("\n"); | 1741 | pr_cont("\n"); |
| 1742 | 1742 | ||
| 1743 | print_testname("locking mismatches"); | 1743 | print_testname("locking mismatches"); |
| 1744 | dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW); | 1744 | dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW); |
| 1745 | dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW); | 1745 | dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW); |
| 1746 | dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW); | 1746 | dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW); |
| 1747 | printk("\n"); | 1747 | pr_cont("\n"); |
| 1748 | 1748 | ||
| 1749 | print_testname("EDEADLK handling"); | 1749 | print_testname("EDEADLK handling"); |
| 1750 | dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW); | 1750 | dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW); |
| @@ -1757,11 +1757,11 @@ static void ww_tests(void) | |||
| 1757 | dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW); | 1757 | dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW); |
| 1758 | dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW); | 1758 | dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW); |
| 1759 | dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW); | 1759 | dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW); |
| 1760 | printk("\n"); | 1760 | pr_cont("\n"); |
| 1761 | 1761 | ||
| 1762 | print_testname("spinlock nest unlocked"); | 1762 | print_testname("spinlock nest unlocked"); |
| 1763 | dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW); | 1763 | dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW); |
| 1764 | printk("\n"); | 1764 | pr_cont("\n"); |
| 1765 | 1765 | ||
| 1766 | printk(" -----------------------------------------------------\n"); | 1766 | printk(" -----------------------------------------------------\n"); |
| 1767 | printk(" |block | try |context|\n"); | 1767 | printk(" |block | try |context|\n"); |
| @@ -1771,25 +1771,25 @@ static void ww_tests(void) | |||
| 1771 | dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW); | 1771 | dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW); |
| 1772 | dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW); | 1772 | dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW); |
| 1773 | dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW); | 1773 | dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW); |
| 1774 | printk("\n"); | 1774 | pr_cont("\n"); |
| 1775 | 1775 | ||
| 1776 | print_testname("try"); | 1776 | print_testname("try"); |
| 1777 | dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW); | 1777 | dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW); |
| 1778 | dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW); | 1778 | dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW); |
| 1779 | dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW); | 1779 | dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW); |
| 1780 | printk("\n"); | 1780 | pr_cont("\n"); |
| 1781 | 1781 | ||
| 1782 | print_testname("block"); | 1782 | print_testname("block"); |
| 1783 | dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW); | 1783 | dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW); |
| 1784 | dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW); | 1784 | dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW); |
| 1785 | dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW); | 1785 | dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW); |
| 1786 | printk("\n"); | 1786 | pr_cont("\n"); |
| 1787 | 1787 | ||
| 1788 | print_testname("spinlock"); | 1788 | print_testname("spinlock"); |
| 1789 | dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW); | 1789 | dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW); |
| 1790 | dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW); | 1790 | dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW); |
| 1791 | dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW); | 1791 | dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW); |
| 1792 | printk("\n"); | 1792 | pr_cont("\n"); |
| 1793 | } | 1793 | } |
| 1794 | 1794 | ||
| 1795 | void locking_selftest(void) | 1795 | void locking_selftest(void) |
| @@ -1829,32 +1829,32 @@ void locking_selftest(void) | |||
| 1829 | 1829 | ||
| 1830 | printk(" --------------------------------------------------------------------------\n"); | 1830 | printk(" --------------------------------------------------------------------------\n"); |
| 1831 | print_testname("recursive read-lock"); | 1831 | print_testname("recursive read-lock"); |
| 1832 | printk(" |"); | 1832 | pr_cont(" |"); |
| 1833 | dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK); | 1833 | dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK); |
| 1834 | printk(" |"); | 1834 | pr_cont(" |"); |
| 1835 | dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM); | 1835 | dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM); |
| 1836 | printk("\n"); | 1836 | pr_cont("\n"); |
| 1837 | 1837 | ||
| 1838 | print_testname("recursive read-lock #2"); | 1838 | print_testname("recursive read-lock #2"); |
| 1839 | printk(" |"); | 1839 | pr_cont(" |"); |
| 1840 | dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK); | 1840 | dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK); |
| 1841 | printk(" |"); | 1841 | pr_cont(" |"); |
| 1842 | dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM); | 1842 | dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM); |
| 1843 | printk("\n"); | 1843 | pr_cont("\n"); |
| 1844 | 1844 | ||
| 1845 | print_testname("mixed read-write-lock"); | 1845 | print_testname("mixed read-write-lock"); |
| 1846 | printk(" |"); | 1846 | pr_cont(" |"); |
| 1847 | dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK); | 1847 | dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK); |
| 1848 | printk(" |"); | 1848 | pr_cont(" |"); |
| 1849 | dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM); | 1849 | dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM); |
| 1850 | printk("\n"); | 1850 | pr_cont("\n"); |
| 1851 | 1851 | ||
| 1852 | print_testname("mixed write-read-lock"); | 1852 | print_testname("mixed write-read-lock"); |
| 1853 | printk(" |"); | 1853 | pr_cont(" |"); |
| 1854 | dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK); | 1854 | dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK); |
| 1855 | printk(" |"); | 1855 | pr_cont(" |"); |
| 1856 | dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM); | 1856 | dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM); |
| 1857 | printk("\n"); | 1857 | pr_cont("\n"); |
| 1858 | 1858 | ||
| 1859 | printk(" --------------------------------------------------------------------------\n"); | 1859 | printk(" --------------------------------------------------------------------------\n"); |
| 1860 | 1860 | ||
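The printk() to pr_cont() conversion matters because a bare printk() without KERN_CONT starts a new message, which would break the selftest's single-row table output into separate lines. A hedged illustration of how one row is assembled from several calls in this file.

/* Hedged illustration: building one selftest row from several calls. */
print_testname("example");	/* prints the row label, no newline */
pr_cont(" ok |");		/* continues on the same output line */
pr_cont("\n");			/* terminates the row */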
diff --git a/lib/lockref.c b/lib/lockref.c
index 5a92189ad711..c4bfcb8836cd 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
| @@ -20,7 +20,7 @@ | |||
| 20 | if (likely(old.lock_count == prev.lock_count)) { \ | 20 | if (likely(old.lock_count == prev.lock_count)) { \ |
| 21 | SUCCESS; \ | 21 | SUCCESS; \ |
| 22 | } \ | 22 | } \ |
| 23 | cpu_relax_lowlatency(); \ | 23 | cpu_relax(); \ |
| 24 | } \ | 24 | } \ |
| 25 | } while (0) | 25 | } while (0) |
| 26 | 26 | ||
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 5464c8744ea9..e24388a863a7 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
| @@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) | |||
| 64 | if (!esize) { | 64 | if (!esize) { |
| 65 | /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 | 65 | /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 |
| 66 | * depending on if MOD equals 1. */ | 66 | * depending on if MOD equals 1. */ |
| 67 | rp[0] = 1; | ||
| 68 | res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; | 67 | res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; |
| 68 | if (res->nlimbs) { | ||
| 69 | if (mpi_resize(res, 1) < 0) | ||
| 70 | goto enomem; | ||
| 71 | rp = res->d; | ||
| 72 | rp[0] = 1; | ||
| 73 | } | ||
| 69 | res->sign = 0; | 74 | res->sign = 0; |
| 70 | goto leave; | 75 | goto leave; |
| 71 | } | 76 | } |
diff --git a/lib/nlattr.c b/lib/nlattr.c
index fce1e9afc6d9..b42b8577fc23 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <net/netlink.h> | 15 | #include <net/netlink.h> |
| 16 | 16 | ||
| 17 | static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = { | 17 | static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { |
| 18 | [NLA_U8] = sizeof(u8), | 18 | [NLA_U8] = sizeof(u8), |
| 19 | [NLA_U16] = sizeof(u16), | 19 | [NLA_U16] = sizeof(u16), |
| 20 | [NLA_U32] = sizeof(u32), | 20 | [NLA_U32] = sizeof(u32), |
diff --git a/lib/parser.c b/lib/parser.c
index b6d11631231b..3278958b472a 100644
--- a/lib/parser.c
+++ b/lib/parser.c
| @@ -152,6 +152,36 @@ static int match_number(substring_t *s, int *result, int base) | |||
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | /** | 154 | /** |
| 155 | * match_u64int: scan a number in the given base from a substring_t | ||
| 156 | * @s: substring to be scanned | ||
| 157 | * @result: resulting u64 on success | ||
| 158 | * @base: base to use when converting string | ||
| 159 | * | ||
| 160 | * Description: Given a &substring_t and a base, attempts to parse the substring | ||
| 161 | * as a number in that base. On success, sets @result to the integer represented | ||
| 162 | * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure. | ||
| 163 | */ | ||
| 164 | static int match_u64int(substring_t *s, u64 *result, int base) | ||
| 165 | { | ||
| 166 | char *buf; | ||
| 167 | int ret; | ||
| 168 | u64 val; | ||
| 169 | size_t len = s->to - s->from; | ||
| 170 | |||
| 171 | buf = kmalloc(len + 1, GFP_KERNEL); | ||
| 172 | if (!buf) | ||
| 173 | return -ENOMEM; | ||
| 174 | memcpy(buf, s->from, len); | ||
| 175 | buf[len] = '\0'; | ||
| 176 | |||
| 177 | ret = kstrtoull(buf, base, &val); | ||
| 178 | if (!ret) | ||
| 179 | *result = val; | ||
| 180 | kfree(buf); | ||
| 181 | return ret; | ||
| 182 | } | ||
| 183 | |||
| 184 | /** | ||
| 155 | * match_int: - scan a decimal representation of an integer from a substring_t | 185 | * match_int: - scan a decimal representation of an integer from a substring_t |
| 156 | * @s: substring_t to be scanned | 186 | * @s: substring_t to be scanned |
| 157 | * @result: resulting integer on success | 187 | * @result: resulting integer on success |
| @@ -167,6 +197,23 @@ int match_int(substring_t *s, int *result) | |||
| 167 | EXPORT_SYMBOL(match_int); | 197 | EXPORT_SYMBOL(match_int); |
| 168 | 198 | ||
| 169 | /** | 199 | /** |
| 200 | * match_u64: - scan a decimal representation of a u64 from | ||
| 201 | * a substring_t | ||
| 202 | * @s: substring_t to be scanned | ||
| 203 | * @result: resulting unsigned long long on success | ||
| 204 | * | ||
| 205 | * Description: Attempts to parse the &substring_t @s as a long decimal | ||
| 206 | * integer. On success, sets @result to the integer represented by the | ||
| 207 | * string and returns 0. | ||
| 208 | * Returns -ENOMEM, -EINVAL, or -ERANGE on failure. | ||
| 209 | */ | ||
| 210 | int match_u64(substring_t *s, u64 *result) | ||
| 211 | { | ||
| 212 | return match_u64int(s, result, 0); | ||
| 213 | } | ||
| 214 | EXPORT_SYMBOL(match_u64); | ||
| 215 | |||
| 216 | /** | ||
| 170 | * match_octal: - scan an octal representation of an integer from a substring_t | 217 | * match_octal: - scan an octal representation of an integer from a substring_t |
| 171 | * @s: substring_t to be scanned | 218 | * @s: substring_t to be scanned |
| 172 | * @result: resulting integer on success | 219 | * @result: resulting integer on success |
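A hedged sketch of how the new match_u64() pairs with match_token(); the option table, names, and "size=" syntax are illustrative and not part of the patch.

#include <linux/parser.h>

enum { Opt_size, Opt_err };

static const match_table_t tokens = {
	{ Opt_size, "size=%s" },
	{ Opt_err, NULL }
};

static int parse_size(char *arg, u64 *size)
{
	substring_t args[MAX_OPT_ARGS];

	if (match_token(arg, tokens, args) != Opt_size)
		return -EINVAL;

	/* match_u64() copies the substring and parses it with kstrtoull(). */
	return match_u64(&args[0], size);
}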
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 72d36113ccaa..c8cebb137076 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
| @@ -158,25 +158,21 @@ EXPORT_SYMBOL(percpu_counter_destroy); | |||
| 158 | int percpu_counter_batch __read_mostly = 32; | 158 | int percpu_counter_batch __read_mostly = 32; |
| 159 | EXPORT_SYMBOL(percpu_counter_batch); | 159 | EXPORT_SYMBOL(percpu_counter_batch); |
| 160 | 160 | ||
| 161 | static void compute_batch_value(void) | 161 | static int compute_batch_value(unsigned int cpu) |
| 162 | { | 162 | { |
| 163 | int nr = num_online_cpus(); | 163 | int nr = num_online_cpus(); |
| 164 | 164 | ||
| 165 | percpu_counter_batch = max(32, nr*2); | 165 | percpu_counter_batch = max(32, nr*2); |
| 166 | return 0; | ||
| 166 | } | 167 | } |
| 167 | 168 | ||
| 168 | static int percpu_counter_hotcpu_callback(struct notifier_block *nb, | 169 | static int percpu_counter_cpu_dead(unsigned int cpu) |
| 169 | unsigned long action, void *hcpu) | ||
| 170 | { | 170 | { |
| 171 | #ifdef CONFIG_HOTPLUG_CPU | 171 | #ifdef CONFIG_HOTPLUG_CPU |
| 172 | unsigned int cpu; | ||
| 173 | struct percpu_counter *fbc; | 172 | struct percpu_counter *fbc; |
| 174 | 173 | ||
| 175 | compute_batch_value(); | 174 | compute_batch_value(cpu); |
| 176 | if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) | ||
| 177 | return NOTIFY_OK; | ||
| 178 | 175 | ||
| 179 | cpu = (unsigned long)hcpu; | ||
| 180 | spin_lock_irq(&percpu_counters_lock); | 176 | spin_lock_irq(&percpu_counters_lock); |
| 181 | list_for_each_entry(fbc, &percpu_counters, list) { | 177 | list_for_each_entry(fbc, &percpu_counters, list) { |
| 182 | s32 *pcount; | 178 | s32 *pcount; |
| @@ -190,7 +186,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
| 190 | } | 186 | } |
| 191 | spin_unlock_irq(&percpu_counters_lock); | 187 | spin_unlock_irq(&percpu_counters_lock); |
| 192 | #endif | 188 | #endif |
| 193 | return NOTIFY_OK; | 189 | return 0; |
| 194 | } | 190 | } |
| 195 | 191 | ||
| 196 | /* | 192 | /* |
| @@ -222,8 +218,15 @@ EXPORT_SYMBOL(__percpu_counter_compare); | |||
| 222 | 218 | ||
| 223 | static int __init percpu_counter_startup(void) | 219 | static int __init percpu_counter_startup(void) |
| 224 | { | 220 | { |
| 225 | compute_batch_value(); | 221 | int ret; |
| 226 | hotcpu_notifier(percpu_counter_hotcpu_callback, 0); | 222 | |
| 223 | ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online", | ||
| 224 | compute_batch_value, NULL); | ||
| 225 | WARN_ON(ret < 0); | ||
| 226 | ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD, | ||
| 227 | "lib/percpu_cnt:dead", NULL, | ||
| 228 | percpu_counter_cpu_dead); | ||
| 229 | WARN_ON(ret < 0); | ||
| 227 | return 0; | 230 | return 0; |
| 228 | } | 231 | } |
| 229 | module_init(percpu_counter_startup); | 232 | module_init(percpu_counter_startup); |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e6d552c40dd..2e8c6f7aa56e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
| @@ -220,10 +220,10 @@ static void dump_node(struct radix_tree_node *node, unsigned long index) | |||
| 220 | { | 220 | { |
| 221 | unsigned long i; | 221 | unsigned long i; |
| 222 | 222 | ||
| 223 | pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d parent %p\n", | 223 | pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d exceptional %d parent %p\n", |
| 224 | node, node->offset, | 224 | node, node->offset, |
| 225 | node->tags[0][0], node->tags[1][0], node->tags[2][0], | 225 | node->tags[0][0], node->tags[1][0], node->tags[2][0], |
| 226 | node->shift, node->count, node->parent); | 226 | node->shift, node->count, node->exceptional, node->parent); |
| 227 | 227 | ||
| 228 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { | 228 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { |
| 229 | unsigned long first = index | (i << node->shift); | 229 | unsigned long first = index | (i << node->shift); |
| @@ -325,7 +325,6 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) | |||
| 325 | tag_clear(node, i, 0); | 325 | tag_clear(node, i, 0); |
| 326 | 326 | ||
| 327 | node->slots[0] = NULL; | 327 | node->slots[0] = NULL; |
| 328 | node->count = 0; | ||
| 329 | 328 | ||
| 330 | kmem_cache_free(radix_tree_node_cachep, node); | 329 | kmem_cache_free(radix_tree_node_cachep, node); |
| 331 | } | 330 | } |
| @@ -522,8 +521,13 @@ static int radix_tree_extend(struct radix_tree_root *root, | |||
| 522 | node->offset = 0; | 521 | node->offset = 0; |
| 523 | node->count = 1; | 522 | node->count = 1; |
| 524 | node->parent = NULL; | 523 | node->parent = NULL; |
| 525 | if (radix_tree_is_internal_node(slot)) | 524 | if (radix_tree_is_internal_node(slot)) { |
| 526 | entry_to_node(slot)->parent = node; | 525 | entry_to_node(slot)->parent = node; |
| 526 | } else { | ||
| 527 | /* Moving an exceptional root->rnode to a node */ | ||
| 528 | if (radix_tree_exceptional_entry(slot)) | ||
| 529 | node->exceptional = 1; | ||
| 530 | } | ||
| 527 | node->slots[0] = slot; | 531 | node->slots[0] = slot; |
| 528 | slot = node_to_entry(node); | 532 | slot = node_to_entry(node); |
| 529 | rcu_assign_pointer(root->rnode, slot); | 533 | rcu_assign_pointer(root->rnode, slot); |
| @@ -534,6 +538,104 @@ out: | |||
| 534 | } | 538 | } |
| 535 | 539 | ||
| 536 | /** | 540 | /** |
| 541 | * radix_tree_shrink - shrink radix tree to minimum height | ||
| 542 | * @root radix tree root | ||
| 543 | */ | ||
| 544 | static inline void radix_tree_shrink(struct radix_tree_root *root, | ||
| 545 | radix_tree_update_node_t update_node, | ||
| 546 | void *private) | ||
| 547 | { | ||
| 548 | for (;;) { | ||
| 549 | struct radix_tree_node *node = root->rnode; | ||
| 550 | struct radix_tree_node *child; | ||
| 551 | |||
| 552 | if (!radix_tree_is_internal_node(node)) | ||
| 553 | break; | ||
| 554 | node = entry_to_node(node); | ||
| 555 | |||
| 556 | /* | ||
| 557 | * The candidate node has more than one child, or its child | ||
| 558 | * is not at the leftmost slot, or the child is a multiorder | ||
| 559 | * entry, we cannot shrink. | ||
| 560 | */ | ||
| 561 | if (node->count != 1) | ||
| 562 | break; | ||
| 563 | child = node->slots[0]; | ||
| 564 | if (!child) | ||
| 565 | break; | ||
| 566 | if (!radix_tree_is_internal_node(child) && node->shift) | ||
| 567 | break; | ||
| 568 | |||
| 569 | if (radix_tree_is_internal_node(child)) | ||
| 570 | entry_to_node(child)->parent = NULL; | ||
| 571 | |||
| 572 | /* | ||
| 573 | * We don't need rcu_assign_pointer(), since we are simply | ||
| 574 | * moving the node from one part of the tree to another: if it | ||
| 575 | * was safe to dereference the old pointer to it | ||
| 576 | * (node->slots[0]), it will be safe to dereference the new | ||
| 577 | * one (root->rnode) as far as dependent read barriers go. | ||
| 578 | */ | ||
| 579 | root->rnode = child; | ||
| 580 | |||
| 581 | /* | ||
| 582 | * We have a dilemma here. The node's slot[0] must not be | ||
| 583 | * NULLed in case there are concurrent lookups expecting to | ||
| 584 | * find the item. However if this was a bottom-level node, | ||
| 585 | * then it may be subject to the slot pointer being visible | ||
| 586 | * to callers dereferencing it. If item corresponding to | ||
| 587 | * slot[0] is subsequently deleted, these callers would expect | ||
| 588 | * their slot to become empty sooner or later. | ||
| 589 | * | ||
| 590 | * For example, lockless pagecache will look up a slot, deref | ||
| 591 | * the page pointer, and if the page has 0 refcount it means it | ||
| 592 | * was concurrently deleted from pagecache so try the deref | ||
| 593 | * again. Fortunately there is already a requirement for logic | ||
| 594 | * to retry the entire slot lookup -- the indirect pointer | ||
| 595 | * problem (replacing direct root node with an indirect pointer | ||
| 596 | * also results in a stale slot). So tag the slot as indirect | ||
| 597 | * to force callers to retry. | ||
| 598 | */ | ||
| 599 | node->count = 0; | ||
| 600 | if (!radix_tree_is_internal_node(child)) { | ||
| 601 | node->slots[0] = RADIX_TREE_RETRY; | ||
| 602 | if (update_node) | ||
| 603 | update_node(node, private); | ||
| 604 | } | ||
| 605 | |||
| 606 | radix_tree_node_free(node); | ||
| 607 | } | ||
| 608 | } | ||
| 609 | |||
| 610 | static void delete_node(struct radix_tree_root *root, | ||
| 611 | struct radix_tree_node *node, | ||
| 612 | radix_tree_update_node_t update_node, void *private) | ||
| 613 | { | ||
| 614 | do { | ||
| 615 | struct radix_tree_node *parent; | ||
| 616 | |||
| 617 | if (node->count) { | ||
| 618 | if (node == entry_to_node(root->rnode)) | ||
| 619 | radix_tree_shrink(root, update_node, private); | ||
| 620 | return; | ||
| 621 | } | ||
| 622 | |||
| 623 | parent = node->parent; | ||
| 624 | if (parent) { | ||
| 625 | parent->slots[node->offset] = NULL; | ||
| 626 | parent->count--; | ||
| 627 | } else { | ||
| 628 | root_tag_clear_all(root); | ||
| 629 | root->rnode = NULL; | ||
| 630 | } | ||
| 631 | |||
| 632 | radix_tree_node_free(node); | ||
| 633 | |||
| 634 | node = parent; | ||
| 635 | } while (node); | ||
| 636 | } | ||
| 637 | |||
| 638 | /** | ||
| 537 | * __radix_tree_create - create a slot in a radix tree | 639 | * __radix_tree_create - create a slot in a radix tree |
| 538 | * @root: radix tree root | 640 | * @root: radix tree root |
| 539 | * @index: index key | 641 | * @index: index key |
| @@ -649,6 +751,8 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, | |||
| 649 | if (node) { | 751 | if (node) { |
| 650 | unsigned offset = get_slot_offset(node, slot); | 752 | unsigned offset = get_slot_offset(node, slot); |
| 651 | node->count++; | 753 | node->count++; |
| 754 | if (radix_tree_exceptional_entry(item)) | ||
| 755 | node->exceptional++; | ||
| 652 | BUG_ON(tag_get(node, 0, offset)); | 756 | BUG_ON(tag_get(node, 0, offset)); |
| 653 | BUG_ON(tag_get(node, 1, offset)); | 757 | BUG_ON(tag_get(node, 1, offset)); |
| 654 | BUG_ON(tag_get(node, 2, offset)); | 758 | BUG_ON(tag_get(node, 2, offset)); |
| @@ -746,6 +850,85 @@ void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) | |||
| 746 | } | 850 | } |
| 747 | EXPORT_SYMBOL(radix_tree_lookup); | 851 | EXPORT_SYMBOL(radix_tree_lookup); |
| 748 | 852 | ||
| 853 | static void replace_slot(struct radix_tree_root *root, | ||
| 854 | struct radix_tree_node *node, | ||
| 855 | void **slot, void *item, | ||
| 856 | bool warn_typeswitch) | ||
| 857 | { | ||
| 858 | void *old = rcu_dereference_raw(*slot); | ||
| 859 | int count, exceptional; | ||
| 860 | |||
| 861 | WARN_ON_ONCE(radix_tree_is_internal_node(item)); | ||
| 862 | |||
| 863 | count = !!item - !!old; | ||
| 864 | exceptional = !!radix_tree_exceptional_entry(item) - | ||
| 865 | !!radix_tree_exceptional_entry(old); | ||
| 866 | |||
| 867 | WARN_ON_ONCE(warn_typeswitch && (count || exceptional)); | ||
| 868 | |||
| 869 | if (node) { | ||
| 870 | node->count += count; | ||
| 871 | node->exceptional += exceptional; | ||
| 872 | } | ||
| 873 | |||
| 874 | rcu_assign_pointer(*slot, item); | ||
| 875 | } | ||
| 876 | |||
| 877 | /** | ||
| 878 | * __radix_tree_replace - replace item in a slot | ||
| 879 | * @root: radix tree root | ||
| 880 | * @node: pointer to tree node | ||
| 881 | * @slot: pointer to slot in @node | ||
| 882 | * @item: new item to store in the slot. | ||
| 883 | * @update_node: callback for changing leaf nodes | ||
| 884 | * @private: private data to pass to @update_node | ||
| 885 | * | ||
| 886 | * For use with __radix_tree_lookup(). Caller must hold tree write locked | ||
| 887 | * across slot lookup and replacement. | ||
| 888 | */ | ||
| 889 | void __radix_tree_replace(struct radix_tree_root *root, | ||
| 890 | struct radix_tree_node *node, | ||
| 891 | void **slot, void *item, | ||
| 892 | radix_tree_update_node_t update_node, void *private) | ||
| 893 | { | ||
| 894 | /* | ||
| 895 | * This function supports replacing exceptional entries and | ||
| 896 | * deleting entries, but that needs accounting against the | ||
| 897 | * node unless the slot is root->rnode. | ||
| 898 | */ | ||
| 899 | replace_slot(root, node, slot, item, | ||
| 900 | !node && slot != (void **)&root->rnode); | ||
| 901 | |||
| 902 | if (!node) | ||
| 903 | return; | ||
| 904 | |||
| 905 | if (update_node) | ||
| 906 | update_node(node, private); | ||
| 907 | |||
| 908 | delete_node(root, node, update_node, private); | ||
| 909 | } | ||
| 910 | |||
| 911 | /** | ||
| 912 | * radix_tree_replace_slot - replace item in a slot | ||
| 913 | * @root: radix tree root | ||
| 914 | * @slot: pointer to slot | ||
| 915 | * @item: new item to store in the slot. | ||
| 916 | * | ||
| 917 | * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(), | ||
| 918 | * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked | ||
| 919 | * across slot lookup and replacement. | ||
| 920 | * | ||
| 921 | * NOTE: This cannot be used to switch between non-entries (empty slots), | ||
| 922 | * regular entries, and exceptional entries, as that requires accounting | ||
| 923 | * inside the radix tree node. When switching from one type of entry or | ||
| 924 | * deleting, use __radix_tree_lookup() and __radix_tree_replace(). | ||
| 925 | */ | ||
| 926 | void radix_tree_replace_slot(struct radix_tree_root *root, | ||
| 927 | void **slot, void *item) | ||
| 928 | { | ||
| 929 | replace_slot(root, NULL, slot, item, true); | ||
| 930 | } | ||
| 931 | |||
| 749 | /** | 932 | /** |
| 750 | * radix_tree_tag_set - set a tag on a radix tree node | 933 | * radix_tree_tag_set - set a tag on a radix tree node |
| 751 | * @root: radix tree root | 934 | * @root: radix tree root |
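A hedged sketch of the lookup-then-replace pattern the new __radix_tree_replace() kernel-doc prescribes; the wrapper function name is a placeholder and the caller is assumed to hold the tree write-locked.

/* Hedged sketch: replace an entry while keeping node accounting correct. */
static void my_replace_entry(struct radix_tree_root *root,
			     unsigned long index, void *new_item)
{
	struct radix_tree_node *node;
	void **slot;

	/* Caller must hold the tree write-locked across lookup + replace. */
	if (!__radix_tree_lookup(root, index, &node, &slot))
		return;

	__radix_tree_replace(root, node, slot, new_item, NULL, NULL);
}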
| @@ -1394,75 +1577,6 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |||
| 1394 | #endif /* CONFIG_SHMEM && CONFIG_SWAP */ | 1577 | #endif /* CONFIG_SHMEM && CONFIG_SWAP */ |
| 1395 | 1578 | ||
| 1396 | /** | 1579 | /** |
| 1397 | * radix_tree_shrink - shrink radix tree to minimum height | ||
| 1398 | * @root radix tree root | ||
| 1399 | */ | ||
| 1400 | static inline bool radix_tree_shrink(struct radix_tree_root *root) | ||
| 1401 | { | ||
| 1402 | bool shrunk = false; | ||
| 1403 | |||
| 1404 | for (;;) { | ||
| 1405 | struct radix_tree_node *node = root->rnode; | ||
| 1406 | struct radix_tree_node *child; | ||
| 1407 | |||
| 1408 | if (!radix_tree_is_internal_node(node)) | ||
| 1409 | break; | ||
| 1410 | node = entry_to_node(node); | ||
| 1411 | |||
| 1412 | /* | ||
| 1413 | * The candidate node has more than one child, or its child | ||
| 1414 | * is not at the leftmost slot, or the child is a multiorder | ||
| 1415 | * entry, we cannot shrink. | ||
| 1416 | */ | ||
| 1417 | if (node->count != 1) | ||
| 1418 | break; | ||
| 1419 | child = node->slots[0]; | ||
| 1420 | if (!child) | ||
| 1421 | break; | ||
| 1422 | if (!radix_tree_is_internal_node(child) && node->shift) | ||
| 1423 | break; | ||
| 1424 | |||
| 1425 | if (radix_tree_is_internal_node(child)) | ||
| 1426 | entry_to_node(child)->parent = NULL; | ||
| 1427 | |||
| 1428 | /* | ||
| 1429 | * We don't need rcu_assign_pointer(), since we are simply | ||
| 1430 | * moving the node from one part of the tree to another: if it | ||
| 1431 | * was safe to dereference the old pointer to it | ||
| 1432 | * (node->slots[0]), it will be safe to dereference the new | ||
| 1433 | * one (root->rnode) as far as dependent read barriers go. | ||
| 1434 | */ | ||
| 1435 | root->rnode = child; | ||
| 1436 | |||
| 1437 | /* | ||
| 1438 | * We have a dilemma here. The node's slot[0] must not be | ||
| 1439 | * NULLed in case there are concurrent lookups expecting to | ||
| 1440 | * find the item. However if this was a bottom-level node, | ||
| 1441 | * then it may be subject to the slot pointer being visible | ||
| 1442 | * to callers dereferencing it. If item corresponding to | ||
| 1443 | * slot[0] is subsequently deleted, these callers would expect | ||
| 1444 | * their slot to become empty sooner or later. | ||
| 1445 | * | ||
| 1446 | * For example, lockless pagecache will look up a slot, deref | ||
| 1447 | * the page pointer, and if the page has 0 refcount it means it | ||
| 1448 | * was concurrently deleted from pagecache so try the deref | ||
| 1449 | * again. Fortunately there is already a requirement for logic | ||
| 1450 | * to retry the entire slot lookup -- the indirect pointer | ||
| 1451 | * problem (replacing direct root node with an indirect pointer | ||
| 1452 | * also results in a stale slot). So tag the slot as indirect | ||
| 1453 | * to force callers to retry. | ||
| 1454 | */ | ||
| 1455 | if (!radix_tree_is_internal_node(child)) | ||
| 1456 | node->slots[0] = RADIX_TREE_RETRY; | ||
| 1457 | |||
| 1458 | radix_tree_node_free(node); | ||
| 1459 | shrunk = true; | ||
| 1460 | } | ||
| 1461 | |||
| 1462 | return shrunk; | ||
| 1463 | } | ||
| 1464 | |||
| 1465 | /** | ||
| 1466 | * __radix_tree_delete_node - try to free node after clearing a slot | 1580 | * __radix_tree_delete_node - try to free node after clearing a slot |
| 1467 | * @root: radix tree root | 1581 | * @root: radix tree root |
| 1468 | * @node: node containing @index | 1582 | * @node: node containing @index |
| @@ -1470,39 +1584,11 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root) | |||
| 1470 | * After clearing the slot at @index in @node from radix tree | 1584 | * After clearing the slot at @index in @node from radix tree |
| 1471 | * rooted at @root, call this function to attempt freeing the | 1585 | * rooted at @root, call this function to attempt freeing the |
| 1472 | * node and shrinking the tree. | 1586 | * node and shrinking the tree. |
| 1473 | * | ||
| 1474 | * Returns %true if @node was freed, %false otherwise. | ||
| 1475 | */ | 1587 | */ |
| 1476 | bool __radix_tree_delete_node(struct radix_tree_root *root, | 1588 | void __radix_tree_delete_node(struct radix_tree_root *root, |
| 1477 | struct radix_tree_node *node) | 1589 | struct radix_tree_node *node) |
| 1478 | { | 1590 | { |
| 1479 | bool deleted = false; | 1591 | delete_node(root, node, NULL, NULL); |
| 1480 | |||
| 1481 | do { | ||
| 1482 | struct radix_tree_node *parent; | ||
| 1483 | |||
| 1484 | if (node->count) { | ||
| 1485 | if (node == entry_to_node(root->rnode)) | ||
| 1486 | deleted |= radix_tree_shrink(root); | ||
| 1487 | return deleted; | ||
| 1488 | } | ||
| 1489 | |||
| 1490 | parent = node->parent; | ||
| 1491 | if (parent) { | ||
| 1492 | parent->slots[node->offset] = NULL; | ||
| 1493 | parent->count--; | ||
| 1494 | } else { | ||
| 1495 | root_tag_clear_all(root); | ||
| 1496 | root->rnode = NULL; | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | radix_tree_node_free(node); | ||
| 1500 | deleted = true; | ||
| 1501 | |||
| 1502 | node = parent; | ||
| 1503 | } while (node); | ||
| 1504 | |||
| 1505 | return deleted; | ||
| 1506 | } | 1592 | } |
| 1507 | 1593 | ||
| 1508 | static inline void delete_sibling_entries(struct radix_tree_node *node, | 1594 | static inline void delete_sibling_entries(struct radix_tree_node *node, |
| @@ -1559,10 +1645,7 @@ void *radix_tree_delete_item(struct radix_tree_root *root, | |||
| 1559 | node_tag_clear(root, node, tag, offset); | 1645 | node_tag_clear(root, node, tag, offset); |
| 1560 | 1646 | ||
| 1561 | delete_sibling_entries(node, node_to_entry(slot), offset); | 1647 | delete_sibling_entries(node, node_to_entry(slot), offset); |
| 1562 | node->slots[offset] = NULL; | 1648 | __radix_tree_replace(root, node, slot, NULL, NULL, NULL); |
| 1563 | node->count--; | ||
| 1564 | |||
| 1565 | __radix_tree_delete_node(root, node); | ||
| 1566 | 1649 | ||
| 1567 | return entry; | 1650 | return entry; |
| 1568 | } | 1651 | } |
| @@ -1642,32 +1725,31 @@ static __init void radix_tree_init_maxnodes(void) | |||
| 1642 | } | 1725 | } |
| 1643 | } | 1726 | } |
| 1644 | 1727 | ||
| 1645 | static int radix_tree_callback(struct notifier_block *nfb, | 1728 | static int radix_tree_cpu_dead(unsigned int cpu) |
| 1646 | unsigned long action, void *hcpu) | ||
| 1647 | { | 1729 | { |
| 1648 | int cpu = (long)hcpu; | ||
| 1649 | struct radix_tree_preload *rtp; | 1730 | struct radix_tree_preload *rtp; |
| 1650 | struct radix_tree_node *node; | 1731 | struct radix_tree_node *node; |
| 1651 | 1732 | ||
| 1652 | /* Free per-cpu pool of preloaded nodes */ | 1733 | /* Free per-cpu pool of preloaded nodes */ |
| 1653 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | 1734 | rtp = &per_cpu(radix_tree_preloads, cpu); |
| 1654 | rtp = &per_cpu(radix_tree_preloads, cpu); | 1735 | while (rtp->nr) { |
| 1655 | while (rtp->nr) { | 1736 | node = rtp->nodes; |
| 1656 | node = rtp->nodes; | 1737 | rtp->nodes = node->private_data; |
| 1657 | rtp->nodes = node->private_data; | 1738 | kmem_cache_free(radix_tree_node_cachep, node); |
| 1658 | kmem_cache_free(radix_tree_node_cachep, node); | 1739 | rtp->nr--; |
| 1659 | rtp->nr--; | ||
| 1660 | } | ||
| 1661 | } | 1740 | } |
| 1662 | return NOTIFY_OK; | 1741 | return 0; |
| 1663 | } | 1742 | } |
| 1664 | 1743 | ||
| 1665 | void __init radix_tree_init(void) | 1744 | void __init radix_tree_init(void) |
| 1666 | { | 1745 | { |
| 1746 | int ret; | ||
| 1667 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", | 1747 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
| 1668 | sizeof(struct radix_tree_node), 0, | 1748 | sizeof(struct radix_tree_node), 0, |
| 1669 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, | 1749 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
| 1670 | radix_tree_node_ctor); | 1750 | radix_tree_node_ctor); |
| 1671 | radix_tree_init_maxnodes(); | 1751 | radix_tree_init_maxnodes(); |
| 1672 | hotcpu_notifier(radix_tree_callback, 0); | 1752 | ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", |
| 1753 | NULL, radix_tree_cpu_dead); | ||
| 1754 | WARN_ON(ret < 0); | ||
| 1673 | } | 1755 | } |
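The hotcpu_notifier() removal above follows the generic conversion to the CPU hotplug state machine. A minimal sketch of that pattern for a hypothetical subsystem (the foo_* names and the CPUHP_FOO_DEAD state are assumptions; only the cpuhp_setup_state_nocalls() call itself mirrors the patch):

    /* Teardown callback: runs on a surviving CPU once @cpu is dead. */
    static int foo_cpu_dead(unsigned int cpu)
    {
            foo_free_percpu_cache(cpu);     /* assumed helper */
            return 0;                       /* 0 means success to the cpuhp core */
    }

    static int __init foo_init(void)
    {
            /*
             * CPUHP_FOO_DEAD would be a new constant added to enum
             * cpuhp_state; the _nocalls variant does not invoke the
             * callback for CPUs that are already online at setup time.
             */
            return cpuhp_setup_state_nocalls(CPUHP_FOO_DEAD, "lib/foo:dead",
                                             NULL, foo_cpu_dead);
    }
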
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c index 76734004358d..20bca3d44f67 100644 --- a/lib/raid6/avx2.c +++ b/lib/raid6/avx2.c | |||
| @@ -87,9 +87,57 @@ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs) | |||
| 87 | kernel_fpu_end(); | 87 | kernel_fpu_end(); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static void raid6_avx21_xor_syndrome(int disks, int start, int stop, | ||
| 91 | size_t bytes, void **ptrs) | ||
| 92 | { | ||
| 93 | u8 **dptr = (u8 **)ptrs; | ||
| 94 | u8 *p, *q; | ||
| 95 | int d, z, z0; | ||
| 96 | |||
| 97 | z0 = stop; /* P/Q right side optimization */ | ||
| 98 | p = dptr[disks-2]; /* XOR parity */ | ||
| 99 | q = dptr[disks-1]; /* RS syndrome */ | ||
| 100 | |||
| 101 | kernel_fpu_begin(); | ||
| 102 | |||
| 103 | asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); | ||
| 104 | |||
| 105 | for (d = 0 ; d < bytes ; d += 32) { | ||
| 106 | asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d])); | ||
| 107 | asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d])); | ||
| 108 | asm volatile("vpxor %ymm4,%ymm2,%ymm2"); | ||
| 109 | /* P/Q data pages */ | ||
| 110 | for (z = z0-1 ; z >= start ; z--) { | ||
| 111 | asm volatile("vpxor %ymm5,%ymm5,%ymm5"); | ||
| 112 | asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5"); | ||
| 113 | asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); | ||
| 114 | asm volatile("vpand %ymm0,%ymm5,%ymm5"); | ||
| 115 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 116 | asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d])); | ||
| 117 | asm volatile("vpxor %ymm5,%ymm2,%ymm2"); | ||
| 118 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 119 | } | ||
| 120 | /* P/Q left side optimization */ | ||
| 121 | for (z = start-1 ; z >= 0 ; z--) { | ||
| 122 | asm volatile("vpxor %ymm5,%ymm5,%ymm5"); | ||
| 123 | asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5"); | ||
| 124 | asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); | ||
| 125 | asm volatile("vpand %ymm0,%ymm5,%ymm5"); | ||
| 126 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 127 | } | ||
| 128 | asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d])); | ||
| 129 | /* Don't use movntdq for r/w memory area < cache line */ | ||
| 130 | asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d])); | ||
| 131 | asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d])); | ||
| 132 | } | ||
| 133 | |||
| 134 | asm volatile("sfence" : : : "memory"); | ||
| 135 | kernel_fpu_end(); | ||
| 136 | } | ||
| 137 | |||
| 90 | const struct raid6_calls raid6_avx2x1 = { | 138 | const struct raid6_calls raid6_avx2x1 = { |
| 91 | raid6_avx21_gen_syndrome, | 139 | raid6_avx21_gen_syndrome, |
| 92 | NULL, /* XOR not yet implemented */ | 140 | raid6_avx21_xor_syndrome, |
| 93 | raid6_have_avx2, | 141 | raid6_have_avx2, |
| 94 | "avx2x1", | 142 | "avx2x1", |
| 95 | 1 /* Has cache hints */ | 143 | 1 /* Has cache hints */ |
| @@ -149,9 +197,77 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs) | |||
| 149 | kernel_fpu_end(); | 197 | kernel_fpu_end(); |
| 150 | } | 198 | } |
| 151 | 199 | ||
| 200 | static void raid6_avx22_xor_syndrome(int disks, int start, int stop, | ||
| 201 | size_t bytes, void **ptrs) | ||
| 202 | { | ||
| 203 | u8 **dptr = (u8 **)ptrs; | ||
| 204 | u8 *p, *q; | ||
| 205 | int d, z, z0; | ||
| 206 | |||
| 207 | z0 = stop; /* P/Q right side optimization */ | ||
| 208 | p = dptr[disks-2]; /* XOR parity */ | ||
| 209 | q = dptr[disks-1]; /* RS syndrome */ | ||
| 210 | |||
| 211 | kernel_fpu_begin(); | ||
| 212 | |||
| 213 | asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); | ||
| 214 | |||
| 215 | for (d = 0 ; d < bytes ; d += 64) { | ||
| 216 | asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d])); | ||
| 217 | asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32])); | ||
| 218 | asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d])); | ||
| 219 | asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32])); | ||
| 220 | asm volatile("vpxor %ymm4,%ymm2,%ymm2"); | ||
| 221 | asm volatile("vpxor %ymm6,%ymm3,%ymm3"); | ||
| 222 | /* P/Q data pages */ | ||
| 223 | for (z = z0-1 ; z >= start ; z--) { | ||
| 224 | asm volatile("vpxor %ymm5,%ymm5,%ymm5"); | ||
| 225 | asm volatile("vpxor %ymm7,%ymm7,%ymm7"); | ||
| 226 | asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5"); | ||
| 227 | asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7"); | ||
| 228 | asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); | ||
| 229 | asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); | ||
| 230 | asm volatile("vpand %ymm0,%ymm5,%ymm5"); | ||
| 231 | asm volatile("vpand %ymm0,%ymm7,%ymm7"); | ||
| 232 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 233 | asm volatile("vpxor %ymm7,%ymm6,%ymm6"); | ||
| 234 | asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d])); | ||
| 235 | asm volatile("vmovdqa %0,%%ymm7" | ||
| 236 | :: "m" (dptr[z][d+32])); | ||
| 237 | asm volatile("vpxor %ymm5,%ymm2,%ymm2"); | ||
| 238 | asm volatile("vpxor %ymm7,%ymm3,%ymm3"); | ||
| 239 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 240 | asm volatile("vpxor %ymm7,%ymm6,%ymm6"); | ||
| 241 | } | ||
| 242 | /* P/Q left side optimization */ | ||
| 243 | for (z = start-1 ; z >= 0 ; z--) { | ||
| 244 | asm volatile("vpxor %ymm5,%ymm5,%ymm5"); | ||
| 245 | asm volatile("vpxor %ymm7,%ymm7,%ymm7"); | ||
| 246 | asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5"); | ||
| 247 | asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7"); | ||
| 248 | asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); | ||
| 249 | asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); | ||
| 250 | asm volatile("vpand %ymm0,%ymm5,%ymm5"); | ||
| 251 | asm volatile("vpand %ymm0,%ymm7,%ymm7"); | ||
| 252 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 253 | asm volatile("vpxor %ymm7,%ymm6,%ymm6"); | ||
| 254 | } | ||
| 255 | asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d])); | ||
| 256 | asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32])); | ||
| 257 | /* Don't use movntdq for r/w memory area < cache line */ | ||
| 258 | asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d])); | ||
| 259 | asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32])); | ||
| 260 | asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d])); | ||
| 261 | asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32])); | ||
| 262 | } | ||
| 263 | |||
| 264 | asm volatile("sfence" : : : "memory"); | ||
| 265 | kernel_fpu_end(); | ||
| 266 | } | ||
| 267 | |||
| 152 | const struct raid6_calls raid6_avx2x2 = { | 268 | const struct raid6_calls raid6_avx2x2 = { |
| 153 | raid6_avx22_gen_syndrome, | 269 | raid6_avx22_gen_syndrome, |
| 154 | NULL, /* XOR not yet implemented */ | 270 | raid6_avx22_xor_syndrome, |
| 155 | raid6_have_avx2, | 271 | raid6_have_avx2, |
| 156 | "avx2x2", | 272 | "avx2x2", |
| 157 | 1 /* Has cache hints */ | 273 | 1 /* Has cache hints */ |
| @@ -242,9 +358,119 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs) | |||
| 242 | kernel_fpu_end(); | 358 | kernel_fpu_end(); |
| 243 | } | 359 | } |
| 244 | 360 | ||
| 361 | static void raid6_avx24_xor_syndrome(int disks, int start, int stop, | ||
| 362 | size_t bytes, void **ptrs) | ||
| 363 | { | ||
| 364 | u8 **dptr = (u8 **)ptrs; | ||
| 365 | u8 *p, *q; | ||
| 366 | int d, z, z0; | ||
| 367 | |||
| 368 | z0 = stop; /* P/Q right side optimization */ | ||
| 369 | p = dptr[disks-2]; /* XOR parity */ | ||
| 370 | q = dptr[disks-1]; /* RS syndrome */ | ||
| 371 | |||
| 372 | kernel_fpu_begin(); | ||
| 373 | |||
| 374 | asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0])); | ||
| 375 | |||
| 376 | for (d = 0 ; d < bytes ; d += 128) { | ||
| 377 | asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d])); | ||
| 378 | asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32])); | ||
| 379 | asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64])); | ||
| 380 | asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96])); | ||
| 381 | asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d])); | ||
| 382 | asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32])); | ||
| 383 | asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64])); | ||
| 384 | asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96])); | ||
| 385 | asm volatile("vpxor %ymm4,%ymm2,%ymm2"); | ||
| 386 | asm volatile("vpxor %ymm6,%ymm3,%ymm3"); | ||
| 387 | asm volatile("vpxor %ymm12,%ymm10,%ymm10"); | ||
| 388 | asm volatile("vpxor %ymm14,%ymm11,%ymm11"); | ||
| 389 | /* P/Q data pages */ | ||
| 390 | for (z = z0-1 ; z >= start ; z--) { | ||
| 391 | asm volatile("prefetchnta %0" :: "m" (dptr[z][d])); | ||
| 392 | asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64])); | ||
| 393 | asm volatile("vpxor %ymm5,%ymm5,%ymm5"); | ||
| 394 | asm volatile("vpxor %ymm7,%ymm7,%ymm7"); | ||
| 395 | asm volatile("vpxor %ymm13,%ymm13,%ymm13"); | ||
| 396 | asm volatile("vpxor %ymm15,%ymm15,%ymm15"); | ||
| 397 | asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5"); | ||
| 398 | asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7"); | ||
| 399 | asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13"); | ||
| 400 | asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15"); | ||
| 401 | asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); | ||
| 402 | asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); | ||
| 403 | asm volatile("vpaddb %ymm12,%ymm12,%ymm12"); | ||
| 404 | asm volatile("vpaddb %ymm14,%ymm14,%ymm14"); | ||
| 405 | asm volatile("vpand %ymm0,%ymm5,%ymm5"); | ||
| 406 | asm volatile("vpand %ymm0,%ymm7,%ymm7"); | ||
| 407 | asm volatile("vpand %ymm0,%ymm13,%ymm13"); | ||
| 408 | asm volatile("vpand %ymm0,%ymm15,%ymm15"); | ||
| 409 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 410 | asm volatile("vpxor %ymm7,%ymm6,%ymm6"); | ||
| 411 | asm volatile("vpxor %ymm13,%ymm12,%ymm12"); | ||
| 412 | asm volatile("vpxor %ymm15,%ymm14,%ymm14"); | ||
| 413 | asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d])); | ||
| 414 | asm volatile("vmovdqa %0,%%ymm7" | ||
| 415 | :: "m" (dptr[z][d+32])); | ||
| 416 | asm volatile("vmovdqa %0,%%ymm13" | ||
| 417 | :: "m" (dptr[z][d+64])); | ||
| 418 | asm volatile("vmovdqa %0,%%ymm15" | ||
| 419 | :: "m" (dptr[z][d+96])); | ||
| 420 | asm volatile("vpxor %ymm5,%ymm2,%ymm2"); | ||
| 421 | asm volatile("vpxor %ymm7,%ymm3,%ymm3"); | ||
| 422 | asm volatile("vpxor %ymm13,%ymm10,%ymm10"); | ||
| 423 | asm volatile("vpxor %ymm15,%ymm11,%ymm11"); | ||
| 424 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 425 | asm volatile("vpxor %ymm7,%ymm6,%ymm6"); | ||
| 426 | asm volatile("vpxor %ymm13,%ymm12,%ymm12"); | ||
| 427 | asm volatile("vpxor %ymm15,%ymm14,%ymm14"); | ||
| 428 | } | ||
| 429 | asm volatile("prefetchnta %0" :: "m" (q[d])); | ||
| 430 | asm volatile("prefetchnta %0" :: "m" (q[d+64])); | ||
| 431 | /* P/Q left side optimization */ | ||
| 432 | for (z = start-1 ; z >= 0 ; z--) { | ||
| 433 | asm volatile("vpxor %ymm5,%ymm5,%ymm5"); | ||
| 434 | asm volatile("vpxor %ymm7,%ymm7,%ymm7"); | ||
| 435 | asm volatile("vpxor %ymm13,%ymm13,%ymm13"); | ||
| 436 | asm volatile("vpxor %ymm15,%ymm15,%ymm15"); | ||
| 437 | asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5"); | ||
| 438 | asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7"); | ||
| 439 | asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13"); | ||
| 440 | asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15"); | ||
| 441 | asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); | ||
| 442 | asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); | ||
| 443 | asm volatile("vpaddb %ymm12,%ymm12,%ymm12"); | ||
| 444 | asm volatile("vpaddb %ymm14,%ymm14,%ymm14"); | ||
| 445 | asm volatile("vpand %ymm0,%ymm5,%ymm5"); | ||
| 446 | asm volatile("vpand %ymm0,%ymm7,%ymm7"); | ||
| 447 | asm volatile("vpand %ymm0,%ymm13,%ymm13"); | ||
| 448 | asm volatile("vpand %ymm0,%ymm15,%ymm15"); | ||
| 449 | asm volatile("vpxor %ymm5,%ymm4,%ymm4"); | ||
| 450 | asm volatile("vpxor %ymm7,%ymm6,%ymm6"); | ||
| 451 | asm volatile("vpxor %ymm13,%ymm12,%ymm12"); | ||
| 452 | asm volatile("vpxor %ymm15,%ymm14,%ymm14"); | ||
| 453 | } | ||
| 454 | asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); | ||
| 455 | asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32])); | ||
| 456 | asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64])); | ||
| 457 | asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96])); | ||
| 458 | asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d])); | ||
| 459 | asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32])); | ||
| 460 | asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64])); | ||
| 461 | asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96])); | ||
| 462 | asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); | ||
| 463 | asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); | ||
| 464 | asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64])); | ||
| 465 | asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96])); | ||
| 466 | } | ||
| 467 | asm volatile("sfence" : : : "memory"); | ||
| 468 | kernel_fpu_end(); | ||
| 469 | } | ||
| 470 | |||
| 245 | const struct raid6_calls raid6_avx2x4 = { | 471 | const struct raid6_calls raid6_avx2x4 = { |
| 246 | raid6_avx24_gen_syndrome, | 472 | raid6_avx24_gen_syndrome, |
| 247 | NULL, /* XOR not yet implemented */ | 473 | raid6_avx24_xor_syndrome, |
| 248 | raid6_have_avx2, | 474 | raid6_have_avx2, |
| 249 | "avx2x4", | 475 | "avx2x4", |
| 250 | 1 /* Has cache hints */ | 476 | 1 /* Has cache hints */ |
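Each vpcmpgtb/vpaddb/vpand/vpxor group in the new xor_syndrome() routines multiplies 32 syndrome bytes by 2 in GF(2^8): the signed byte compare against zero builds a mask of bytes whose top bit is set, the add doubles them, and the masked XOR folds in the 0x1d reduction constant. A rough scalar equivalent of the whole routine, in the spirit of lib/raid6/int.uc and offered only as an illustration of what the assembly computes:

    #include <stddef.h>
    #include <stdint.h>

    /* Multiply by x (i.e. 2) in GF(2^8) with the RAID-6 polynomial 0x11d. */
    static inline uint8_t gf_mul2(uint8_t v)
    {
            return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0x00));
    }

    /* Fold the data blocks in [start, stop] back into existing P and Q. */
    static void xor_syndrome_scalar(int disks, int start, int stop,
                                    size_t bytes, void **ptrs)
    {
            uint8_t **dptr = (uint8_t **)ptrs;
            uint8_t *p = dptr[disks - 2];   /* XOR parity */
            uint8_t *q = dptr[disks - 1];   /* RS syndrome */
            size_t d;
            int z;

            for (d = 0; d < bytes; d++) {
                    uint8_t wp = dptr[stop][d], wq = wp;

                    for (z = stop - 1; z >= start; z--) {
                            wq = gf_mul2(wq) ^ dptr[z][d];
                            wp ^= dptr[z][d];
                    }
                    /* "left side": below @start only the Q weights change */
                    for (z = start - 1; z >= 0; z--)
                            wq = gf_mul2(wq);

                    p[d] ^= wp;
                    q[d] ^= wq;
            }
    }
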
diff --git a/lib/rbtree.c b/lib/rbtree.c index eb8a19fee110..1f8b112a7c35 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
| @@ -296,11 +296,26 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, | |||
| 296 | * | 296 | * |
| 297 | * (p) (p) | 297 | * (p) (p) |
| 298 | * / \ / \ | 298 | * / \ / \ |
| 299 | * N S --> N Sl | 299 | * N S --> N sl |
| 300 | * / \ \ | 300 | * / \ \ |
| 301 | * sl Sr s | 301 | * sl Sr S |
| 302 | * \ | 302 | * \ |
| 303 | * Sr | 303 | * Sr |
| 304 | * | ||
| 305 | * Note: p might be red, and then both | ||
| 306 | * p and sl are red after rotation (which | ||
| 307 | * breaks property 4). This is fixed in | ||
| 308 | * Case 4 (in __rb_rotate_set_parents(), | ||
| 309 | * which sets sl to the color of p | ||
| 310 | * and sets p to RB_BLACK). | ||
| 311 | * | ||
| 312 | * (p) (sl) | ||
| 313 | * / \ / \ | ||
| 314 | * N sl --> P S | ||
| 315 | * \ / \ | ||
| 316 | * S N Sr | ||
| 317 | * \ | ||
| 318 | * Sr | ||
| 304 | */ | 319 | */ |
| 305 | tmp1 = tmp2->rb_right; | 320 | tmp1 = tmp2->rb_right; |
| 306 | WRITE_ONCE(sibling->rb_left, tmp1); | 321 | WRITE_ONCE(sibling->rb_left, tmp1); |
| @@ -365,7 +380,7 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, | |||
| 365 | } | 380 | } |
| 366 | break; | 381 | break; |
| 367 | } | 382 | } |
| 368 | /* Case 3 - right rotate at sibling */ | 383 | /* Case 3 - left rotate at sibling */ |
| 369 | tmp1 = tmp2->rb_left; | 384 | tmp1 = tmp2->rb_left; |
| 370 | WRITE_ONCE(sibling->rb_right, tmp1); | 385 | WRITE_ONCE(sibling->rb_right, tmp1); |
| 371 | WRITE_ONCE(tmp2->rb_left, sibling); | 386 | WRITE_ONCE(tmp2->rb_left, sibling); |
| @@ -377,7 +392,7 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, | |||
| 377 | tmp1 = sibling; | 392 | tmp1 = sibling; |
| 378 | sibling = tmp2; | 393 | sibling = tmp2; |
| 379 | } | 394 | } |
| 380 | /* Case 4 - left rotate at parent + color flips */ | 395 | /* Case 4 - right rotate at parent + color flips */ |
| 381 | tmp2 = sibling->rb_right; | 396 | tmp2 = sibling->rb_right; |
| 382 | WRITE_ONCE(parent->rb_left, tmp2); | 397 | WRITE_ONCE(parent->rb_left, tmp2); |
| 383 | WRITE_ONCE(sibling->rb_right, parent); | 398 | WRITE_ONCE(sibling->rb_right, parent); |
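For the note added above, here is a generic illustration (not the kernel's implementation, which works on struct rb_node with the colour packed into the parent pointer) of what "Case 4 - right rotate at parent + color flips" does, and why it also clears any temporary red-red pair left behind by the Case 3 rotation. All names are hypothetical:

    struct node { struct node *left, *right, *parent; int red; };

    /* Right rotation around @parent; the caller re-links the returned
     * node into the grandparent (or makes it the new root). */
    static struct node *rotate_right(struct node *parent)
    {
            struct node *sibling = parent->left;

            parent->left = sibling->right;
            if (sibling->right)
                    sibling->right->parent = parent;
            sibling->right = parent;
            sibling->parent = parent->parent;
            parent->parent = sibling;
            return sibling;
    }

    /* Mirrored erase Case 4: the sibling inherits the parent's colour,
     * while the parent and the far nephew turn black, so no red-red
     * pair can survive on this path. */
    static struct node *erase_case4(struct node *parent)
    {
            struct node *sibling = rotate_right(parent);

            sibling->red = parent->red;
            parent->red = 0;
            if (sibling->left)
                    sibling->left->red = 0;
            return sibling;
    }
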
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 22e13a0e19d7..cb1b54ee8527 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -425,7 +425,8 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, | |||
| 425 | phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | 425 | phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, |
| 426 | dma_addr_t tbl_dma_addr, | 426 | dma_addr_t tbl_dma_addr, |
| 427 | phys_addr_t orig_addr, size_t size, | 427 | phys_addr_t orig_addr, size_t size, |
| 428 | enum dma_data_direction dir) | 428 | enum dma_data_direction dir, |
| 429 | unsigned long attrs) | ||
| 429 | { | 430 | { |
| 430 | unsigned long flags; | 431 | unsigned long flags; |
| 431 | phys_addr_t tlb_addr; | 432 | phys_addr_t tlb_addr; |
| @@ -526,7 +527,8 @@ found: | |||
| 526 | */ | 527 | */ |
| 527 | for (i = 0; i < nslots; i++) | 528 | for (i = 0; i < nslots; i++) |
| 528 | io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); | 529 | io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); |
| 529 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) | 530 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && |
| 531 | (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) | ||
| 530 | swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); | 532 | swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); |
| 531 | 533 | ||
| 532 | return tlb_addr; | 534 | return tlb_addr; |
| @@ -539,18 +541,20 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); | |||
| 539 | 541 | ||
| 540 | static phys_addr_t | 542 | static phys_addr_t |
| 541 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, | 543 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, |
| 542 | enum dma_data_direction dir) | 544 | enum dma_data_direction dir, unsigned long attrs) |
| 543 | { | 545 | { |
| 544 | dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); | 546 | dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); |
| 545 | 547 | ||
| 546 | return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir); | 548 | return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, |
| 549 | dir, attrs); | ||
| 547 | } | 550 | } |
| 548 | 551 | ||
| 549 | /* | 552 | /* |
| 550 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | 553 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. |
| 551 | */ | 554 | */ |
| 552 | void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, | 555 | void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, |
| 553 | size_t size, enum dma_data_direction dir) | 556 | size_t size, enum dma_data_direction dir, |
| 557 | unsigned long attrs) | ||
| 554 | { | 558 | { |
| 555 | unsigned long flags; | 559 | unsigned long flags; |
| 556 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 560 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
| @@ -561,6 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, | |||
| 561 | * First, sync the memory before unmapping the entry | 565 | * First, sync the memory before unmapping the entry |
| 562 | */ | 566 | */ |
| 563 | if (orig_addr != INVALID_PHYS_ADDR && | 567 | if (orig_addr != INVALID_PHYS_ADDR && |
| 568 | !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && | ||
| 564 | ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) | 569 | ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) |
| 565 | swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); | 570 | swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); |
| 566 | 571 | ||
| @@ -654,7 +659,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 654 | * GFP_DMA memory; fall back on map_single(), which | 659 | * GFP_DMA memory; fall back on map_single(), which |
| 655 | * will grab memory from the lowest available address range. | 660 | * will grab memory from the lowest available address range. |
| 656 | */ | 661 | */ |
| 657 | phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE); | 662 | phys_addr_t paddr = map_single(hwdev, 0, size, |
| 663 | DMA_FROM_DEVICE, 0); | ||
| 658 | if (paddr == SWIOTLB_MAP_ERROR) | 664 | if (paddr == SWIOTLB_MAP_ERROR) |
| 659 | goto err_warn; | 665 | goto err_warn; |
| 660 | 666 | ||
| @@ -667,9 +673,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 667 | (unsigned long long)dma_mask, | 673 | (unsigned long long)dma_mask, |
| 668 | (unsigned long long)dev_addr); | 674 | (unsigned long long)dev_addr); |
| 669 | 675 | ||
| 670 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 676 | /* |
| 677 | * DMA_TO_DEVICE to avoid memcpy in unmap_single. | ||
| 678 | * The DMA_ATTR_SKIP_CPU_SYNC is optional. | ||
| 679 | */ | ||
| 671 | swiotlb_tbl_unmap_single(hwdev, paddr, | 680 | swiotlb_tbl_unmap_single(hwdev, paddr, |
| 672 | size, DMA_TO_DEVICE); | 681 | size, DMA_TO_DEVICE, |
| 682 | DMA_ATTR_SKIP_CPU_SYNC); | ||
| 673 | goto err_warn; | 683 | goto err_warn; |
| 674 | } | 684 | } |
| 675 | } | 685 | } |
| @@ -698,8 +708,12 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
| 698 | if (!is_swiotlb_buffer(paddr)) | 708 | if (!is_swiotlb_buffer(paddr)) |
| 699 | free_pages((unsigned long)vaddr, get_order(size)); | 709 | free_pages((unsigned long)vaddr, get_order(size)); |
| 700 | else | 710 | else |
| 701 | /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */ | 711 | /* |
| 702 | swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE); | 712 | * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single. |
| 713 | * DMA_ATTR_SKIP_CPU_SYNC is optional. | ||
| 714 | */ | ||
| 715 | swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE, | ||
| 716 | DMA_ATTR_SKIP_CPU_SYNC); | ||
| 703 | } | 717 | } |
| 704 | EXPORT_SYMBOL(swiotlb_free_coherent); | 718 | EXPORT_SYMBOL(swiotlb_free_coherent); |
| 705 | 719 | ||
| @@ -714,8 +728,8 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, | |||
| 714 | * When the mapping is small enough return a static buffer to limit | 728 | * When the mapping is small enough return a static buffer to limit |
| 715 | * the damage, or panic when the transfer is too big. | 729 | * the damage, or panic when the transfer is too big. |
| 716 | */ | 730 | */ |
| 717 | printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " | 731 | dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n", |
| 718 | "device %s\n", size, dev ? dev_name(dev) : "?"); | 732 | size); |
| 719 | 733 | ||
| 720 | if (size <= io_tlb_overflow || !do_panic) | 734 | if (size <= io_tlb_overflow || !do_panic) |
| 721 | return; | 735 | return; |
| @@ -755,7 +769,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | |||
| 755 | trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); | 769 | trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); |
| 756 | 770 | ||
| 757 | /* Oh well, have to allocate and map a bounce buffer. */ | 771 | /* Oh well, have to allocate and map a bounce buffer. */ |
| 758 | map = map_single(dev, phys, size, dir); | 772 | map = map_single(dev, phys, size, dir, attrs); |
| 759 | if (map == SWIOTLB_MAP_ERROR) { | 773 | if (map == SWIOTLB_MAP_ERROR) { |
| 760 | swiotlb_full(dev, size, dir, 1); | 774 | swiotlb_full(dev, size, dir, 1); |
| 761 | return phys_to_dma(dev, io_tlb_overflow_buffer); | 775 | return phys_to_dma(dev, io_tlb_overflow_buffer); |
| @@ -764,12 +778,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | |||
| 764 | dev_addr = phys_to_dma(dev, map); | 778 | dev_addr = phys_to_dma(dev, map); |
| 765 | 779 | ||
| 766 | /* Ensure that the address returned is DMA'ble */ | 780 | /* Ensure that the address returned is DMA'ble */ |
| 767 | if (!dma_capable(dev, dev_addr, size)) { | 781 | if (dma_capable(dev, dev_addr, size)) |
| 768 | swiotlb_tbl_unmap_single(dev, map, size, dir); | 782 | return dev_addr; |
| 769 | return phys_to_dma(dev, io_tlb_overflow_buffer); | ||
| 770 | } | ||
| 771 | 783 | ||
| 772 | return dev_addr; | 784 | attrs |= DMA_ATTR_SKIP_CPU_SYNC; |
| 785 | swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); | ||
| 786 | |||
| 787 | return phys_to_dma(dev, io_tlb_overflow_buffer); | ||
| 773 | } | 788 | } |
| 774 | EXPORT_SYMBOL_GPL(swiotlb_map_page); | 789 | EXPORT_SYMBOL_GPL(swiotlb_map_page); |
| 775 | 790 | ||
| @@ -782,14 +797,15 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); | |||
| 782 | * whatever the device wrote there. | 797 | * whatever the device wrote there. |
| 783 | */ | 798 | */ |
| 784 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 799 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
| 785 | size_t size, enum dma_data_direction dir) | 800 | size_t size, enum dma_data_direction dir, |
| 801 | unsigned long attrs) | ||
| 786 | { | 802 | { |
| 787 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); | 803 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); |
| 788 | 804 | ||
| 789 | BUG_ON(dir == DMA_NONE); | 805 | BUG_ON(dir == DMA_NONE); |
| 790 | 806 | ||
| 791 | if (is_swiotlb_buffer(paddr)) { | 807 | if (is_swiotlb_buffer(paddr)) { |
| 792 | swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); | 808 | swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); |
| 793 | return; | 809 | return; |
| 794 | } | 810 | } |
| 795 | 811 | ||
| @@ -809,7 +825,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | |||
| 809 | size_t size, enum dma_data_direction dir, | 825 | size_t size, enum dma_data_direction dir, |
| 810 | unsigned long attrs) | 826 | unsigned long attrs) |
| 811 | { | 827 | { |
| 812 | unmap_single(hwdev, dev_addr, size, dir); | 828 | unmap_single(hwdev, dev_addr, size, dir, attrs); |
| 813 | } | 829 | } |
| 814 | EXPORT_SYMBOL_GPL(swiotlb_unmap_page); | 830 | EXPORT_SYMBOL_GPL(swiotlb_unmap_page); |
| 815 | 831 | ||
| @@ -891,11 +907,12 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
| 891 | if (swiotlb_force || | 907 | if (swiotlb_force || |
| 892 | !dma_capable(hwdev, dev_addr, sg->length)) { | 908 | !dma_capable(hwdev, dev_addr, sg->length)) { |
| 893 | phys_addr_t map = map_single(hwdev, sg_phys(sg), | 909 | phys_addr_t map = map_single(hwdev, sg_phys(sg), |
| 894 | sg->length, dir); | 910 | sg->length, dir, attrs); |
| 895 | if (map == SWIOTLB_MAP_ERROR) { | 911 | if (map == SWIOTLB_MAP_ERROR) { |
| 896 | /* Don't panic here, we expect map_sg users | 912 | /* Don't panic here, we expect map_sg users |
| 897 | to do proper error handling. */ | 913 | to do proper error handling. */ |
| 898 | swiotlb_full(hwdev, sg->length, dir, 0); | 914 | swiotlb_full(hwdev, sg->length, dir, 0); |
| 915 | attrs |= DMA_ATTR_SKIP_CPU_SYNC; | ||
| 899 | swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, | 916 | swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, |
| 900 | attrs); | 917 | attrs); |
| 901 | sg_dma_len(sgl) = 0; | 918 | sg_dma_len(sgl) = 0; |
| @@ -910,14 +927,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
| 910 | } | 927 | } |
| 911 | EXPORT_SYMBOL(swiotlb_map_sg_attrs); | 928 | EXPORT_SYMBOL(swiotlb_map_sg_attrs); |
| 912 | 929 | ||
| 913 | int | ||
| 914 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | ||
| 915 | enum dma_data_direction dir) | ||
| 916 | { | ||
| 917 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0); | ||
| 918 | } | ||
| 919 | EXPORT_SYMBOL(swiotlb_map_sg); | ||
| 920 | |||
| 921 | /* | 930 | /* |
| 922 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules | 931 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules |
| 923 | * concerning calls here are the same as for swiotlb_unmap_page() above. | 932 | * concerning calls here are the same as for swiotlb_unmap_page() above. |
| @@ -933,19 +942,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | |||
| 933 | BUG_ON(dir == DMA_NONE); | 942 | BUG_ON(dir == DMA_NONE); |
| 934 | 943 | ||
| 935 | for_each_sg(sgl, sg, nelems, i) | 944 | for_each_sg(sgl, sg, nelems, i) |
| 936 | unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir); | 945 | unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, |
| 937 | 946 | attrs); | |
| 938 | } | 947 | } |
| 939 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | 948 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); |
| 940 | 949 | ||
| 941 | void | ||
| 942 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | ||
| 943 | enum dma_data_direction dir) | ||
| 944 | { | ||
| 945 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0); | ||
| 946 | } | ||
| 947 | EXPORT_SYMBOL(swiotlb_unmap_sg); | ||
| 948 | |||
| 949 | /* | 950 | /* |
| 950 | * Make physical memory consistent for a set of streaming mode DMA translations | 951 | * Make physical memory consistent for a set of streaming mode DMA translations |
| 951 | * after a transfer. | 952 | * after a transfer. |
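Threading attrs down to map_single()/unmap_single() is what lets callers pass DMA_ATTR_SKIP_CPU_SYNC so the automatic bounce-buffer copies are skipped. A hedged driver-side sketch of the pattern this enables for receive buffers; dev/buf/len and the rx_* helpers are placeholders, and the explicit sync covers only the bytes the device actually wrote:

    #include <linux/dma-mapping.h>

    static dma_addr_t rx_map(struct device *dev, void *buf, size_t len)
    {
            /* skip the automatic CPU syncs on map and unmap ... */
            return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
                                        DMA_ATTR_SKIP_CPU_SYNC);
    }

    static void rx_complete(struct device *dev, dma_addr_t addr, size_t used)
    {
            /* ... and sync by hand, for just the portion that was filled */
            dma_sync_single_for_cpu(dev, addr, used, DMA_FROM_DEVICE);
    }

    static void rx_unmap(struct device *dev, dma_addr_t addr, size_t len)
    {
            dma_unmap_single_attrs(dev, addr, len, DMA_FROM_DEVICE,
                                   DMA_ATTR_SKIP_CPU_SYNC);
    }
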
diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 5e51872b3fc1..fbdf87920093 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c | |||
| @@ -20,6 +20,11 @@ | |||
| 20 | #include <linux/uaccess.h> | 20 | #include <linux/uaccess.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | 22 | ||
| 23 | /* | ||
| 24 | * Note: test functions are marked noinline so that their names appear in | ||
| 25 | * reports. | ||
| 26 | */ | ||
| 27 | |||
| 23 | static noinline void __init kmalloc_oob_right(void) | 28 | static noinline void __init kmalloc_oob_right(void) |
| 24 | { | 29 | { |
| 25 | char *ptr; | 30 | char *ptr; |
| @@ -411,6 +416,29 @@ static noinline void __init copy_user_test(void) | |||
| 411 | kfree(kmem); | 416 | kfree(kmem); |
| 412 | } | 417 | } |
| 413 | 418 | ||
| 419 | static noinline void __init use_after_scope_test(void) | ||
| 420 | { | ||
| 421 | volatile char *volatile p; | ||
| 422 | |||
| 423 | pr_info("use-after-scope on int\n"); | ||
| 424 | { | ||
| 425 | int local = 0; | ||
| 426 | |||
| 427 | p = (char *)&local; | ||
| 428 | } | ||
| 429 | p[0] = 1; | ||
| 430 | p[3] = 1; | ||
| 431 | |||
| 432 | pr_info("use-after-scope on array\n"); | ||
| 433 | { | ||
| 434 | char local[1024] = {0}; | ||
| 435 | |||
| 436 | p = local; | ||
| 437 | } | ||
| 438 | p[0] = 1; | ||
| 439 | p[1023] = 1; | ||
| 440 | } | ||
| 441 | |||
| 414 | static int __init kmalloc_tests_init(void) | 442 | static int __init kmalloc_tests_init(void) |
| 415 | { | 443 | { |
| 416 | kmalloc_oob_right(); | 444 | kmalloc_oob_right(); |
| @@ -436,6 +464,7 @@ static int __init kmalloc_tests_init(void) | |||
| 436 | kasan_global_oob(); | 464 | kasan_global_oob(); |
| 437 | ksize_unpoisons_memory(); | 465 | ksize_unpoisons_memory(); |
| 438 | copy_user_test(); | 466 | copy_user_test(); |
| 467 | use_after_scope_test(); | ||
| 439 | return -EAGAIN; | 468 | return -EAGAIN; |
| 440 | } | 469 | } |
| 441 | 470 | ||
