Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig | 3 |
| -rw-r--r-- | lib/Kconfig.debug | 67 |
| -rw-r--r-- | lib/Makefile | 5 |
| -rw-r--r-- | lib/debugobjects.c | 58 |
| -rw-r--r-- | lib/dma-debug.c | 5 |
| -rw-r--r-- | lib/halfmd4.c | 67 |
| -rw-r--r-- | lib/ioremap.c | 1 |
| -rw-r--r-- | lib/iov_iter.c | 54 |
| -rw-r--r-- | lib/nmi_backtrace.c | 2 |
| -rw-r--r-- | lib/parman.c | 376 |
| -rw-r--r-- | lib/radix-tree.c | 2 |
| -rw-r--r-- | lib/rhashtable.c | 270 |
| -rw-r--r-- | lib/sbitmap.c | 139 |
| -rw-r--r-- | lib/show_mem.c | 4 |
| -rw-r--r-- | lib/swiotlb.c | 6 |
| -rw-r--r-- | lib/test_firmware.c | 92 |
| -rw-r--r-- | lib/test_parman.c | 395 |
| -rw-r--r-- | lib/test_user_copy.c | 117 |
| -rw-r--r-- | lib/timerqueue.c | 3 |
19 files changed, 1435 insertions, 231 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 260a80e313b9..5d644f180fe5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
| @@ -550,4 +550,7 @@ config STACKDEPOT | |||
| 550 | config SBITMAP | 550 | config SBITMAP |
| 551 | bool | 551 | bool |
| 552 | 552 | ||
| 553 | config PARMAN | ||
| 554 | tristate "parman" | ||
| 555 | |||
| 553 | endmenu | 556 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3d2515a770c3..66fb4389f05c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED | |||
| 164 | 164 | ||
| 165 | config DEBUG_INFO_SPLIT | 165 | config DEBUG_INFO_SPLIT |
| 166 | bool "Produce split debuginfo in .dwo files" | 166 | bool "Produce split debuginfo in .dwo files" |
| 167 | depends on DEBUG_INFO | 167 | depends on DEBUG_INFO && !FRV |
| 168 | help | 168 | help |
| 169 | Generate debug info into separate .dwo files. This significantly | 169 | Generate debug info into separate .dwo files. This significantly |
| 170 | reduces the build directory size for builds with DEBUG_INFO, | 170 | reduces the build directory size for builds with DEBUG_INFO, |
| @@ -416,6 +416,16 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE | |||
| 416 | This may be set to 1 or 0 to enable or disable them all, or | 416 | This may be set to 1 or 0 to enable or disable them all, or |
| 417 | to a bitmask as described in Documentation/sysrq.txt. | 417 | to a bitmask as described in Documentation/sysrq.txt. |
| 418 | 418 | ||
| 419 | config MAGIC_SYSRQ_SERIAL | ||
| 420 | bool "Enable magic SysRq key over serial" | ||
| 421 | depends on MAGIC_SYSRQ | ||
| 422 | default y | ||
| 423 | help | ||
| 424 | Many embedded boards have a disconnected TTL-level serial port which can | ||
| 425 | generate garbage that can lead to spurious sysrq detections. | ||
| 426 | This option allows you to decide whether you want to enable the | ||
| 427 | magic SysRq key over serial. | ||
| 428 | |||
| 419 | config DEBUG_KERNEL | 429 | config DEBUG_KERNEL |
| 420 | bool "Kernel debugging" | 430 | bool "Kernel debugging" |
| 421 | help | 431 | help |
| @@ -622,9 +632,12 @@ config DEBUG_VM_PGFLAGS | |||
| 622 | 632 | ||
| 623 | If unsure, say N. | 633 | If unsure, say N. |
| 624 | 634 | ||
| 635 | config ARCH_HAS_DEBUG_VIRTUAL | ||
| 636 | bool | ||
| 637 | |||
| 625 | config DEBUG_VIRTUAL | 638 | config DEBUG_VIRTUAL |
| 626 | bool "Debug VM translations" | 639 | bool "Debug VM translations" |
| 627 | depends on DEBUG_KERNEL && X86 | 640 | depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL |
| 628 | help | 641 | help |
| 629 | Enable some costly sanity checks in virtual to page code. This can | 642 | Enable some costly sanity checks in virtual to page code. This can |
| 630 | catch mistakes with virt_to_page() and friends. | 643 | catch mistakes with virt_to_page() and friends. |
| @@ -716,6 +729,19 @@ source "lib/Kconfig.kmemcheck" | |||
| 716 | 729 | ||
| 717 | source "lib/Kconfig.kasan" | 730 | source "lib/Kconfig.kasan" |
| 718 | 731 | ||
| 732 | config DEBUG_REFCOUNT | ||
| 733 | bool "Verbose refcount checks" | ||
| 734 | help | ||
| 735 | Say Y here if you want reference counters (refcount_t and kref) to | ||
| 736 | generate WARNs on dubious usage. Without this, refcount_t will still | ||
| 737 | be a saturating counter and avoid Use-After-Free by turning it into | ||
| 738 | a resource leak Denial-Of-Service. | ||
| 739 | |||
| 740 | Use of this option will increase kernel text size but will alert the | ||
| 741 | admin of potential abuse. | ||
| 742 | |||
| 743 | If in doubt, say "N". | ||
| 744 | |||
| 719 | endmenu # "Memory Debugging" | 745 | endmenu # "Memory Debugging" |
| 720 | 746 | ||
| 721 | config ARCH_HAS_KCOV | 747 | config ARCH_HAS_KCOV |
| @@ -980,20 +1006,6 @@ config DEBUG_TIMEKEEPING | |||
| 980 | 1006 | ||
| 981 | If unsure, say N. | 1007 | If unsure, say N. |
| 982 | 1008 | ||
| 983 | config TIMER_STATS | ||
| 984 | bool "Collect kernel timers statistics" | ||
| 985 | depends on DEBUG_KERNEL && PROC_FS | ||
| 986 | help | ||
| 987 | If you say Y here, additional code will be inserted into the | ||
| 988 | timer routines to collect statistics about kernel timers being | ||
| 989 | reprogrammed. The statistics can be read from /proc/timer_stats. | ||
| 990 | The statistics collection is started by writing 1 to /proc/timer_stats, | ||
| 991 | writing 0 stops it. This feature is useful to collect information | ||
| 992 | about timer usage patterns in kernel and userspace. This feature | ||
| 993 | is lightweight if enabled in the kernel config but not activated | ||
| 994 | (it defaults to deactivated on bootup and will only be activated | ||
| 995 | if some application like powertop activates it explicitly). | ||
| 996 | |||
| 997 | config DEBUG_PREEMPT | 1009 | config DEBUG_PREEMPT |
| 998 | bool "Debug preemptible kernel" | 1010 | bool "Debug preemptible kernel" |
| 999 | depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT | 1011 | depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT |
| @@ -1180,6 +1192,18 @@ config LOCK_TORTURE_TEST | |||
| 1180 | Say M if you want these torture tests to build as a module. | 1192 | Say M if you want these torture tests to build as a module. |
| 1181 | Say N if you are unsure. | 1193 | Say N if you are unsure. |
| 1182 | 1194 | ||
| 1195 | config WW_MUTEX_SELFTEST | ||
| 1196 | tristate "Wait/wound mutex selftests" | ||
| 1197 | help | ||
| 1198 | This option provides a kernel module that runs tests on the | ||
| 1199 | struct ww_mutex locking API. | ||
| 1200 | |||
| 1201 | It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction | ||
| 1202 | with this test harness. | ||
| 1203 | |||
| 1204 | Say M if you want these self tests to build as a module. | ||
| 1205 | Say N if you are unsure. | ||
| 1206 | |||
| 1183 | endmenu # lock debugging | 1207 | endmenu # lock debugging |
| 1184 | 1208 | ||
| 1185 | config TRACE_IRQFLAGS | 1209 | config TRACE_IRQFLAGS |
| @@ -1450,6 +1474,7 @@ config RCU_CPU_STALL_TIMEOUT | |||
| 1450 | config RCU_TRACE | 1474 | config RCU_TRACE |
| 1451 | bool "Enable tracing for RCU" | 1475 | bool "Enable tracing for RCU" |
| 1452 | depends on DEBUG_KERNEL | 1476 | depends on DEBUG_KERNEL |
| 1477 | default y if TREE_RCU | ||
| 1453 | select TRACE_CLOCK | 1478 | select TRACE_CLOCK |
| 1454 | help | 1479 | help |
| 1455 | This option provides tracing in RCU which presents stats | 1480 | This option provides tracing in RCU which presents stats |
| @@ -1826,6 +1851,16 @@ config TEST_HASH | |||
| 1826 | This is intended to help people writing architecture-specific | 1851 | This is intended to help people writing architecture-specific |
| 1827 | optimized versions. If unsure, say N. | 1852 | optimized versions. If unsure, say N. |
| 1828 | 1853 | ||
| 1854 | config TEST_PARMAN | ||
| 1855 | tristate "Perform selftest on priority array manager" | ||
| 1856 | default n | ||
| 1857 | depends on PARMAN | ||
| 1858 | help | ||
| 1859 | Enable this option to test the priority array manager on boot | ||
| 1860 | (or module load). | ||
| 1861 | |||
| 1862 | If unsure, say N. | ||
| 1863 | |||
| 1829 | endmenu # runtime tests | 1864 | endmenu # runtime tests |
| 1830 | 1865 | ||
| 1831 | config PROVIDE_OHCI1394_DMA_INIT | 1866 | config PROVIDE_OHCI1394_DMA_INIT |
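The DEBUG_REFCOUNT entry added above hooks into the refcount_t API. As a point of reference, here is a minimal sketch of the usage pattern those checks police; refcount_set(), refcount_inc() and refcount_dec_and_test() are the <linux/refcount.h> helpers, while struct my_obj and its functions are made up for this illustration.

```c
#include <linux/refcount.h>
#include <linux/slab.h>

/* Illustrative object; only the refcount_t calls matter here. */
struct my_obj {
	refcount_t ref;
	/* ... payload ... */
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->ref, 1);	/* caller holds the first reference */
	return obj;
}

static void my_obj_get(struct my_obj *obj)
{
	refcount_inc(&obj->ref);	/* saturates instead of wrapping on overflow */
}

static void my_obj_put(struct my_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))	/* frees only on the final put */
		kfree(obj);
}
```

With DEBUG_REFCOUNT=y the same calls additionally WARN on dubious transitions, for example an increment of a counter that has already dropped to zero.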
diff --git a/lib/Makefile b/lib/Makefile
index 7b3008d58600..6b768b58a38d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -32,7 +32,7 @@ lib-$(CONFIG_HAS_DMA) += dma-noop.o | |||
| 32 | lib-y += kobject.o klist.o | 32 | lib-y += kobject.o klist.o |
| 33 | obj-y += lockref.o | 33 | obj-y += lockref.o |
| 34 | 34 | ||
| 35 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 35 | obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ |
| 36 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ | 36 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
| 37 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ | 37 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
| 38 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ | 38 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ |
| @@ -56,6 +56,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o | |||
| 56 | obj-$(CONFIG_TEST_PRINTF) += test_printf.o | 56 | obj-$(CONFIG_TEST_PRINTF) += test_printf.o |
| 57 | obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o | 57 | obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o |
| 58 | obj-$(CONFIG_TEST_UUID) += test_uuid.o | 58 | obj-$(CONFIG_TEST_UUID) += test_uuid.o |
| 59 | obj-$(CONFIG_TEST_PARMAN) += test_parman.o | ||
| 59 | 60 | ||
| 60 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 61 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 61 | CFLAGS_kobject.o += -DDEBUG | 62 | CFLAGS_kobject.o += -DDEBUG |
| @@ -230,3 +231,5 @@ obj-$(CONFIG_UBSAN) += ubsan.o | |||
| 230 | UBSAN_SANITIZE_ubsan.o := n | 231 | UBSAN_SANITIZE_ubsan.o := n |
| 231 | 232 | ||
| 232 | obj-$(CONFIG_SBITMAP) += sbitmap.o | 233 | obj-$(CONFIG_SBITMAP) += sbitmap.o |
| 234 | |||
| 235 | obj-$(CONFIG_PARMAN) += parman.o | ||
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 04c1ef717fe0..8c28cbd7e104 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
| @@ -52,9 +52,18 @@ static int debug_objects_fixups __read_mostly; | |||
| 52 | static int debug_objects_warnings __read_mostly; | 52 | static int debug_objects_warnings __read_mostly; |
| 53 | static int debug_objects_enabled __read_mostly | 53 | static int debug_objects_enabled __read_mostly |
| 54 | = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; | 54 | = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; |
| 55 | 55 | static int debug_objects_pool_size __read_mostly | |
| 56 | = ODEBUG_POOL_SIZE; | ||
| 57 | static int debug_objects_pool_min_level __read_mostly | ||
| 58 | = ODEBUG_POOL_MIN_LEVEL; | ||
| 56 | static struct debug_obj_descr *descr_test __read_mostly; | 59 | static struct debug_obj_descr *descr_test __read_mostly; |
| 57 | 60 | ||
| 61 | /* | ||
| 62 | * Track numbers of kmem_cache_alloc()/free() calls done. | ||
| 63 | */ | ||
| 64 | static int debug_objects_allocated; | ||
| 65 | static int debug_objects_freed; | ||
| 66 | |||
| 58 | static void free_obj_work(struct work_struct *work); | 67 | static void free_obj_work(struct work_struct *work); |
| 59 | static DECLARE_WORK(debug_obj_work, free_obj_work); | 68 | static DECLARE_WORK(debug_obj_work, free_obj_work); |
| 60 | 69 | ||
| @@ -88,13 +97,13 @@ static void fill_pool(void) | |||
| 88 | struct debug_obj *new; | 97 | struct debug_obj *new; |
| 89 | unsigned long flags; | 98 | unsigned long flags; |
| 90 | 99 | ||
| 91 | if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) | 100 | if (likely(obj_pool_free >= debug_objects_pool_min_level)) |
| 92 | return; | 101 | return; |
| 93 | 102 | ||
| 94 | if (unlikely(!obj_cache)) | 103 | if (unlikely(!obj_cache)) |
| 95 | return; | 104 | return; |
| 96 | 105 | ||
| 97 | while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { | 106 | while (obj_pool_free < debug_objects_pool_min_level) { |
| 98 | 107 | ||
| 99 | new = kmem_cache_zalloc(obj_cache, gfp); | 108 | new = kmem_cache_zalloc(obj_cache, gfp); |
| 100 | if (!new) | 109 | if (!new) |
| @@ -102,6 +111,7 @@ static void fill_pool(void) | |||
| 102 | 111 | ||
| 103 | raw_spin_lock_irqsave(&pool_lock, flags); | 112 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 104 | hlist_add_head(&new->node, &obj_pool); | 113 | hlist_add_head(&new->node, &obj_pool); |
| 114 | debug_objects_allocated++; | ||
| 105 | obj_pool_free++; | 115 | obj_pool_free++; |
| 106 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 116 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 107 | } | 117 | } |
| @@ -162,24 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 162 | 172 | ||
| 163 | /* | 173 | /* |
| 164 | * workqueue function to free objects. | 174 | * workqueue function to free objects. |
| 175 | * | ||
| 176 | * To reduce contention on the global pool_lock, the actual freeing of | ||
| 177 | * debug objects will be delayed if the pool_lock is busy. We also free | ||
| 178 | * the objects in a batch of 4 for each lock/unlock cycle. | ||
| 165 | */ | 179 | */ |
| 180 | #define ODEBUG_FREE_BATCH 4 | ||
| 181 | |||
| 166 | static void free_obj_work(struct work_struct *work) | 182 | static void free_obj_work(struct work_struct *work) |
| 167 | { | 183 | { |
| 168 | struct debug_obj *obj; | 184 | struct debug_obj *objs[ODEBUG_FREE_BATCH]; |
| 169 | unsigned long flags; | 185 | unsigned long flags; |
| 186 | int i; | ||
| 170 | 187 | ||
| 171 | raw_spin_lock_irqsave(&pool_lock, flags); | 188 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) |
| 172 | while (obj_pool_free > ODEBUG_POOL_SIZE) { | 189 | return; |
| 173 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | 190 | while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) { |
| 174 | hlist_del(&obj->node); | 191 | for (i = 0; i < ODEBUG_FREE_BATCH; i++) { |
| 175 | obj_pool_free--; | 192 | objs[i] = hlist_entry(obj_pool.first, |
| 193 | typeof(*objs[0]), node); | ||
| 194 | hlist_del(&objs[i]->node); | ||
| 195 | } | ||
| 196 | |||
| 197 | obj_pool_free -= ODEBUG_FREE_BATCH; | ||
| 198 | debug_objects_freed += ODEBUG_FREE_BATCH; | ||
| 176 | /* | 199 | /* |
| 177 | * We release pool_lock across kmem_cache_free() to | 200 | * We release pool_lock across kmem_cache_free() to |
| 178 | * avoid contention on pool_lock. | 201 | * avoid contention on pool_lock. |
| 179 | */ | 202 | */ |
| 180 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 203 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 181 | kmem_cache_free(obj_cache, obj); | 204 | for (i = 0; i < ODEBUG_FREE_BATCH; i++) |
| 182 | raw_spin_lock_irqsave(&pool_lock, flags); | 205 | kmem_cache_free(obj_cache, objs[i]); |
| 206 | if (!raw_spin_trylock_irqsave(&pool_lock, flags)) | ||
| 207 | return; | ||
| 183 | } | 208 | } |
| 184 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 209 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 185 | } | 210 | } |
| @@ -198,7 +223,7 @@ static void free_object(struct debug_obj *obj) | |||
| 198 | * schedule work when the pool is filled and the cache is | 223 | * schedule work when the pool is filled and the cache is |
| 199 | * initialized: | 224 | * initialized: |
| 200 | */ | 225 | */ |
| 201 | if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) | 226 | if (obj_pool_free > debug_objects_pool_size && obj_cache) |
| 202 | sched = 1; | 227 | sched = 1; |
| 203 | hlist_add_head(&obj->node, &obj_pool); | 228 | hlist_add_head(&obj->node, &obj_pool); |
| 204 | obj_pool_free++; | 229 | obj_pool_free++; |
| @@ -758,6 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v) | |||
| 758 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); | 783 | seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); |
| 759 | seq_printf(m, "pool_used :%d\n", obj_pool_used); | 784 | seq_printf(m, "pool_used :%d\n", obj_pool_used); |
| 760 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); | 785 | seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); |
| 786 | seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); | ||
| 787 | seq_printf(m, "objs_freed :%d\n", debug_objects_freed); | ||
| 761 | return 0; | 788 | return 0; |
| 762 | } | 789 | } |
| 763 | 790 | ||
| @@ -1116,4 +1143,11 @@ void __init debug_objects_mem_init(void) | |||
| 1116 | pr_warn("out of memory.\n"); | 1143 | pr_warn("out of memory.\n"); |
| 1117 | } else | 1144 | } else |
| 1118 | debug_objects_selftest(); | 1145 | debug_objects_selftest(); |
| 1146 | |||
| 1147 | /* | ||
| 1148 | * Increase the thresholds for allocating and freeing objects | ||
| 1149 | * according to the number of possible CPUs available in the system. | ||
| 1150 | */ | ||
| 1151 | debug_objects_pool_size += num_possible_cpus() * 32; | ||
| 1152 | debug_objects_pool_min_level += num_possible_cpus() * 4; | ||
| 1119 | } | 1153 | } |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8971370bfb16..60c57ec936db 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
| @@ -1155,6 +1155,11 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
| 1155 | dir2name[ref->direction]); | 1155 | dir2name[ref->direction]); |
| 1156 | } | 1156 | } |
| 1157 | 1157 | ||
| 1158 | /* | ||
| 1159 | * Drivers should use dma_mapping_error() to check the returned | ||
| 1160 | * addresses of dma_map_single() and dma_map_page(). | ||
| 1161 | * If not, print this warning message. See Documentation/DMA-API.txt. | ||
| 1162 | */ | ||
| 1158 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { | 1163 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { |
| 1159 | err_printk(ref->dev, entry, | 1164 | err_printk(ref->dev, entry, |
| 1160 | "DMA-API: device driver failed to check map error" | 1165 | "DMA-API: device driver failed to check map error" |
diff --git a/lib/halfmd4.c b/lib/halfmd4.c
deleted file mode 100644
index 137e861d9690..000000000000
--- a/lib/halfmd4.c
+++ /dev/null
| @@ -1,67 +0,0 @@ | |||
| 1 | #include <linux/compiler.h> | ||
| 2 | #include <linux/export.h> | ||
| 3 | #include <linux/cryptohash.h> | ||
| 4 | #include <linux/bitops.h> | ||
| 5 | |||
| 6 | /* F, G and H are basic MD4 functions: selection, majority, parity */ | ||
| 7 | #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) | ||
| 8 | #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) | ||
| 9 | #define H(x, y, z) ((x) ^ (y) ^ (z)) | ||
| 10 | |||
| 11 | /* | ||
| 12 | * The generic round function. The application is so specific that | ||
| 13 | * we don't bother protecting all the arguments with parens, as is generally | ||
| 14 | * good macro practice, in favor of extra legibility. | ||
| 15 | * Rotation is separate from addition to prevent recomputation | ||
| 16 | */ | ||
| 17 | #define ROUND(f, a, b, c, d, x, s) \ | ||
| 18 | (a += f(b, c, d) + x, a = rol32(a, s)) | ||
| 19 | #define K1 0 | ||
| 20 | #define K2 013240474631UL | ||
| 21 | #define K3 015666365641UL | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Basic cut-down MD4 transform. Returns only 32 bits of result. | ||
| 25 | */ | ||
| 26 | __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]) | ||
| 27 | { | ||
| 28 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | ||
| 29 | |||
| 30 | /* Round 1 */ | ||
| 31 | ROUND(F, a, b, c, d, in[0] + K1, 3); | ||
| 32 | ROUND(F, d, a, b, c, in[1] + K1, 7); | ||
| 33 | ROUND(F, c, d, a, b, in[2] + K1, 11); | ||
| 34 | ROUND(F, b, c, d, a, in[3] + K1, 19); | ||
| 35 | ROUND(F, a, b, c, d, in[4] + K1, 3); | ||
| 36 | ROUND(F, d, a, b, c, in[5] + K1, 7); | ||
| 37 | ROUND(F, c, d, a, b, in[6] + K1, 11); | ||
| 38 | ROUND(F, b, c, d, a, in[7] + K1, 19); | ||
| 39 | |||
| 40 | /* Round 2 */ | ||
| 41 | ROUND(G, a, b, c, d, in[1] + K2, 3); | ||
| 42 | ROUND(G, d, a, b, c, in[3] + K2, 5); | ||
| 43 | ROUND(G, c, d, a, b, in[5] + K2, 9); | ||
| 44 | ROUND(G, b, c, d, a, in[7] + K2, 13); | ||
| 45 | ROUND(G, a, b, c, d, in[0] + K2, 3); | ||
| 46 | ROUND(G, d, a, b, c, in[2] + K2, 5); | ||
| 47 | ROUND(G, c, d, a, b, in[4] + K2, 9); | ||
| 48 | ROUND(G, b, c, d, a, in[6] + K2, 13); | ||
| 49 | |||
| 50 | /* Round 3 */ | ||
| 51 | ROUND(H, a, b, c, d, in[3] + K3, 3); | ||
| 52 | ROUND(H, d, a, b, c, in[7] + K3, 9); | ||
| 53 | ROUND(H, c, d, a, b, in[2] + K3, 11); | ||
| 54 | ROUND(H, b, c, d, a, in[6] + K3, 15); | ||
| 55 | ROUND(H, a, b, c, d, in[1] + K3, 3); | ||
| 56 | ROUND(H, d, a, b, c, in[5] + K3, 9); | ||
| 57 | ROUND(H, c, d, a, b, in[0] + K3, 11); | ||
| 58 | ROUND(H, b, c, d, a, in[4] + K3, 15); | ||
| 59 | |||
| 60 | buf[0] += a; | ||
| 61 | buf[1] += b; | ||
| 62 | buf[2] += c; | ||
| 63 | buf[3] += d; | ||
| 64 | |||
| 65 | return buf[1]; /* "most hashed" word */ | ||
| 66 | } | ||
| 67 | EXPORT_SYMBOL(half_md4_transform); | ||
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
| @@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr, | |||
| 144 | 144 | ||
| 145 | return err; | 145 | return err; |
| 146 | } | 146 | } |
| 147 | EXPORT_SYMBOL_GPL(ioremap_page_range); | ||
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 25f572303801..e68604ae3ced 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
| @@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, | |||
| 730 | } | 730 | } |
| 731 | EXPORT_SYMBOL(iov_iter_copy_from_user_atomic); | 731 | EXPORT_SYMBOL(iov_iter_copy_from_user_atomic); |
| 732 | 732 | ||
| 733 | static inline void pipe_truncate(struct iov_iter *i) | ||
| 734 | { | ||
| 735 | struct pipe_inode_info *pipe = i->pipe; | ||
| 736 | if (pipe->nrbufs) { | ||
| 737 | size_t off = i->iov_offset; | ||
| 738 | int idx = i->idx; | ||
| 739 | int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1); | ||
| 740 | if (off) { | ||
| 741 | pipe->bufs[idx].len = off - pipe->bufs[idx].offset; | ||
| 742 | idx = next_idx(idx, pipe); | ||
| 743 | nrbufs++; | ||
| 744 | } | ||
| 745 | while (pipe->nrbufs > nrbufs) { | ||
| 746 | pipe_buf_release(pipe, &pipe->bufs[idx]); | ||
| 747 | idx = next_idx(idx, pipe); | ||
| 748 | pipe->nrbufs--; | ||
| 749 | } | ||
| 750 | } | ||
| 751 | } | ||
| 752 | |||
| 733 | static void pipe_advance(struct iov_iter *i, size_t size) | 753 | static void pipe_advance(struct iov_iter *i, size_t size) |
| 734 | { | 754 | { |
| 735 | struct pipe_inode_info *pipe = i->pipe; | 755 | struct pipe_inode_info *pipe = i->pipe; |
| 736 | struct pipe_buffer *buf; | ||
| 737 | int idx = i->idx; | ||
| 738 | size_t off = i->iov_offset, orig_sz; | ||
| 739 | |||
| 740 | if (unlikely(i->count < size)) | 756 | if (unlikely(i->count < size)) |
| 741 | size = i->count; | 757 | size = i->count; |
| 742 | orig_sz = size; | ||
| 743 | |||
| 744 | if (size) { | 758 | if (size) { |
| 759 | struct pipe_buffer *buf; | ||
| 760 | size_t off = i->iov_offset, left = size; | ||
| 761 | int idx = i->idx; | ||
| 745 | if (off) /* make it relative to the beginning of buffer */ | 762 | if (off) /* make it relative to the beginning of buffer */ |
| 746 | size += off - pipe->bufs[idx].offset; | 763 | left += off - pipe->bufs[idx].offset; |
| 747 | while (1) { | 764 | while (1) { |
| 748 | buf = &pipe->bufs[idx]; | 765 | buf = &pipe->bufs[idx]; |
| 749 | if (size <= buf->len) | 766 | if (left <= buf->len) |
| 750 | break; | 767 | break; |
| 751 | size -= buf->len; | 768 | left -= buf->len; |
| 752 | idx = next_idx(idx, pipe); | 769 | idx = next_idx(idx, pipe); |
| 753 | } | 770 | } |
| 754 | buf->len = size; | ||
| 755 | i->idx = idx; | 771 | i->idx = idx; |
| 756 | off = i->iov_offset = buf->offset + size; | 772 | i->iov_offset = buf->offset + left; |
| 757 | } | ||
| 758 | if (off) | ||
| 759 | idx = next_idx(idx, pipe); | ||
| 760 | if (pipe->nrbufs) { | ||
| 761 | int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); | ||
| 762 | /* [curbuf,unused) is in use. Free [idx,unused) */ | ||
| 763 | while (idx != unused) { | ||
| 764 | pipe_buf_release(pipe, &pipe->bufs[idx]); | ||
| 765 | idx = next_idx(idx, pipe); | ||
| 766 | pipe->nrbufs--; | ||
| 767 | } | ||
| 768 | } | 773 | } |
| 769 | i->count -= orig_sz; | 774 | i->count -= size; |
| 775 | /* ... and discard everything past that point */ | ||
| 776 | pipe_truncate(i); | ||
| 770 | } | 777 | } |
| 771 | 778 | ||
| 772 | void iov_iter_advance(struct iov_iter *i, size_t size) | 779 | void iov_iter_advance(struct iov_iter *i, size_t size) |
| @@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction, | |||
| 826 | size_t count) | 833 | size_t count) |
| 827 | { | 834 | { |
| 828 | BUG_ON(direction != ITER_PIPE); | 835 | BUG_ON(direction != ITER_PIPE); |
| 836 | WARN_ON(pipe->nrbufs == pipe->buffers); | ||
| 829 | i->type = direction; | 837 | i->type = direction; |
| 830 | i->pipe = pipe; | 838 | i->pipe = pipe; |
| 831 | i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); | 839 | i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); |
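pipe_truncate() and the reworked pipe_advance() both rely on pipe->buffers being a power of two, so that ring indices can wrap and be counted with a mask, as in (idx - pipe->curbuf) & (pipe->buffers - 1). A small stand-alone illustration of that arithmetic (plain user-space C; the names are invented for the example):

```c
#include <assert.h>

#define BUFFERS 16	/* plays the role of pipe->buffers; must be a power of two */

/* Advance one slot, wrapping at the end of the ring (like next_idx()). */
static unsigned int next_slot(unsigned int idx)
{
	return (idx + 1) & (BUFFERS - 1);
}

/* Number of occupied slots between curbuf and idx, correct across wrap. */
static unsigned int slots_in_use(unsigned int curbuf, unsigned int idx)
{
	return (idx - curbuf) & (BUFFERS - 1);
}

int main(void)
{
	assert(next_slot(15) == 0);		/* 15 wraps back to 0 */
	assert(slots_in_use(14, 2) == 4);	/* slots 14, 15, 0, 1 are in use */
	return 0;
}
```

The subtract-and-mask form is what lets pipe_truncate() recompute nrbufs from the iterator's idx alone, even after the indices have wrapped past zero.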
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 75554754eadf..5f7999eacad5 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
| @@ -77,7 +77,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, | |||
| 77 | * Force flush any remote buffers that might be stuck in IRQ context | 77 | * Force flush any remote buffers that might be stuck in IRQ context |
| 78 | * and therefore could not run their irq_work. | 78 | * and therefore could not run their irq_work. |
| 79 | */ | 79 | */ |
| 80 | printk_nmi_flush(); | 80 | printk_safe_flush(); |
| 81 | 81 | ||
| 82 | clear_bit_unlock(0, &backtrace_flag); | 82 | clear_bit_unlock(0, &backtrace_flag); |
| 83 | put_cpu(); | 83 | put_cpu(); |
diff --git a/lib/parman.c b/lib/parman.c
new file mode 100644
index 000000000000..c6e42a8db824
--- /dev/null
+++ b/lib/parman.c
| @@ -0,0 +1,376 @@ | |||
| 1 | /* | ||
| 2 | * lib/parman.c - Manager for linear priority array areas | ||
| 3 | * Copyright (c) 2017 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> | ||
| 5 | * | ||
| 6 | * Redistribution and use in source and binary forms, with or without | ||
| 7 | * modification, are permitted provided that the following conditions are met: | ||
| 8 | * | ||
| 9 | * 1. Redistributions of source code must retain the above copyright | ||
| 10 | * notice, this list of conditions and the following disclaimer. | ||
| 11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 12 | * notice, this list of conditions and the following disclaimer in the | ||
| 13 | * documentation and/or other materials provided with the distribution. | ||
| 14 | * 3. Neither the names of the copyright holders nor the names of its | ||
| 15 | * contributors may be used to endorse or promote products derived from | ||
| 16 | * this software without specific prior written permission. | ||
| 17 | * | ||
| 18 | * Alternatively, this software may be distributed under the terms of the | ||
| 19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 20 | * Software Foundation. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
| 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
| 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 32 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/kernel.h> | ||
| 36 | #include <linux/module.h> | ||
| 37 | #include <linux/slab.h> | ||
| 38 | #include <linux/export.h> | ||
| 39 | #include <linux/list.h> | ||
| 40 | #include <linux/err.h> | ||
| 41 | #include <linux/parman.h> | ||
| 42 | |||
| 43 | struct parman_algo { | ||
| 44 | int (*item_add)(struct parman *parman, struct parman_prio *prio, | ||
| 45 | struct parman_item *item); | ||
| 46 | void (*item_remove)(struct parman *parman, struct parman_prio *prio, | ||
| 47 | struct parman_item *item); | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct parman { | ||
| 51 | const struct parman_ops *ops; | ||
| 52 | void *priv; | ||
| 53 | const struct parman_algo *algo; | ||
| 54 | unsigned long count; | ||
| 55 | unsigned long limit_count; | ||
| 56 | struct list_head prio_list; | ||
| 57 | }; | ||
| 58 | |||
| 59 | static int parman_enlarge(struct parman *parman) | ||
| 60 | { | ||
| 61 | unsigned long new_count = parman->limit_count + | ||
| 62 | parman->ops->resize_step; | ||
| 63 | int err; | ||
| 64 | |||
| 65 | err = parman->ops->resize(parman->priv, new_count); | ||
| 66 | if (err) | ||
| 67 | return err; | ||
| 68 | parman->limit_count = new_count; | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int parman_shrink(struct parman *parman) | ||
| 73 | { | ||
| 74 | unsigned long new_count = parman->limit_count - | ||
| 75 | parman->ops->resize_step; | ||
| 76 | int err; | ||
| 77 | |||
| 78 | if (new_count < parman->ops->base_count) | ||
| 79 | return 0; | ||
| 80 | err = parman->ops->resize(parman->priv, new_count); | ||
| 81 | if (err) | ||
| 82 | return err; | ||
| 83 | parman->limit_count = new_count; | ||
| 84 | return 0; | ||
| 85 | } | ||
| 86 | |||
| 87 | static bool parman_prio_used(struct parman_prio *prio) | ||
| 88 | |||
| 89 | { | ||
| 90 | return !list_empty(&prio->item_list); | ||
| 91 | } | ||
| 92 | |||
| 93 | static struct parman_item *parman_prio_first_item(struct parman_prio *prio) | ||
| 94 | { | ||
| 95 | return list_first_entry(&prio->item_list, | ||
| 96 | typeof(struct parman_item), list); | ||
| 97 | } | ||
| 98 | |||
| 99 | static unsigned long parman_prio_first_index(struct parman_prio *prio) | ||
| 100 | { | ||
| 101 | return parman_prio_first_item(prio)->index; | ||
| 102 | } | ||
| 103 | |||
| 104 | static struct parman_item *parman_prio_last_item(struct parman_prio *prio) | ||
| 105 | { | ||
| 106 | return list_last_entry(&prio->item_list, | ||
| 107 | typeof(struct parman_item), list); | ||
| 108 | } | ||
| 109 | |||
| 110 | static unsigned long parman_prio_last_index(struct parman_prio *prio) | ||
| 111 | { | ||
| 112 | return parman_prio_last_item(prio)->index; | ||
| 113 | } | ||
| 114 | |||
| 115 | static unsigned long parman_lsort_new_index_find(struct parman *parman, | ||
| 116 | struct parman_prio *prio) | ||
| 117 | { | ||
| 118 | list_for_each_entry_from_reverse(prio, &parman->prio_list, list) { | ||
| 119 | if (!parman_prio_used(prio)) | ||
| 120 | continue; | ||
| 121 | return parman_prio_last_index(prio) + 1; | ||
| 122 | } | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | static void __parman_prio_move(struct parman *parman, struct parman_prio *prio, | ||
| 127 | struct parman_item *item, unsigned long to_index, | ||
| 128 | unsigned long count) | ||
| 129 | { | ||
| 130 | parman->ops->move(parman->priv, item->index, to_index, count); | ||
| 131 | } | ||
| 132 | |||
| 133 | static void parman_prio_shift_down(struct parman *parman, | ||
| 134 | struct parman_prio *prio) | ||
| 135 | { | ||
| 136 | struct parman_item *item; | ||
| 137 | unsigned long to_index; | ||
| 138 | |||
| 139 | if (!parman_prio_used(prio)) | ||
| 140 | return; | ||
| 141 | item = parman_prio_first_item(prio); | ||
| 142 | to_index = parman_prio_last_index(prio) + 1; | ||
| 143 | __parman_prio_move(parman, prio, item, to_index, 1); | ||
| 144 | list_move_tail(&item->list, &prio->item_list); | ||
| 145 | item->index = to_index; | ||
| 146 | } | ||
| 147 | |||
| 148 | static void parman_prio_shift_up(struct parman *parman, | ||
| 149 | struct parman_prio *prio) | ||
| 150 | { | ||
| 151 | struct parman_item *item; | ||
| 152 | unsigned long to_index; | ||
| 153 | |||
| 154 | if (!parman_prio_used(prio)) | ||
| 155 | return; | ||
| 156 | item = parman_prio_last_item(prio); | ||
| 157 | to_index = parman_prio_first_index(prio) - 1; | ||
| 158 | __parman_prio_move(parman, prio, item, to_index, 1); | ||
| 159 | list_move(&item->list, &prio->item_list); | ||
| 160 | item->index = to_index; | ||
| 161 | } | ||
| 162 | |||
| 163 | static void parman_prio_item_remove(struct parman *parman, | ||
| 164 | struct parman_prio *prio, | ||
| 165 | struct parman_item *item) | ||
| 166 | { | ||
| 167 | struct parman_item *last_item; | ||
| 168 | unsigned long to_index; | ||
| 169 | |||
| 170 | last_item = parman_prio_last_item(prio); | ||
| 171 | if (last_item == item) { | ||
| 172 | list_del(&item->list); | ||
| 173 | return; | ||
| 174 | } | ||
| 175 | to_index = item->index; | ||
| 176 | __parman_prio_move(parman, prio, last_item, to_index, 1); | ||
| 177 | list_del(&last_item->list); | ||
| 178 | list_replace(&item->list, &last_item->list); | ||
| 179 | last_item->index = to_index; | ||
| 180 | } | ||
| 181 | |||
| 182 | static int parman_lsort_item_add(struct parman *parman, | ||
| 183 | struct parman_prio *prio, | ||
| 184 | struct parman_item *item) | ||
| 185 | { | ||
| 186 | struct parman_prio *prio2; | ||
| 187 | unsigned long new_index; | ||
| 188 | int err; | ||
| 189 | |||
| 190 | if (parman->count + 1 > parman->limit_count) { | ||
| 191 | err = parman_enlarge(parman); | ||
| 192 | if (err) | ||
| 193 | return err; | ||
| 194 | } | ||
| 195 | |||
| 196 | new_index = parman_lsort_new_index_find(parman, prio); | ||
| 197 | list_for_each_entry_reverse(prio2, &parman->prio_list, list) { | ||
| 198 | if (prio2 == prio) | ||
| 199 | break; | ||
| 200 | parman_prio_shift_down(parman, prio2); | ||
| 201 | } | ||
| 202 | item->index = new_index; | ||
| 203 | list_add_tail(&item->list, &prio->item_list); | ||
| 204 | parman->count++; | ||
| 205 | return 0; | ||
| 206 | } | ||
| 207 | |||
| 208 | static void parman_lsort_item_remove(struct parman *parman, | ||
| 209 | struct parman_prio *prio, | ||
| 210 | struct parman_item *item) | ||
| 211 | { | ||
| 212 | parman_prio_item_remove(parman, prio, item); | ||
| 213 | list_for_each_entry_continue(prio, &parman->prio_list, list) | ||
| 214 | parman_prio_shift_up(parman, prio); | ||
| 215 | parman->count--; | ||
| 216 | if (parman->limit_count - parman->count >= parman->ops->resize_step) | ||
| 217 | parman_shrink(parman); | ||
| 218 | } | ||
| 219 | |||
| 220 | static const struct parman_algo parman_lsort = { | ||
| 221 | .item_add = parman_lsort_item_add, | ||
| 222 | .item_remove = parman_lsort_item_remove, | ||
| 223 | }; | ||
| 224 | |||
| 225 | static const struct parman_algo *parman_algos[] = { | ||
| 226 | &parman_lsort, | ||
| 227 | }; | ||
| 228 | |||
| 229 | /** | ||
| 230 | * parman_create - creates a new parman instance | ||
| 231 | * @ops: caller-specific callbacks | ||
| 232 | * @priv: pointer to private data passed to the ops | ||
| 233 | * | ||
| 234 | * Note: all locking must be provided by the caller. | ||
| 235 | * | ||
| 236 | * Each parman instance manages an array area with chunks of entries | ||
| 237 | * with the same priority. Consider the following example: | ||
| 238 | * | ||
| 239 | * item 1 with prio 10 | ||
| 240 | * item 2 with prio 10 | ||
| 241 | * item 3 with prio 10 | ||
| 242 | * item 4 with prio 20 | ||
| 243 | * item 5 with prio 20 | ||
| 244 | * item 6 with prio 30 | ||
| 245 | * item 7 with prio 30 | ||
| 246 | * item 8 with prio 30 | ||
| 247 | * | ||
| 248 | * In this example, there are 3 priority chunks. The order of the priorities | ||
| 249 | * matters, however the order of items within a single priority chunk does not | ||
| 250 | * matter. So the same array could be ordered as follows: | ||
| 251 | * | ||
| 252 | * item 2 with prio 10 | ||
| 253 | * item 3 with prio 10 | ||
| 254 | * item 1 with prio 10 | ||
| 255 | * item 5 with prio 20 | ||
| 256 | * item 4 with prio 20 | ||
| 257 | * item 7 with prio 30 | ||
| 258 | * item 8 with prio 30 | ||
| 259 | * item 6 with prio 30 | ||
| 260 | * | ||
| 261 | * The goal of parman is to maintain the priority ordering. The caller | ||
| 262 | * provides @ops with callbacks parman uses to move the items | ||
| 263 | * and resize the array area. | ||
| 264 | * | ||
| 265 | * Returns a pointer to newly created parman instance in case of success, | ||
| 266 | * otherwise it returns NULL. | ||
| 267 | */ | ||
| 268 | struct parman *parman_create(const struct parman_ops *ops, void *priv) | ||
| 269 | { | ||
| 270 | struct parman *parman; | ||
| 271 | |||
| 272 | parman = kzalloc(sizeof(*parman), GFP_KERNEL); | ||
| 273 | if (!parman) | ||
| 274 | return NULL; | ||
| 275 | INIT_LIST_HEAD(&parman->prio_list); | ||
| 276 | parman->ops = ops; | ||
| 277 | parman->priv = priv; | ||
| 278 | parman->limit_count = ops->base_count; | ||
| 279 | parman->algo = parman_algos[ops->algo]; | ||
| 280 | return parman; | ||
| 281 | } | ||
| 282 | EXPORT_SYMBOL(parman_create); | ||
| 283 | |||
| 284 | /** | ||
| 285 | * parman_destroy - destroys existing parman instance | ||
| 286 | * @parman: parman instance | ||
| 287 | * | ||
| 288 | * Note: all locking must be provided by the caller. | ||
| 289 | */ | ||
| 290 | void parman_destroy(struct parman *parman) | ||
| 291 | { | ||
| 292 | WARN_ON(!list_empty(&parman->prio_list)); | ||
| 293 | kfree(parman); | ||
| 294 | } | ||
| 295 | EXPORT_SYMBOL(parman_destroy); | ||
| 296 | |||
| 297 | /** | ||
| 298 | * parman_prio_init - initializes a parman priority chunk | ||
| 299 | * @parman: parman instance | ||
| 300 | * @prio: parman prio structure to be initialized | ||
| 301 | * @priority: desired priority of the chunk | ||
| 302 | * | ||
| 303 | * Note: all locking must be provided by the caller. | ||
| 304 | * | ||
| 305 | * Before the caller can add an item with a certain priority, it has to | ||
| 306 | * initialize a priority chunk for it using this function. | ||
| 307 | */ | ||
| 308 | void parman_prio_init(struct parman *parman, struct parman_prio *prio, | ||
| 309 | unsigned long priority) | ||
| 310 | { | ||
| 311 | struct parman_prio *prio2; | ||
| 312 | struct list_head *pos; | ||
| 313 | |||
| 314 | INIT_LIST_HEAD(&prio->item_list); | ||
| 315 | prio->priority = priority; | ||
| 316 | |||
| 317 | /* Position inside the list according to priority */ | ||
| 318 | list_for_each(pos, &parman->prio_list) { | ||
| 319 | prio2 = list_entry(pos, typeof(*prio2), list); | ||
| 320 | if (prio2->priority > prio->priority) | ||
| 321 | break; | ||
| 322 | } | ||
| 323 | list_add_tail(&prio->list, pos); | ||
| 324 | } | ||
| 325 | EXPORT_SYMBOL(parman_prio_init); | ||
| 326 | |||
| 327 | /** | ||
| 328 | * parman_prio_fini - finalizes use of parman priority chunk | ||
| 329 | * @prio: parman prio structure | ||
| 330 | * | ||
| 331 | * Note: all locking must be provided by the caller. | ||
| 332 | */ | ||
| 333 | void parman_prio_fini(struct parman_prio *prio) | ||
| 334 | { | ||
| 335 | WARN_ON(parman_prio_used(prio)); | ||
| 336 | list_del(&prio->list); | ||
| 337 | } | ||
| 338 | EXPORT_SYMBOL(parman_prio_fini); | ||
| 339 | |||
| 340 | /** | ||
| 341 | * parman_item_add - adds a parman item under defined priority | ||
| 342 | * @parman: parman instance | ||
| 343 | * @prio: parman prio instance to add the item to | ||
| 344 | * @item: parman item instance | ||
| 345 | * | ||
| 346 | * Note: all locking must be provided by the caller. | ||
| 347 | * | ||
| 348 | * Adds the item to an array managed by the parman instance under the specified priority. | ||
| 349 | * | ||
| 350 | * Returns 0 in case of success, negative number to indicate an error. | ||
| 351 | */ | ||
| 352 | int parman_item_add(struct parman *parman, struct parman_prio *prio, | ||
| 353 | struct parman_item *item) | ||
| 354 | { | ||
| 355 | return parman->algo->item_add(parman, prio, item); | ||
| 356 | } | ||
| 357 | EXPORT_SYMBOL(parman_item_add); | ||
| 358 | |||
| 359 | /** | ||
| 360 | * parman_item_remove - deletes parman item | ||
| 361 | * @parman: parman instance | ||
| 362 | * @prio: parman prio instance to delete the item from | ||
| 363 | * @item: parman item instance | ||
| 364 | * | ||
| 365 | * Note: all locking must be provided by the caller. | ||
| 366 | */ | ||
| 367 | void parman_item_remove(struct parman *parman, struct parman_prio *prio, | ||
| 368 | struct parman_item *item) | ||
| 369 | { | ||
| 370 | parman->algo->item_remove(parman, prio, item); | ||
| 371 | } | ||
| 372 | EXPORT_SYMBOL(parman_item_remove); | ||
| 373 | |||
| 374 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 375 | MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); | ||
| 376 | MODULE_DESCRIPTION("Priority-based array manager"); | ||
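The kernel-doc above describes what parman maintains; below is a rough sketch of how a caller might wire up the API, inferred only from what this file dereferences (ops->base_count, ops->resize_step, ops->algo, ops->resize, ops->move and the exported parman_* entry points). The exact struct parman_ops layout and the algorithm enum live in include/linux/parman.h, which is not part of this diff, so treat PARMAN_ALGO_TYPE_LSORT and the field names as assumptions.

```c
#include <linux/errno.h>
#include <linux/parman.h>

/* Callback signatures inferred from the ops->resize()/ops->move() calls above. */
static int my_resize(void *priv, unsigned long new_count)
{
	/* Grow or shrink the backing array (e.g. a device table) to new_count. */
	return 0;
}

static void my_move(void *priv, unsigned long from_index,
		    unsigned long to_index, unsigned long count)
{
	/* Copy 'count' entries from from_index to to_index in the backing array. */
}

static const struct parman_ops my_parman_ops = {
	.base_count	= 16,
	.resize_step	= 16,
	.algo		= PARMAN_ALGO_TYPE_LSORT,	/* assumed enum name from the header */
	.resize		= my_resize,
	.move		= my_move,
};

static int my_example(void)
{
	struct parman *parman;
	struct parman_prio prio;
	struct parman_item item;
	int err;

	parman = parman_create(&my_parman_ops, NULL);
	if (!parman)
		return -ENOMEM;

	parman_prio_init(parman, &prio, 10);
	err = parman_item_add(parman, &prio, &item);
	if (!err) {
		/* item.index now holds the entry's slot in the managed array. */
		parman_item_remove(parman, &prio, &item);
	}
	parman_prio_fini(&prio);
	parman_destroy(parman);
	return err;
}
```

The lib/test_parman.c selftest listed in the diffstat above exercises these same entry points.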
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0b92d605fb69..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
| @@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) | |||
| 769 | struct radix_tree_node *old = child; | 769 | struct radix_tree_node *old = child; |
| 770 | offset = child->offset + 1; | 770 | offset = child->offset + 1; |
| 771 | child = child->parent; | 771 | child = child->parent; |
| 772 | WARN_ON_ONCE(!list_empty(&node->private_list)); | 772 | WARN_ON_ONCE(!list_empty(&old->private_list)); |
| 773 | radix_tree_node_free(old); | 773 | radix_tree_node_free(old); |
| 774 | if (old == entry_to_node(node)) | 774 | if (old == entry_to_node(node)) |
| 775 | return; | 775 | return; |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 32d0ad058380..172454e6b979 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
| @@ -32,6 +32,11 @@ | |||
| 32 | #define HASH_MIN_SIZE 4U | 32 | #define HASH_MIN_SIZE 4U |
| 33 | #define BUCKET_LOCKS_PER_CPU 32UL | 33 | #define BUCKET_LOCKS_PER_CPU 32UL |
| 34 | 34 | ||
| 35 | union nested_table { | ||
| 36 | union nested_table __rcu *table; | ||
| 37 | struct rhash_head __rcu *bucket; | ||
| 38 | }; | ||
| 39 | |||
| 35 | static u32 head_hashfn(struct rhashtable *ht, | 40 | static u32 head_hashfn(struct rhashtable *ht, |
| 36 | const struct bucket_table *tbl, | 41 | const struct bucket_table *tbl, |
| 37 | const struct rhash_head *he) | 42 | const struct rhash_head *he) |
| @@ -76,6 +81,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, | |||
| 76 | /* Never allocate more than 0.5 locks per bucket */ | 81 | /* Never allocate more than 0.5 locks per bucket */ |
| 77 | size = min_t(unsigned int, size, tbl->size >> 1); | 82 | size = min_t(unsigned int, size, tbl->size >> 1); |
| 78 | 83 | ||
| 84 | if (tbl->nest) | ||
| 85 | size = min(size, 1U << tbl->nest); | ||
| 86 | |||
| 79 | if (sizeof(spinlock_t) != 0) { | 87 | if (sizeof(spinlock_t) != 0) { |
| 80 | tbl->locks = NULL; | 88 | tbl->locks = NULL; |
| 81 | #ifdef CONFIG_NUMA | 89 | #ifdef CONFIG_NUMA |
| @@ -99,8 +107,45 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, | |||
| 99 | return 0; | 107 | return 0; |
| 100 | } | 108 | } |
| 101 | 109 | ||
| 110 | static void nested_table_free(union nested_table *ntbl, unsigned int size) | ||
| 111 | { | ||
| 112 | const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); | ||
| 113 | const unsigned int len = 1 << shift; | ||
| 114 | unsigned int i; | ||
| 115 | |||
| 116 | ntbl = rcu_dereference_raw(ntbl->table); | ||
| 117 | if (!ntbl) | ||
| 118 | return; | ||
| 119 | |||
| 120 | if (size > len) { | ||
| 121 | size >>= shift; | ||
| 122 | for (i = 0; i < len; i++) | ||
| 123 | nested_table_free(ntbl + i, size); | ||
| 124 | } | ||
| 125 | |||
| 126 | kfree(ntbl); | ||
| 127 | } | ||
| 128 | |||
| 129 | static void nested_bucket_table_free(const struct bucket_table *tbl) | ||
| 130 | { | ||
| 131 | unsigned int size = tbl->size >> tbl->nest; | ||
| 132 | unsigned int len = 1 << tbl->nest; | ||
| 133 | union nested_table *ntbl; | ||
| 134 | unsigned int i; | ||
| 135 | |||
| 136 | ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); | ||
| 137 | |||
| 138 | for (i = 0; i < len; i++) | ||
| 139 | nested_table_free(ntbl + i, size); | ||
| 140 | |||
| 141 | kfree(ntbl); | ||
| 142 | } | ||
| 143 | |||
| 102 | static void bucket_table_free(const struct bucket_table *tbl) | 144 | static void bucket_table_free(const struct bucket_table *tbl) |
| 103 | { | 145 | { |
| 146 | if (tbl->nest) | ||
| 147 | nested_bucket_table_free(tbl); | ||
| 148 | |||
| 104 | if (tbl) | 149 | if (tbl) |
| 105 | kvfree(tbl->locks); | 150 | kvfree(tbl->locks); |
| 106 | 151 | ||
| @@ -112,6 +157,59 @@ static void bucket_table_free_rcu(struct rcu_head *head) | |||
| 112 | bucket_table_free(container_of(head, struct bucket_table, rcu)); | 157 | bucket_table_free(container_of(head, struct bucket_table, rcu)); |
| 113 | } | 158 | } |
| 114 | 159 | ||
| 160 | static union nested_table *nested_table_alloc(struct rhashtable *ht, | ||
| 161 | union nested_table __rcu **prev, | ||
| 162 | unsigned int shifted, | ||
| 163 | unsigned int nhash) | ||
| 164 | { | ||
| 165 | union nested_table *ntbl; | ||
| 166 | int i; | ||
| 167 | |||
| 168 | ntbl = rcu_dereference(*prev); | ||
| 169 | if (ntbl) | ||
| 170 | return ntbl; | ||
| 171 | |||
| 172 | ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); | ||
| 173 | |||
| 174 | if (ntbl && shifted) { | ||
| 175 | for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) | ||
| 176 | INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, | ||
| 177 | (i << shifted) | nhash); | ||
| 178 | } | ||
| 179 | |||
| 180 | rcu_assign_pointer(*prev, ntbl); | ||
| 181 | |||
| 182 | return ntbl; | ||
| 183 | } | ||
| 184 | |||
| 185 | static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, | ||
| 186 | size_t nbuckets, | ||
| 187 | gfp_t gfp) | ||
| 188 | { | ||
| 189 | const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); | ||
| 190 | struct bucket_table *tbl; | ||
| 191 | size_t size; | ||
| 192 | |||
| 193 | if (nbuckets < (1 << (shift + 1))) | ||
| 194 | return NULL; | ||
| 195 | |||
| 196 | size = sizeof(*tbl) + sizeof(tbl->buckets[0]); | ||
| 197 | |||
| 198 | tbl = kzalloc(size, gfp); | ||
| 199 | if (!tbl) | ||
| 200 | return NULL; | ||
| 201 | |||
| 202 | if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, | ||
| 203 | 0, 0)) { | ||
| 204 | kfree(tbl); | ||
| 205 | return NULL; | ||
| 206 | } | ||
| 207 | |||
| 208 | tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; | ||
| 209 | |||
| 210 | return tbl; | ||
| 211 | } | ||
| 212 | |||
| 115 | static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | 213 | static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, |
| 116 | size_t nbuckets, | 214 | size_t nbuckets, |
| 117 | gfp_t gfp) | 215 | gfp_t gfp) |
| @@ -126,10 +224,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
| 126 | tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); | 224 | tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); |
| 127 | if (tbl == NULL && gfp == GFP_KERNEL) | 225 | if (tbl == NULL && gfp == GFP_KERNEL) |
| 128 | tbl = vzalloc(size); | 226 | tbl = vzalloc(size); |
| 227 | |||
| 228 | size = nbuckets; | ||
| 229 | |||
| 230 | if (tbl == NULL && gfp != GFP_KERNEL) { | ||
| 231 | tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); | ||
| 232 | nbuckets = 0; | ||
| 233 | } | ||
| 129 | if (tbl == NULL) | 234 | if (tbl == NULL) |
| 130 | return NULL; | 235 | return NULL; |
| 131 | 236 | ||
| 132 | tbl->size = nbuckets; | 237 | tbl->size = size; |
| 133 | 238 | ||
| 134 | if (alloc_bucket_locks(ht, tbl, gfp) < 0) { | 239 | if (alloc_bucket_locks(ht, tbl, gfp) < 0) { |
| 135 | bucket_table_free(tbl); | 240 | bucket_table_free(tbl); |
| @@ -164,12 +269,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) | |||
| 164 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); | 269 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
| 165 | struct bucket_table *new_tbl = rhashtable_last_table(ht, | 270 | struct bucket_table *new_tbl = rhashtable_last_table(ht, |
| 166 | rht_dereference_rcu(old_tbl->future_tbl, ht)); | 271 | rht_dereference_rcu(old_tbl->future_tbl, ht)); |
| 167 | struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash]; | 272 | struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); |
| 168 | int err = -ENOENT; | 273 | int err = -EAGAIN; |
| 169 | struct rhash_head *head, *next, *entry; | 274 | struct rhash_head *head, *next, *entry; |
| 170 | spinlock_t *new_bucket_lock; | 275 | spinlock_t *new_bucket_lock; |
| 171 | unsigned int new_hash; | 276 | unsigned int new_hash; |
| 172 | 277 | ||
| 278 | if (new_tbl->nest) | ||
| 279 | goto out; | ||
| 280 | |||
| 281 | err = -ENOENT; | ||
| 282 | |||
| 173 | rht_for_each(entry, old_tbl, old_hash) { | 283 | rht_for_each(entry, old_tbl, old_hash) { |
| 174 | err = 0; | 284 | err = 0; |
| 175 | next = rht_dereference_bucket(entry->next, old_tbl, old_hash); | 285 | next = rht_dereference_bucket(entry->next, old_tbl, old_hash); |
| @@ -202,19 +312,26 @@ out: | |||
| 202 | return err; | 312 | return err; |
| 203 | } | 313 | } |
| 204 | 314 | ||
| 205 | static void rhashtable_rehash_chain(struct rhashtable *ht, | 315 | static int rhashtable_rehash_chain(struct rhashtable *ht, |
| 206 | unsigned int old_hash) | 316 | unsigned int old_hash) |
| 207 | { | 317 | { |
| 208 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); | 318 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
| 209 | spinlock_t *old_bucket_lock; | 319 | spinlock_t *old_bucket_lock; |
| 320 | int err; | ||
| 210 | 321 | ||
| 211 | old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); | 322 | old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); |
| 212 | 323 | ||
| 213 | spin_lock_bh(old_bucket_lock); | 324 | spin_lock_bh(old_bucket_lock); |
| 214 | while (!rhashtable_rehash_one(ht, old_hash)) | 325 | while (!(err = rhashtable_rehash_one(ht, old_hash))) |
| 215 | ; | 326 | ; |
| 216 | old_tbl->rehash++; | 327 | |
| 328 | if (err == -ENOENT) { | ||
| 329 | old_tbl->rehash++; | ||
| 330 | err = 0; | ||
| 331 | } | ||
| 217 | spin_unlock_bh(old_bucket_lock); | 332 | spin_unlock_bh(old_bucket_lock); |
| 333 | |||
| 334 | return err; | ||
| 218 | } | 335 | } |
| 219 | 336 | ||
| 220 | static int rhashtable_rehash_attach(struct rhashtable *ht, | 337 | static int rhashtable_rehash_attach(struct rhashtable *ht, |
| @@ -246,13 +363,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht) | |||
| 246 | struct bucket_table *new_tbl; | 363 | struct bucket_table *new_tbl; |
| 247 | struct rhashtable_walker *walker; | 364 | struct rhashtable_walker *walker; |
| 248 | unsigned int old_hash; | 365 | unsigned int old_hash; |
| 366 | int err; | ||
| 249 | 367 | ||
| 250 | new_tbl = rht_dereference(old_tbl->future_tbl, ht); | 368 | new_tbl = rht_dereference(old_tbl->future_tbl, ht); |
| 251 | if (!new_tbl) | 369 | if (!new_tbl) |
| 252 | return 0; | 370 | return 0; |
| 253 | 371 | ||
| 254 | for (old_hash = 0; old_hash < old_tbl->size; old_hash++) | 372 | for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { |
| 255 | rhashtable_rehash_chain(ht, old_hash); | 373 | err = rhashtable_rehash_chain(ht, old_hash); |
| 374 | if (err) | ||
| 375 | return err; | ||
| 376 | } | ||
| 256 | 377 | ||
| 257 | /* Publish the new table pointer. */ | 378 | /* Publish the new table pointer. */ |
| 258 | rcu_assign_pointer(ht->tbl, new_tbl); | 379 | rcu_assign_pointer(ht->tbl, new_tbl); |
| @@ -271,31 +392,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht) | |||
| 271 | return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; | 392 | return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; |
| 272 | } | 393 | } |
| 273 | 394 | ||
| 274 | /** | 395 | static int rhashtable_rehash_alloc(struct rhashtable *ht, |
| 275 | * rhashtable_expand - Expand hash table while allowing concurrent lookups | 396 | struct bucket_table *old_tbl, |
| 276 | * @ht: the hash table to expand | 397 | unsigned int size) |
| 277 | * | ||
| 278 | * A secondary bucket array is allocated and the hash entries are migrated. | ||
| 279 | * | ||
| 280 | * This function may only be called in a context where it is safe to call | ||
| 281 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. | ||
| 282 | * | ||
| 283 | * The caller must ensure that no concurrent resizing occurs by holding | ||
| 284 | * ht->mutex. | ||
| 285 | * | ||
| 286 | * It is valid to have concurrent insertions and deletions protected by per | ||
| 287 | * bucket locks or concurrent RCU protected lookups and traversals. | ||
| 288 | */ | ||
| 289 | static int rhashtable_expand(struct rhashtable *ht) | ||
| 290 | { | 398 | { |
| 291 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 399 | struct bucket_table *new_tbl; |
| 292 | int err; | 400 | int err; |
| 293 | 401 | ||
| 294 | ASSERT_RHT_MUTEX(ht); | 402 | ASSERT_RHT_MUTEX(ht); |
| 295 | 403 | ||
| 296 | old_tbl = rhashtable_last_table(ht, old_tbl); | 404 | new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); |
| 297 | |||
| 298 | new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL); | ||
| 299 | if (new_tbl == NULL) | 405 | if (new_tbl == NULL) |
| 300 | return -ENOMEM; | 406 | return -ENOMEM; |
| 301 | 407 | ||
| @@ -324,12 +430,9 @@ static int rhashtable_expand(struct rhashtable *ht) | |||
| 324 | */ | 430 | */ |
| 325 | static int rhashtable_shrink(struct rhashtable *ht) | 431 | static int rhashtable_shrink(struct rhashtable *ht) |
| 326 | { | 432 | { |
| 327 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 433 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
| 328 | unsigned int nelems = atomic_read(&ht->nelems); | 434 | unsigned int nelems = atomic_read(&ht->nelems); |
| 329 | unsigned int size = 0; | 435 | unsigned int size = 0; |
| 330 | int err; | ||
| 331 | |||
| 332 | ASSERT_RHT_MUTEX(ht); | ||
| 333 | 436 | ||
| 334 | if (nelems) | 437 | if (nelems) |
| 335 | size = roundup_pow_of_two(nelems * 3 / 2); | 438 | size = roundup_pow_of_two(nelems * 3 / 2); |
| @@ -342,15 +445,7 @@ static int rhashtable_shrink(struct rhashtable *ht) | |||
| 342 | if (rht_dereference(old_tbl->future_tbl, ht)) | 445 | if (rht_dereference(old_tbl->future_tbl, ht)) |
| 343 | return -EEXIST; | 446 | return -EEXIST; |
| 344 | 447 | ||
| 345 | new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); | 448 | return rhashtable_rehash_alloc(ht, old_tbl, size); |
| 346 | if (new_tbl == NULL) | ||
| 347 | return -ENOMEM; | ||
| 348 | |||
| 349 | err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); | ||
| 350 | if (err) | ||
| 351 | bucket_table_free(new_tbl); | ||
| 352 | |||
| 353 | return err; | ||
| 354 | } | 449 | } |
| 355 | 450 | ||
| 356 | static void rht_deferred_worker(struct work_struct *work) | 451 | static void rht_deferred_worker(struct work_struct *work) |
| @@ -366,11 +461,14 @@ static void rht_deferred_worker(struct work_struct *work) | |||
| 366 | tbl = rhashtable_last_table(ht, tbl); | 461 | tbl = rhashtable_last_table(ht, tbl); |
| 367 | 462 | ||
| 368 | if (rht_grow_above_75(ht, tbl)) | 463 | if (rht_grow_above_75(ht, tbl)) |
| 369 | rhashtable_expand(ht); | 464 | err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); |
| 370 | else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) | 465 | else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) |
| 371 | rhashtable_shrink(ht); | 466 | err = rhashtable_shrink(ht); |
| 467 | else if (tbl->nest) | ||
| 468 | err = rhashtable_rehash_alloc(ht, tbl, tbl->size); | ||
| 372 | 469 | ||
| 373 | err = rhashtable_rehash_table(ht); | 470 | if (!err) |
| 471 | err = rhashtable_rehash_table(ht); | ||
| 374 | 472 | ||
| 375 | mutex_unlock(&ht->mutex); | 473 | mutex_unlock(&ht->mutex); |
| 376 | 474 | ||
| @@ -439,8 +537,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, | |||
| 439 | int elasticity; | 537 | int elasticity; |
| 440 | 538 | ||
| 441 | elasticity = ht->elasticity; | 539 | elasticity = ht->elasticity; |
| 442 | pprev = &tbl->buckets[hash]; | 540 | pprev = rht_bucket_var(tbl, hash); |
| 443 | rht_for_each(head, tbl, hash) { | 541 | rht_for_each_continue(head, *pprev, tbl, hash) { |
| 444 | struct rhlist_head *list; | 542 | struct rhlist_head *list; |
| 445 | struct rhlist_head *plist; | 543 | struct rhlist_head *plist; |
| 446 | 544 | ||
| @@ -477,6 +575,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, | |||
| 477 | struct rhash_head *obj, | 575 | struct rhash_head *obj, |
| 478 | void *data) | 576 | void *data) |
| 479 | { | 577 | { |
| 578 | struct rhash_head __rcu **pprev; | ||
| 480 | struct bucket_table *new_tbl; | 579 | struct bucket_table *new_tbl; |
| 481 | struct rhash_head *head; | 580 | struct rhash_head *head; |
| 482 | 581 | ||
| @@ -499,7 +598,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, | |||
| 499 | if (unlikely(rht_grow_above_100(ht, tbl))) | 598 | if (unlikely(rht_grow_above_100(ht, tbl))) |
| 500 | return ERR_PTR(-EAGAIN); | 599 | return ERR_PTR(-EAGAIN); |
| 501 | 600 | ||
| 502 | head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); | 601 | pprev = rht_bucket_insert(ht, tbl, hash); |
| 602 | if (!pprev) | ||
| 603 | return ERR_PTR(-ENOMEM); | ||
| 604 | |||
| 605 | head = rht_dereference_bucket(*pprev, tbl, hash); | ||
| 503 | 606 | ||
| 504 | RCU_INIT_POINTER(obj->next, head); | 607 | RCU_INIT_POINTER(obj->next, head); |
| 505 | if (ht->rhlist) { | 608 | if (ht->rhlist) { |
| @@ -509,7 +612,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, | |||
| 509 | RCU_INIT_POINTER(list->next, NULL); | 612 | RCU_INIT_POINTER(list->next, NULL); |
| 510 | } | 613 | } |
| 511 | 614 | ||
| 512 | rcu_assign_pointer(tbl->buckets[hash], obj); | 615 | rcu_assign_pointer(*pprev, obj); |
| 513 | 616 | ||
| 514 | atomic_inc(&ht->nelems); | 617 | atomic_inc(&ht->nelems); |
| 515 | if (rht_grow_above_75(ht, tbl)) | 618 | if (rht_grow_above_75(ht, tbl)) |
| @@ -975,7 +1078,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, | |||
| 975 | void (*free_fn)(void *ptr, void *arg), | 1078 | void (*free_fn)(void *ptr, void *arg), |
| 976 | void *arg) | 1079 | void *arg) |
| 977 | { | 1080 | { |
| 978 | const struct bucket_table *tbl; | 1081 | struct bucket_table *tbl; |
| 979 | unsigned int i; | 1082 | unsigned int i; |
| 980 | 1083 | ||
| 981 | cancel_work_sync(&ht->run_work); | 1084 | cancel_work_sync(&ht->run_work); |
| @@ -986,7 +1089,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, | |||
| 986 | for (i = 0; i < tbl->size; i++) { | 1089 | for (i = 0; i < tbl->size; i++) { |
| 987 | struct rhash_head *pos, *next; | 1090 | struct rhash_head *pos, *next; |
| 988 | 1091 | ||
| 989 | for (pos = rht_dereference(tbl->buckets[i], ht), | 1092 | for (pos = rht_dereference(*rht_bucket(tbl, i), ht), |
| 990 | next = !rht_is_a_nulls(pos) ? | 1093 | next = !rht_is_a_nulls(pos) ? |
| 991 | rht_dereference(pos->next, ht) : NULL; | 1094 | rht_dereference(pos->next, ht) : NULL; |
| 992 | !rht_is_a_nulls(pos); | 1095 | !rht_is_a_nulls(pos); |
| @@ -1007,3 +1110,70 @@ void rhashtable_destroy(struct rhashtable *ht) | |||
| 1007 | return rhashtable_free_and_destroy(ht, NULL, NULL); | 1110 | return rhashtable_free_and_destroy(ht, NULL, NULL); |
| 1008 | } | 1111 | } |
| 1009 | EXPORT_SYMBOL_GPL(rhashtable_destroy); | 1112 | EXPORT_SYMBOL_GPL(rhashtable_destroy); |
| 1113 | |||
| 1114 | struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, | ||
| 1115 | unsigned int hash) | ||
| 1116 | { | ||
| 1117 | const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); | ||
| 1118 | static struct rhash_head __rcu *rhnull = | ||
| 1119 | (struct rhash_head __rcu *)NULLS_MARKER(0); | ||
| 1120 | unsigned int index = hash & ((1 << tbl->nest) - 1); | ||
| 1121 | unsigned int size = tbl->size >> tbl->nest; | ||
| 1122 | unsigned int subhash = hash; | ||
| 1123 | union nested_table *ntbl; | ||
| 1124 | |||
| 1125 | ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); | ||
| 1126 | ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); | ||
| 1127 | subhash >>= tbl->nest; | ||
| 1128 | |||
| 1129 | while (ntbl && size > (1 << shift)) { | ||
| 1130 | index = subhash & ((1 << shift) - 1); | ||
| 1131 | ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); | ||
| 1132 | size >>= shift; | ||
| 1133 | subhash >>= shift; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | if (!ntbl) | ||
| 1137 | return &rhnull; | ||
| 1138 | |||
| 1139 | return &ntbl[subhash].bucket; | ||
| 1140 | |||
| 1141 | } | ||
| 1142 | EXPORT_SYMBOL_GPL(rht_bucket_nested); | ||
| 1143 | |||
| 1144 | struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, | ||
| 1145 | struct bucket_table *tbl, | ||
| 1146 | unsigned int hash) | ||
| 1147 | { | ||
| 1148 | const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); | ||
| 1149 | unsigned int index = hash & ((1 << tbl->nest) - 1); | ||
| 1150 | unsigned int size = tbl->size >> tbl->nest; | ||
| 1151 | union nested_table *ntbl; | ||
| 1152 | unsigned int shifted; | ||
| 1153 | unsigned int nhash; | ||
| 1154 | |||
| 1155 | ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); | ||
| 1156 | hash >>= tbl->nest; | ||
| 1157 | nhash = index; | ||
| 1158 | shifted = tbl->nest; | ||
| 1159 | ntbl = nested_table_alloc(ht, &ntbl[index].table, | ||
| 1160 | size <= (1 << shift) ? shifted : 0, nhash); | ||
| 1161 | |||
| 1162 | while (ntbl && size > (1 << shift)) { | ||
| 1163 | index = hash & ((1 << shift) - 1); | ||
| 1164 | size >>= shift; | ||
| 1165 | hash >>= shift; | ||
| 1166 | nhash |= index << shifted; | ||
| 1167 | shifted += shift; | ||
| 1168 | ntbl = nested_table_alloc(ht, &ntbl[index].table, | ||
| 1169 | size <= (1 << shift) ? shifted : 0, | ||
| 1170 | nhash); | ||
| 1171 | } | ||
| 1172 | |||
| 1173 | if (!ntbl) | ||
| 1174 | return NULL; | ||
| 1175 | |||
| 1176 | return &ntbl[hash].bucket; | ||
| 1177 | |||
| 1178 | } | ||
| 1179 | EXPORT_SYMBOL_GPL(rht_bucket_nested_insert); | ||
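The new rht_bucket_nested() helper above no longer treats tbl->buckets[] as one flat array: when tbl->nest is set, buckets[0] points at a tree of page-sized pointer arrays, and the bucket hash is consumed level by level, tbl->nest bits first and then PAGE_SHIFT - ilog2(sizeof(void *)) bits per nested page. A minimal user-space sketch of that index walk, assuming 4 KiB pages and 8-byte pointers (so 9 bits per level) and a hypothetical nested_walk() helper, not code from the patch:

    #include <stdio.h>

    /* Illustrative only: walk the nested-table indices the way
     * rht_bucket_nested() does, assuming 4 KiB pages and 8-byte
     * pointers, so each nested level holds 1 << 9 pointers.
     */
    #define SHIFT 9U   /* PAGE_SHIFT - ilog2(sizeof(void *)) on such a system */

    static void nested_walk(unsigned int hash, unsigned int nest,
                            unsigned int tbl_size)
    {
        unsigned int index, size, subhash;

        hash &= tbl_size - 1;                 /* bucket hash is already < tbl_size */
        index = hash & ((1U << nest) - 1);    /* top-level slot in buckets[0] */
        size = tbl_size >> nest;
        subhash = hash >> nest;

        printf("level 0 index: %u\n", index);
        while (size > (1U << SHIFT)) {        /* descend one page of pointers */
            index = subhash & ((1U << SHIFT) - 1);
            printf("nested index: %u\n", index);
            size >>= SHIFT;
            subhash >>= SHIFT;
        }
        printf("final bucket slot: %u\n", subhash);
    }

    int main(void)
    {
        /* e.g. a 1M-bucket table that was allocated with nest == 4 */
        nested_walk(0xdeadbeefU, 4, 1U << 20);
        return 0;
    }

With a 1M-bucket table and nest == 4, the 20 hash bits are consumed as 4 + 9 + 7, which is how the real lookup ends up returning &ntbl[subhash].bucket.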
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 2cecf05c82fd..55e11c4b2f3b 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <linux/random.h> | 18 | #include <linux/random.h> |
| 19 | #include <linux/sbitmap.h> | 19 | #include <linux/sbitmap.h> |
| 20 | #include <linux/seq_file.h> | ||
| 20 | 21 | ||
| 21 | int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, | 22 | int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, |
| 22 | gfp_t flags, int node) | 23 | gfp_t flags, int node) |
| @@ -180,6 +181,62 @@ unsigned int sbitmap_weight(const struct sbitmap *sb) | |||
| 180 | } | 181 | } |
| 181 | EXPORT_SYMBOL_GPL(sbitmap_weight); | 182 | EXPORT_SYMBOL_GPL(sbitmap_weight); |
| 182 | 183 | ||
| 184 | void sbitmap_show(struct sbitmap *sb, struct seq_file *m) | ||
| 185 | { | ||
| 186 | seq_printf(m, "depth=%u\n", sb->depth); | ||
| 187 | seq_printf(m, "busy=%u\n", sbitmap_weight(sb)); | ||
| 188 | seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); | ||
| 189 | seq_printf(m, "map_nr=%u\n", sb->map_nr); | ||
| 190 | } | ||
| 191 | EXPORT_SYMBOL_GPL(sbitmap_show); | ||
| 192 | |||
| 193 | static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte) | ||
| 194 | { | ||
| 195 | if ((offset & 0xf) == 0) { | ||
| 196 | if (offset != 0) | ||
| 197 | seq_putc(m, '\n'); | ||
| 198 | seq_printf(m, "%08x:", offset); | ||
| 199 | } | ||
| 200 | if ((offset & 0x1) == 0) | ||
| 201 | seq_putc(m, ' '); | ||
| 202 | seq_printf(m, "%02x", byte); | ||
| 203 | } | ||
| 204 | |||
| 205 | void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m) | ||
| 206 | { | ||
| 207 | u8 byte = 0; | ||
| 208 | unsigned int byte_bits = 0; | ||
| 209 | unsigned int offset = 0; | ||
| 210 | int i; | ||
| 211 | |||
| 212 | for (i = 0; i < sb->map_nr; i++) { | ||
| 213 | unsigned long word = READ_ONCE(sb->map[i].word); | ||
| 214 | unsigned int word_bits = READ_ONCE(sb->map[i].depth); | ||
| 215 | |||
| 216 | while (word_bits > 0) { | ||
| 217 | unsigned int bits = min(8 - byte_bits, word_bits); | ||
| 218 | |||
| 219 | byte |= (word & (BIT(bits) - 1)) << byte_bits; | ||
| 220 | byte_bits += bits; | ||
| 221 | if (byte_bits == 8) { | ||
| 222 | emit_byte(m, offset, byte); | ||
| 223 | byte = 0; | ||
| 224 | byte_bits = 0; | ||
| 225 | offset++; | ||
| 226 | } | ||
| 227 | word >>= bits; | ||
| 228 | word_bits -= bits; | ||
| 229 | } | ||
| 230 | } | ||
| 231 | if (byte_bits) { | ||
| 232 | emit_byte(m, offset, byte); | ||
| 233 | offset++; | ||
| 234 | } | ||
| 235 | if (offset) | ||
| 236 | seq_putc(m, '\n'); | ||
| 237 | } | ||
| 238 | EXPORT_SYMBOL_GPL(sbitmap_bitmap_show); | ||
| 239 | |||
| 183 | static unsigned int sbq_calc_wake_batch(unsigned int depth) | 240 | static unsigned int sbq_calc_wake_batch(unsigned int depth) |
| 184 | { | 241 | { |
| 185 | unsigned int wake_batch; | 242 | unsigned int wake_batch; |
| @@ -239,7 +296,19 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); | |||
| 239 | 296 | ||
| 240 | void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) | 297 | void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) |
| 241 | { | 298 | { |
| 242 | sbq->wake_batch = sbq_calc_wake_batch(depth); | 299 | unsigned int wake_batch = sbq_calc_wake_batch(depth); |
| 300 | int i; | ||
| 301 | |||
| 302 | if (sbq->wake_batch != wake_batch) { | ||
| 303 | WRITE_ONCE(sbq->wake_batch, wake_batch); | ||
| 304 | /* | ||
| 305 | * Pairs with the memory barrier in sbq_wake_up() to ensure that | ||
| 306 | * the batch size is updated before the wait counts. | ||
| 307 | */ | ||
| 308 | smp_mb__before_atomic(); | ||
| 309 | for (i = 0; i < SBQ_WAIT_QUEUES; i++) | ||
| 310 | atomic_set(&sbq->ws[i].wait_cnt, 1); | ||
| 311 | } | ||
| 243 | sbitmap_resize(&sbq->sb, depth); | 312 | sbitmap_resize(&sbq->sb, depth); |
| 244 | } | 313 | } |
| 245 | EXPORT_SYMBOL_GPL(sbitmap_queue_resize); | 314 | EXPORT_SYMBOL_GPL(sbitmap_queue_resize); |
| @@ -297,20 +366,39 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) | |||
| 297 | static void sbq_wake_up(struct sbitmap_queue *sbq) | 366 | static void sbq_wake_up(struct sbitmap_queue *sbq) |
| 298 | { | 367 | { |
| 299 | struct sbq_wait_state *ws; | 368 | struct sbq_wait_state *ws; |
| 369 | unsigned int wake_batch; | ||
| 300 | int wait_cnt; | 370 | int wait_cnt; |
| 301 | 371 | ||
| 302 | /* Ensure that the wait list checks occur after clear_bit(). */ | 372 | /* |
| 303 | smp_mb(); | 373 | * Pairs with the memory barrier in set_current_state() to ensure the |
| 374 | * proper ordering of clear_bit()/waitqueue_active() in the waker and | ||
| 375 | * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See | ||
| 376 | * the comment on waitqueue_active(). This is __after_atomic because we | ||
| 377 | * just did clear_bit() in the caller. | ||
| 378 | */ | ||
| 379 | smp_mb__after_atomic(); | ||
| 304 | 380 | ||
| 305 | ws = sbq_wake_ptr(sbq); | 381 | ws = sbq_wake_ptr(sbq); |
| 306 | if (!ws) | 382 | if (!ws) |
| 307 | return; | 383 | return; |
| 308 | 384 | ||
| 309 | wait_cnt = atomic_dec_return(&ws->wait_cnt); | 385 | wait_cnt = atomic_dec_return(&ws->wait_cnt); |
| 310 | if (unlikely(wait_cnt < 0)) | 386 | if (wait_cnt <= 0) { |
| 311 | wait_cnt = atomic_inc_return(&ws->wait_cnt); | 387 | wake_batch = READ_ONCE(sbq->wake_batch); |
| 312 | if (wait_cnt == 0) { | 388 | /* |
| 313 | atomic_add(sbq->wake_batch, &ws->wait_cnt); | 389 | * Pairs with the memory barrier in sbitmap_queue_resize() to |
| 390 | * ensure that we see the batch size update before the wait | ||
| 391 | * count is reset. | ||
| 392 | */ | ||
| 393 | smp_mb__before_atomic(); | ||
| 394 | /* | ||
| 395 | * If there are concurrent callers to sbq_wake_up(), the last | ||
| 396 | * one to decrement the wait count below zero will bump it back | ||
| 397 | * up. If there is a concurrent resize, the count reset will | ||
| 398 | * either cause the cmpxchg to fail or overwrite after the | ||
| 399 | * cmpxchg. | ||
| 400 | */ | ||
| 401 | atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch); | ||
| 314 | sbq_index_atomic_inc(&sbq->wake_index); | 402 | sbq_index_atomic_inc(&sbq->wake_index); |
| 315 | wake_up(&ws->wait); | 403 | wake_up(&ws->wait); |
| 316 | } | 404 | } |
| @@ -331,7 +419,8 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) | |||
| 331 | int i, wake_index; | 419 | int i, wake_index; |
| 332 | 420 | ||
| 333 | /* | 421 | /* |
| 334 | * Make sure all changes prior to this are visible from other CPUs. | 422 | * Pairs with the memory barrier in set_current_state() like in |
| 423 | * sbq_wake_up(). | ||
| 335 | */ | 424 | */ |
| 336 | smp_mb(); | 425 | smp_mb(); |
| 337 | wake_index = atomic_read(&sbq->wake_index); | 426 | wake_index = atomic_read(&sbq->wake_index); |
| @@ -345,3 +434,37 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) | |||
| 345 | } | 434 | } |
| 346 | } | 435 | } |
| 347 | EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all); | 436 | EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all); |
| 437 | |||
| 438 | void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) | ||
| 439 | { | ||
| 440 | bool first; | ||
| 441 | int i; | ||
| 442 | |||
| 443 | sbitmap_show(&sbq->sb, m); | ||
| 444 | |||
| 445 | seq_puts(m, "alloc_hint={"); | ||
| 446 | first = true; | ||
| 447 | for_each_possible_cpu(i) { | ||
| 448 | if (!first) | ||
| 449 | seq_puts(m, ", "); | ||
| 450 | first = false; | ||
| 451 | seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i)); | ||
| 452 | } | ||
| 453 | seq_puts(m, "}\n"); | ||
| 454 | |||
| 455 | seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); | ||
| 456 | seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); | ||
| 457 | |||
| 458 | seq_puts(m, "ws={\n"); | ||
| 459 | for (i = 0; i < SBQ_WAIT_QUEUES; i++) { | ||
| 460 | struct sbq_wait_state *ws = &sbq->ws[i]; | ||
| 461 | |||
| 462 | seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n", | ||
| 463 | atomic_read(&ws->wait_cnt), | ||
| 464 | waitqueue_active(&ws->wait) ? "active" : "inactive"); | ||
| 465 | } | ||
| 466 | seq_puts(m, "}\n"); | ||
| 467 | |||
| 468 | seq_printf(m, "round_robin=%d\n", sbq->round_robin); | ||
| 469 | } | ||
| 470 | EXPORT_SYMBOL_GPL(sbitmap_queue_show); | ||
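sbitmap_bitmap_show() has to cope with each word holding a different number of valid bits (sb->map[i].depth), so it re-packs those bits into a flat byte stream before hex-dumping 16 bytes per row. A stand-alone sketch of the same packing loop, using hypothetical sample words rather than a real struct sbitmap:

    #include <stdio.h>

    /* Illustrative only: pack words of arbitrary bit depth into bytes,
     * the way sbitmap_bitmap_show() does before hex-dumping them.
     */
    struct word { unsigned long bits; unsigned int depth; };

    static void emit_byte(unsigned int offset, unsigned char byte)
    {
        if ((offset & 0xf) == 0) {
            if (offset != 0)
                putchar('\n');
            printf("%08x:", offset);
        }
        if ((offset & 0x1) == 0)
            putchar(' ');
        printf("%02x", byte);
    }

    int main(void)
    {
        struct word map[] = { { 0xabcUL, 12 }, { 0xf0f0fUL, 20 } };
        unsigned char byte = 0;
        unsigned int byte_bits = 0, offset = 0;
        size_t i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
            unsigned long word = map[i].bits;
            unsigned int word_bits = map[i].depth;

            while (word_bits > 0) {
                /* take at most what is left in the current output byte */
                unsigned int bits = 8 - byte_bits;

                if (bits > word_bits)
                    bits = word_bits;
                byte |= (word & ((1UL << bits) - 1)) << byte_bits;
                byte_bits += bits;
                if (byte_bits == 8) {
                    emit_byte(offset++, byte);
                    byte = 0;
                    byte_bits = 0;
                }
                word >>= bits;
                word_bits -= bits;
            }
        }
        if (byte_bits)
            emit_byte(offset++, byte);
        if (offset)
            putchar('\n');
        return 0;
    }

Two words of 12 and 20 valid bits come out as exactly four bytes on one hex line, with no padding between words.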
diff --git a/lib/show_mem.c b/lib/show_mem.c index 1feed6a2b12a..0beaa1d899aa 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -9,13 +9,13 @@ | |||
| 9 | #include <linux/quicklist.h> | 9 | #include <linux/quicklist.h> |
| 10 | #include <linux/cma.h> | 10 | #include <linux/cma.h> |
| 11 | 11 | ||
| 12 | void show_mem(unsigned int filter) | 12 | void show_mem(unsigned int filter, nodemask_t *nodemask) |
| 13 | { | 13 | { |
| 14 | pg_data_t *pgdat; | 14 | pg_data_t *pgdat; |
| 15 | unsigned long total = 0, reserved = 0, highmem = 0; | 15 | unsigned long total = 0, reserved = 0, highmem = 0; |
| 16 | 16 | ||
| 17 | printk("Mem-Info:\n"); | 17 | printk("Mem-Info:\n"); |
| 18 | show_free_areas(filter); | 18 | show_free_areas(filter, nodemask); |
| 19 | 19 | ||
| 20 | for_each_online_pgdat(pgdat) { | 20 | for_each_online_pgdat(pgdat) { |
| 21 | unsigned long flags; | 21 | unsigned long flags; |
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 975b8fc4f1e1..a8d74a733a38 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -483,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | |||
| 483 | : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); | 483 | : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); |
| 484 | 484 | ||
| 485 | /* | 485 | /* |
| 486 | * For mappings greater than a page, we limit the stride (and | 486 | * For mappings greater than or equal to a page, we limit the stride |
| 487 | * hence alignment) to a page size. | 487 | * (and hence alignment) to a page size. |
| 488 | */ | 488 | */ |
| 489 | nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 489 | nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
| 490 | if (size > PAGE_SIZE) | 490 | if (size >= PAGE_SIZE) |
| 491 | stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); | 491 | stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); |
| 492 | else | 492 | else |
| 493 | stride = 1; | 493 | stride = 1; |
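The swiotlb change above only widens the condition from '>' to '>=': a mapping of exactly one page now also searches the bounce buffer with a page-sized stride, so its slots start page-aligned. With the assumed constants IO_TLB_SHIFT == 11 and PAGE_SHIFT == 12, the arithmetic works out as in this small sketch:

    #include <stdio.h>

    /* Illustrative only: recompute nslots/stride for a few sizes with the
     * assumed constants IO_TLB_SHIFT == 11 (2 KiB slots) and PAGE_SHIFT == 12.
     */
    #define IO_TLB_SHIFT 11
    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)

    static unsigned long align_up(unsigned long x, unsigned long a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        unsigned long sizes[] = { 1024, 4096, 8192 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            unsigned long size = sizes[i];
            unsigned long nslots = align_up(size, 1UL << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
            unsigned long stride = size >= PAGE_SIZE ?
                    1UL << (PAGE_SHIFT - IO_TLB_SHIFT) : 1;

            printf("size %5lu -> nslots %lu, stride %lu\n", size, nslots, stride);
        }
        return 0;
    }

Before the change, a 4096-byte request got nslots 2 but stride 1 and could land on an odd slot; now it gets stride 2, matching larger mappings.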
diff --git a/lib/test_firmware.c b/lib/test_firmware.c index a3e8ec3fb1c5..09371b0a9baf 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c | |||
| @@ -42,12 +42,6 @@ static const struct file_operations test_fw_fops = { | |||
| 42 | .read = test_fw_misc_read, | 42 | .read = test_fw_misc_read, |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | static struct miscdevice test_fw_misc_device = { | ||
| 46 | .minor = MISC_DYNAMIC_MINOR, | ||
| 47 | .name = "test_firmware", | ||
| 48 | .fops = &test_fw_fops, | ||
| 49 | }; | ||
| 50 | |||
| 51 | static ssize_t trigger_request_store(struct device *dev, | 45 | static ssize_t trigger_request_store(struct device *dev, |
| 52 | struct device_attribute *attr, | 46 | struct device_attribute *attr, |
| 53 | const char *buf, size_t count) | 47 | const char *buf, size_t count) |
| @@ -132,39 +126,81 @@ out: | |||
| 132 | } | 126 | } |
| 133 | static DEVICE_ATTR_WO(trigger_async_request); | 127 | static DEVICE_ATTR_WO(trigger_async_request); |
| 134 | 128 | ||
| 135 | static int __init test_firmware_init(void) | 129 | static ssize_t trigger_custom_fallback_store(struct device *dev, |
| 130 | struct device_attribute *attr, | ||
| 131 | const char *buf, size_t count) | ||
| 136 | { | 132 | { |
| 137 | int rc; | 133 | int rc; |
| 134 | char *name; | ||
| 138 | 135 | ||
| 139 | rc = misc_register(&test_fw_misc_device); | 136 | name = kstrndup(buf, count, GFP_KERNEL); |
| 137 | if (!name) | ||
| 138 | return -ENOSPC; | ||
| 139 | |||
| 140 | pr_info("loading '%s' using custom fallback mechanism\n", name); | ||
| 141 | |||
| 142 | mutex_lock(&test_fw_mutex); | ||
| 143 | release_firmware(test_firmware); | ||
| 144 | test_firmware = NULL; | ||
| 145 | rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name, | ||
| 146 | dev, GFP_KERNEL, NULL, | ||
| 147 | trigger_async_request_cb); | ||
| 140 | if (rc) { | 148 | if (rc) { |
| 141 | pr_err("could not register misc device: %d\n", rc); | 149 | pr_info("async load of '%s' failed: %d\n", name, rc); |
| 142 | return rc; | 150 | kfree(name); |
| 151 | goto out; | ||
| 143 | } | 152 | } |
| 144 | rc = device_create_file(test_fw_misc_device.this_device, | 153 | /* Free 'name' ASAP, to test for race conditions */ |
| 145 | &dev_attr_trigger_request); | 154 | kfree(name); |
| 146 | if (rc) { | 155 | |
| 147 | pr_err("could not create sysfs interface: %d\n", rc); | 156 | wait_for_completion(&async_fw_done); |
| 148 | goto dereg; | 157 | |
| 158 | if (test_firmware) { | ||
| 159 | pr_info("loaded: %zu\n", test_firmware->size); | ||
| 160 | rc = count; | ||
| 161 | } else { | ||
| 162 | pr_err("failed to async load firmware\n"); | ||
| 163 | rc = -ENODEV; | ||
| 149 | } | 164 | } |
| 150 | 165 | ||
| 151 | rc = device_create_file(test_fw_misc_device.this_device, | 166 | out: |
| 152 | &dev_attr_trigger_async_request); | 167 | mutex_unlock(&test_fw_mutex); |
| 168 | |||
| 169 | return rc; | ||
| 170 | } | ||
| 171 | static DEVICE_ATTR_WO(trigger_custom_fallback); | ||
| 172 | |||
| 173 | #define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr | ||
| 174 | |||
| 175 | static struct attribute *test_dev_attrs[] = { | ||
| 176 | TEST_FW_DEV_ATTR(trigger_request), | ||
| 177 | TEST_FW_DEV_ATTR(trigger_async_request), | ||
| 178 | TEST_FW_DEV_ATTR(trigger_custom_fallback), | ||
| 179 | NULL, | ||
| 180 | }; | ||
| 181 | |||
| 182 | ATTRIBUTE_GROUPS(test_dev); | ||
| 183 | |||
| 184 | static struct miscdevice test_fw_misc_device = { | ||
| 185 | .minor = MISC_DYNAMIC_MINOR, | ||
| 186 | .name = "test_firmware", | ||
| 187 | .fops = &test_fw_fops, | ||
| 188 | .groups = test_dev_groups, | ||
| 189 | }; | ||
| 190 | |||
| 191 | static int __init test_firmware_init(void) | ||
| 192 | { | ||
| 193 | int rc; | ||
| 194 | |||
| 195 | rc = misc_register(&test_fw_misc_device); | ||
| 153 | if (rc) { | 196 | if (rc) { |
| 154 | pr_err("could not create async sysfs interface: %d\n", rc); | 197 | pr_err("could not register misc device: %d\n", rc); |
| 155 | goto remove_file; | 198 | return rc; |
| 156 | } | 199 | } |
| 157 | 200 | ||
| 158 | pr_warn("interface ready\n"); | 201 | pr_warn("interface ready\n"); |
| 159 | 202 | ||
| 160 | return 0; | 203 | return 0; |
| 161 | |||
| 162 | remove_file: | ||
| 163 | device_remove_file(test_fw_misc_device.this_device, | ||
| 164 | &dev_attr_trigger_async_request); | ||
| 165 | dereg: | ||
| 166 | misc_deregister(&test_fw_misc_device); | ||
| 167 | return rc; | ||
| 168 | } | 204 | } |
| 169 | 205 | ||
| 170 | module_init(test_firmware_init); | 206 | module_init(test_firmware_init); |
| @@ -172,10 +208,6 @@ module_init(test_firmware_init); | |||
| 172 | static void __exit test_firmware_exit(void) | 208 | static void __exit test_firmware_exit(void) |
| 173 | { | 209 | { |
| 174 | release_firmware(test_firmware); | 210 | release_firmware(test_firmware); |
| 175 | device_remove_file(test_fw_misc_device.this_device, | ||
| 176 | &dev_attr_trigger_async_request); | ||
| 177 | device_remove_file(test_fw_misc_device.this_device, | ||
| 178 | &dev_attr_trigger_request); | ||
| 179 | misc_deregister(&test_fw_misc_device); | 211 | misc_deregister(&test_fw_misc_device); |
| 180 | pr_warn("removed interface\n"); | 212 | pr_warn("removed interface\n"); |
| 181 | } | 213 | } |
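The test_firmware cleanup above hangs the sysfs attributes off the miscdevice as an attribute group, so misc_register()/misc_deregister() create and remove the files and the manual device_create_file()/device_remove_file() error handling disappears. A minimal sketch of that pattern with a hypothetical write-only "example" attribute (names are illustrative, not from the patch):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/miscdevice.h>
    #include <linux/device.h>
    #include <linux/fs.h>

    /* Illustrative only: publish an attribute through miscdevice->groups
     * instead of device_create_file() after registration.
     */
    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
        pr_info("example attribute written\n");
        return count;
    }
    static DEVICE_ATTR_WO(example);

    static struct attribute *example_dev_attrs[] = {
        &dev_attr_example.attr,
        NULL,
    };
    ATTRIBUTE_GROUPS(example_dev);

    static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
    };

    static struct miscdevice example_misc_device = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "example_misc",
        .fops   = &example_fops,
        .groups = example_dev_groups,
    };

    static int __init example_init(void)
    {
        return misc_register(&example_misc_device);  /* attrs appear here */
    }

    static void __exit example_exit(void)
    {
        misc_deregister(&example_misc_device);       /* attrs removed here */
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Because the core owns file creation, a failed misc_register() is the only error path left, which is exactly why the dereg/remove_file labels could be dropped.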
diff --git a/lib/test_parman.c b/lib/test_parman.c new file mode 100644 index 000000000000..fe9f3a785804 --- /dev/null +++ b/lib/test_parman.c | |||
| @@ -0,0 +1,395 @@ | |||
| 1 | /* | ||
| 2 | * lib/test_parman.c - Test module for parman | ||
| 3 | * Copyright (c) 2017 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> | ||
| 5 | * | ||
| 6 | * Redistribution and use in source and binary forms, with or without | ||
| 7 | * modification, are permitted provided that the following conditions are met: | ||
| 8 | * | ||
| 9 | * 1. Redistributions of source code must retain the above copyright | ||
| 10 | * notice, this list of conditions and the following disclaimer. | ||
| 11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 12 | * notice, this list of conditions and the following disclaimer in the | ||
| 13 | * documentation and/or other materials provided with the distribution. | ||
| 14 | * 3. Neither the names of the copyright holders nor the names of its | ||
| 15 | * contributors may be used to endorse or promote products derived from | ||
| 16 | * this software without specific prior written permission. | ||
| 17 | * | ||
| 18 | * Alternatively, this software may be distributed under the terms of the | ||
| 19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 20 | * Software Foundation. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
| 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
| 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 32 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 36 | |||
| 37 | #include <linux/kernel.h> | ||
| 38 | #include <linux/module.h> | ||
| 39 | #include <linux/slab.h> | ||
| 40 | #include <linux/bitops.h> | ||
| 41 | #include <linux/err.h> | ||
| 42 | #include <linux/random.h> | ||
| 43 | #include <linux/parman.h> | ||
| 44 | |||
| 45 | #define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */ | ||
| 46 | #define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT) | ||
| 47 | #define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1) | ||
| 48 | |||
| 49 | #define TEST_PARMAN_ITEM_SHIFT 13 /* defines the total number | ||
| 50 | * of items for testing | ||
| 51 | */ | ||
| 52 | #define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT) | ||
| 53 | #define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1) | ||
| 54 | |||
| 55 | #define TEST_PARMAN_BASE_SHIFT 8 | ||
| 56 | #define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT) | ||
| 57 | #define TEST_PARMAN_RESIZE_STEP_SHIFT 7 | ||
| 58 | #define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT) | ||
| 59 | |||
| 60 | #define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT) | ||
| 61 | #define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT) | ||
| 62 | #define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1) | ||
| 63 | |||
| 64 | #define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256) | ||
| 65 | |||
| 66 | struct test_parman_prio { | ||
| 67 | struct parman_prio parman_prio; | ||
| 68 | unsigned long priority; | ||
| 69 | }; | ||
| 70 | |||
| 71 | struct test_parman_item { | ||
| 72 | struct parman_item parman_item; | ||
| 73 | struct test_parman_prio *prio; | ||
| 74 | bool used; | ||
| 75 | }; | ||
| 76 | |||
| 77 | struct test_parman { | ||
| 78 | struct parman *parman; | ||
| 79 | struct test_parman_item **prio_array; | ||
| 80 | unsigned long prio_array_limit; | ||
| 81 | struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT]; | ||
| 82 | struct test_parman_item items[TEST_PARMAN_ITEM_COUNT]; | ||
| 83 | struct rnd_state rnd; | ||
| 84 | unsigned long run_budget; | ||
| 85 | unsigned long bulk_budget; | ||
| 86 | bool bulk_noop; | ||
| 87 | unsigned int used_items; | ||
| 88 | }; | ||
| 89 | |||
| 90 | #define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count)) | ||
| 91 | |||
| 92 | static int test_parman_resize(void *priv, unsigned long new_count) | ||
| 93 | { | ||
| 94 | struct test_parman *test_parman = priv; | ||
| 95 | struct test_parman_item **prio_array; | ||
| 96 | unsigned long old_count; | ||
| 97 | |||
| 98 | prio_array = krealloc(test_parman->prio_array, | ||
| 99 | ITEM_PTRS_SIZE(new_count), GFP_KERNEL); | ||
| 100 | if (new_count == 0) | ||
| 101 | return 0; | ||
| 102 | if (!prio_array) | ||
| 103 | return -ENOMEM; | ||
| 104 | old_count = test_parman->prio_array_limit; | ||
| 105 | if (new_count > old_count) | ||
| 106 | memset(&prio_array[old_count], 0, | ||
| 107 | ITEM_PTRS_SIZE(new_count - old_count)); | ||
| 108 | test_parman->prio_array = prio_array; | ||
| 109 | test_parman->prio_array_limit = new_count; | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | static void test_parman_move(void *priv, unsigned long from_index, | ||
| 114 | unsigned long to_index, unsigned long count) | ||
| 115 | { | ||
| 116 | struct test_parman *test_parman = priv; | ||
| 117 | struct test_parman_item **prio_array = test_parman->prio_array; | ||
| 118 | |||
| 119 | memmove(&prio_array[to_index], &prio_array[from_index], | ||
| 120 | ITEM_PTRS_SIZE(count)); | ||
| 121 | memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count)); | ||
| 122 | } | ||
| 123 | |||
| 124 | static const struct parman_ops test_parman_lsort_ops = { | ||
| 125 | .base_count = TEST_PARMAN_BASE_COUNT, | ||
| 126 | .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT, | ||
| 127 | .resize = test_parman_resize, | ||
| 128 | .move = test_parman_move, | ||
| 129 | .algo = PARMAN_ALGO_TYPE_LSORT, | ||
| 130 | }; | ||
| 131 | |||
| 132 | static void test_parman_rnd_init(struct test_parman *test_parman) | ||
| 133 | { | ||
| 134 | prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL); | ||
| 135 | } | ||
| 136 | |||
| 137 | static u32 test_parman_rnd_get(struct test_parman *test_parman) | ||
| 138 | { | ||
| 139 | return prandom_u32_state(&test_parman->rnd); | ||
| 140 | } | ||
| 141 | |||
| 142 | static unsigned long test_parman_priority_gen(struct test_parman *test_parman) | ||
| 143 | { | ||
| 144 | unsigned long priority; | ||
| 145 | int i; | ||
| 146 | |||
| 147 | again: | ||
| 148 | priority = test_parman_rnd_get(test_parman); | ||
| 149 | if (priority == 0) | ||
| 150 | goto again; | ||
| 151 | |||
| 152 | for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { | ||
| 153 | struct test_parman_prio *prio = &test_parman->prios[i]; | ||
| 154 | |||
| 155 | if (prio->priority == 0) | ||
| 156 | break; | ||
| 157 | if (prio->priority == priority) | ||
| 158 | goto again; | ||
| 159 | } | ||
| 160 | return priority; | ||
| 161 | } | ||
| 162 | |||
| 163 | static void test_parman_prios_init(struct test_parman *test_parman) | ||
| 164 | { | ||
| 165 | int i; | ||
| 166 | |||
| 167 | for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { | ||
| 168 | struct test_parman_prio *prio = &test_parman->prios[i]; | ||
| 169 | |||
| 170 | /* Assign a random unique priority to each prio structure */ | ||
| 171 | prio->priority = test_parman_priority_gen(test_parman); | ||
| 172 | parman_prio_init(test_parman->parman, &prio->parman_prio, | ||
| 173 | prio->priority); | ||
| 174 | } | ||
| 175 | } | ||
| 176 | |||
| 177 | static void test_parman_prios_fini(struct test_parman *test_parman) | ||
| 178 | { | ||
| 179 | int i; | ||
| 180 | |||
| 181 | for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { | ||
| 182 | struct test_parman_prio *prio = &test_parman->prios[i]; | ||
| 183 | |||
| 184 | parman_prio_fini(&prio->parman_prio); | ||
| 185 | } | ||
| 186 | } | ||
| 187 | |||
| 188 | static void test_parman_items_init(struct test_parman *test_parman) | ||
| 189 | { | ||
| 190 | int i; | ||
| 191 | |||
| 192 | for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { | ||
| 193 | struct test_parman_item *item = &test_parman->items[i]; | ||
| 194 | unsigned int prio_index = test_parman_rnd_get(test_parman) & | ||
| 195 | TEST_PARMAN_PRIO_MASK; | ||
| 196 | |||
| 197 | /* Assign random prio to each item structure */ | ||
| 198 | item->prio = &test_parman->prios[prio_index]; | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | static void test_parman_items_fini(struct test_parman *test_parman) | ||
| 203 | { | ||
| 204 | int i; | ||
| 205 | |||
| 206 | for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { | ||
| 207 | struct test_parman_item *item = &test_parman->items[i]; | ||
| 208 | |||
| 209 | if (!item->used) | ||
| 210 | continue; | ||
| 211 | parman_item_remove(test_parman->parman, | ||
| 212 | &item->prio->parman_prio, | ||
| 213 | &item->parman_item); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | static struct test_parman *test_parman_create(const struct parman_ops *ops) | ||
| 218 | { | ||
| 219 | struct test_parman *test_parman; | ||
| 220 | int err; | ||
| 221 | |||
| 222 | test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL); | ||
| 223 | if (!test_parman) | ||
| 224 | return ERR_PTR(-ENOMEM); | ||
| 225 | err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT); | ||
| 226 | if (err) | ||
| 227 | goto err_resize; | ||
| 228 | test_parman->parman = parman_create(ops, test_parman); | ||
| 229 | if (!test_parman->parman) { | ||
| 230 | err = -ENOMEM; | ||
| 231 | goto err_parman_create; | ||
| 232 | } | ||
| 233 | test_parman_rnd_init(test_parman); | ||
| 234 | test_parman_prios_init(test_parman); | ||
| 235 | test_parman_items_init(test_parman); | ||
| 236 | test_parman->run_budget = TEST_PARMAN_RUN_BUDGET; | ||
| 237 | return test_parman; | ||
| 238 | |||
| 239 | err_parman_create: | ||
| 240 | test_parman_resize(test_parman, 0); | ||
| 241 | err_resize: | ||
| 242 | kfree(test_parman); | ||
| 243 | return ERR_PTR(err); | ||
| 244 | } | ||
| 245 | |||
| 246 | static void test_parman_destroy(struct test_parman *test_parman) | ||
| 247 | { | ||
| 248 | test_parman_items_fini(test_parman); | ||
| 249 | test_parman_prios_fini(test_parman); | ||
| 250 | parman_destroy(test_parman->parman); | ||
| 251 | test_parman_resize(test_parman, 0); | ||
| 252 | kfree(test_parman); | ||
| 253 | } | ||
| 254 | |||
| 255 | static bool test_parman_run_check_budgets(struct test_parman *test_parman) | ||
| 256 | { | ||
| 257 | if (test_parman->run_budget-- == 0) | ||
| 258 | return false; | ||
| 259 | if (test_parman->bulk_budget-- != 0) | ||
| 260 | return true; | ||
| 261 | |||
| 262 | test_parman->bulk_budget = test_parman_rnd_get(test_parman) & | ||
| 263 | TEST_PARMAN_BULK_MAX_MASK; | ||
| 264 | test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1; | ||
| 265 | return true; | ||
| 266 | } | ||
| 267 | |||
| 268 | static int test_parman_run(struct test_parman *test_parman) | ||
| 269 | { | ||
| 270 | unsigned int i = test_parman_rnd_get(test_parman); | ||
| 271 | int err; | ||
| 272 | |||
| 273 | while (test_parman_run_check_budgets(test_parman)) { | ||
| 274 | unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK; | ||
| 275 | struct test_parman_item *item = &test_parman->items[item_index]; | ||
| 276 | |||
| 277 | if (test_parman->bulk_noop) | ||
| 278 | continue; | ||
| 279 | |||
| 280 | if (!item->used) { | ||
| 281 | err = parman_item_add(test_parman->parman, | ||
| 282 | &item->prio->parman_prio, | ||
| 283 | &item->parman_item); | ||
| 284 | if (err) | ||
| 285 | return err; | ||
| 286 | test_parman->prio_array[item->parman_item.index] = item; | ||
| 287 | test_parman->used_items++; | ||
| 288 | } else { | ||
| 289 | test_parman->prio_array[item->parman_item.index] = NULL; | ||
| 290 | parman_item_remove(test_parman->parman, | ||
| 291 | &item->prio->parman_prio, | ||
| 292 | &item->parman_item); | ||
| 293 | test_parman->used_items--; | ||
| 294 | } | ||
| 295 | item->used = !item->used; | ||
| 296 | } | ||
| 297 | return 0; | ||
| 298 | } | ||
| 299 | |||
| 300 | static int test_parman_check_array(struct test_parman *test_parman, | ||
| 301 | bool gaps_allowed) | ||
| 302 | { | ||
| 303 | unsigned int last_unused_items = 0; | ||
| 304 | unsigned long last_priority = 0; | ||
| 305 | unsigned int used_items = 0; | ||
| 306 | int i; | ||
| 307 | |||
| 308 | if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) { | ||
| 309 | pr_err("Array limit is lower than the base count (%lu < %lu)\n", | ||
| 310 | test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT); | ||
| 311 | return -EINVAL; | ||
| 312 | } | ||
| 313 | |||
| 314 | for (i = 0; i < test_parman->prio_array_limit; i++) { | ||
| 315 | struct test_parman_item *item = test_parman->prio_array[i]; | ||
| 316 | |||
| 317 | if (!item) { | ||
| 318 | last_unused_items++; | ||
| 319 | continue; | ||
| 320 | } | ||
| 321 | if (last_unused_items && !gaps_allowed) { | ||
| 322 | pr_err("Gap found in array even though they are forbidden\n"); | ||
| 323 | return -EINVAL; | ||
| 324 | } | ||
| 325 | |||
| 326 | last_unused_items = 0; | ||
| 327 | used_items++; | ||
| 328 | |||
| 329 | if (item->prio->priority < last_priority) { | ||
| 330 | pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n", | ||
| 331 | item->prio->priority, last_priority); | ||
| 332 | return -EINVAL; | ||
| 333 | } | ||
| 334 | last_priority = item->prio->priority; | ||
| 335 | |||
| 336 | if (item->parman_item.index != i) { | ||
| 337 | pr_err("Item has different index in compare to where it actualy is (%lu != %d)\n", | ||
| 338 | item->parman_item.index, i); | ||
| 339 | return -EINVAL; | ||
| 340 | } | ||
| 341 | } | ||
| 342 | |||
| 343 | if (used_items != test_parman->used_items) { | ||
| 344 | pr_err("Number of used items in array does not match (%u != %u)\n", | ||
| 345 | used_items, test_parman->used_items); | ||
| 346 | return -EINVAL; | ||
| 347 | } | ||
| 348 | |||
| 349 | if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) { | ||
| 350 | pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n", | ||
| 351 | last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT); | ||
| 352 | return -EINVAL; | ||
| 353 | } | ||
| 354 | |||
| 355 | pr_info("Priority array check successful\n"); | ||
| 356 | |||
| 357 | return 0; | ||
| 358 | } | ||
| 359 | |||
| 360 | static int test_parman_lsort(void) | ||
| 361 | { | ||
| 362 | struct test_parman *test_parman; | ||
| 363 | int err; | ||
| 364 | |||
| 365 | test_parman = test_parman_create(&test_parman_lsort_ops); | ||
| 366 | if (IS_ERR(test_parman)) | ||
| 367 | return PTR_ERR(test_parman); | ||
| 368 | |||
| 369 | err = test_parman_run(test_parman); | ||
| 370 | if (err) | ||
| 371 | goto out; | ||
| 372 | |||
| 373 | err = test_parman_check_array(test_parman, false); | ||
| 374 | if (err) | ||
| 375 | goto out; | ||
| 376 | out: | ||
| 377 | test_parman_destroy(test_parman); | ||
| 378 | return err; | ||
| 379 | } | ||
| 380 | |||
| 381 | static int __init test_parman_init(void) | ||
| 382 | { | ||
| 383 | return test_parman_lsort(); | ||
| 384 | } | ||
| 385 | |||
| 386 | static void __exit test_parman_exit(void) | ||
| 387 | { | ||
| 388 | } | ||
| 389 | |||
| 390 | module_init(test_parman_init); | ||
| 391 | module_exit(test_parman_exit); | ||
| 392 | |||
| 393 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 394 | MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); | ||
| 395 | MODULE_DESCRIPTION("Test module for parman"); | ||
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c index 0ecef3e4690e..6f335a3d4ae2 100644 --- a/lib/test_user_copy.c +++ b/lib/test_user_copy.c | |||
| @@ -25,6 +25,23 @@ | |||
| 25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
| 26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
| 27 | 27 | ||
| 28 | /* | ||
| 29 | * Several 32-bit architectures support 64-bit {get,put}_user() calls. | ||
| 30 | * As there doesn't appear to be anything that can safely determine | ||
| 31 | * their capability at compile time, we just have to opt out certain archs. | ||
| 32 | */ | ||
| 33 | #if BITS_PER_LONG == 64 || (!defined(CONFIG_AVR32) && \ | ||
| 34 | !defined(CONFIG_BLACKFIN) && \ | ||
| 35 | !defined(CONFIG_M32R) && \ | ||
| 36 | !defined(CONFIG_M68K) && \ | ||
| 37 | !defined(CONFIG_MICROBLAZE) && \ | ||
| 38 | !defined(CONFIG_MN10300) && \ | ||
| 39 | !defined(CONFIG_NIOS2) && \ | ||
| 40 | !defined(CONFIG_PPC32) && \ | ||
| 41 | !defined(CONFIG_SUPERH)) | ||
| 42 | # define TEST_U64 | ||
| 43 | #endif | ||
| 44 | |||
| 28 | #define test(condition, msg) \ | 45 | #define test(condition, msg) \ |
| 29 | ({ \ | 46 | ({ \ |
| 30 | int cond = (condition); \ | 47 | int cond = (condition); \ |
| @@ -40,7 +57,12 @@ static int __init test_user_copy_init(void) | |||
| 40 | char __user *usermem; | 57 | char __user *usermem; |
| 41 | char *bad_usermem; | 58 | char *bad_usermem; |
| 42 | unsigned long user_addr; | 59 | unsigned long user_addr; |
| 43 | unsigned long value = 0x5A; | 60 | u8 val_u8; |
| 61 | u16 val_u16; | ||
| 62 | u32 val_u32; | ||
| 63 | #ifdef TEST_U64 | ||
| 64 | u64 val_u64; | ||
| 65 | #endif | ||
| 44 | 66 | ||
| 45 | kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); | 67 | kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); |
| 46 | if (!kmem) | 68 | if (!kmem) |
| @@ -58,33 +80,100 @@ static int __init test_user_copy_init(void) | |||
| 58 | usermem = (char __user *)user_addr; | 80 | usermem = (char __user *)user_addr; |
| 59 | bad_usermem = (char *)user_addr; | 81 | bad_usermem = (char *)user_addr; |
| 60 | 82 | ||
| 61 | /* Legitimate usage: none of these should fail. */ | 83 | /* |
| 62 | ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), | 84 | * Legitimate usage: none of these copies should fail. |
| 63 | "legitimate copy_from_user failed"); | 85 | */ |
| 86 | memset(kmem, 0x3a, PAGE_SIZE * 2); | ||
| 64 | ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), | 87 | ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), |
| 65 | "legitimate copy_to_user failed"); | 88 | "legitimate copy_to_user failed"); |
| 66 | ret |= test(get_user(value, (unsigned long __user *)usermem), | 89 | memset(kmem, 0x0, PAGE_SIZE); |
| 67 | "legitimate get_user failed"); | 90 | ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), |
| 68 | ret |= test(put_user(value, (unsigned long __user *)usermem), | 91 | "legitimate copy_from_user failed"); |
| 69 | "legitimate put_user failed"); | 92 | ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE), |
| 70 | 93 | "legitimate usercopy failed to copy data"); | |
| 71 | /* Invalid usage: none of these should succeed. */ | 94 | |
| 95 | #define test_legit(size, check) \ | ||
| 96 | do { \ | ||
| 97 | val_##size = check; \ | ||
| 98 | ret |= test(put_user(val_##size, (size __user *)usermem), \ | ||
| 99 | "legitimate put_user (" #size ") failed"); \ | ||
| 100 | val_##size = 0; \ | ||
| 101 | ret |= test(get_user(val_##size, (size __user *)usermem), \ | ||
| 102 | "legitimate get_user (" #size ") failed"); \ | ||
| 103 | ret |= test(val_##size != check, \ | ||
| 104 | "legitimate get_user (" #size ") failed to do copy"); \ | ||
| 105 | if (val_##size != check) { \ | ||
| 106 | pr_info("0x%llx != 0x%llx\n", \ | ||
| 107 | (unsigned long long)val_##size, \ | ||
| 108 | (unsigned long long)check); \ | ||
| 109 | } \ | ||
| 110 | } while (0) | ||
| 111 | |||
| 112 | test_legit(u8, 0x5a); | ||
| 113 | test_legit(u16, 0x5a5b); | ||
| 114 | test_legit(u32, 0x5a5b5c5d); | ||
| 115 | #ifdef TEST_U64 | ||
| 116 | test_legit(u64, 0x5a5b5c5d6a6b6c6d); | ||
| 117 | #endif | ||
| 118 | #undef test_legit | ||
| 119 | |||
| 120 | /* | ||
| 121 | * Invalid usage: none of these copies should succeed. | ||
| 122 | */ | ||
| 123 | |||
| 124 | /* Prepare kernel memory with check values. */ | ||
| 125 | memset(kmem, 0x5a, PAGE_SIZE); | ||
| 126 | memset(kmem + PAGE_SIZE, 0, PAGE_SIZE); | ||
| 127 | |||
| 128 | /* Reject kernel-to-kernel copies through copy_from_user(). */ | ||
| 72 | ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), | 129 | ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), |
| 73 | PAGE_SIZE), | 130 | PAGE_SIZE), |
| 74 | "illegal all-kernel copy_from_user passed"); | 131 | "illegal all-kernel copy_from_user passed"); |
| 132 | |||
| 133 | /* Destination half of buffer should have been zeroed. */ | ||
| 134 | ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE), | ||
| 135 | "zeroing failure for illegal all-kernel copy_from_user"); | ||
| 136 | |||
| 137 | #if 0 | ||
| 138 | /* | ||
| 139 | * When running with SMAP/PAN/etc, this will Oops the kernel | ||
| 140 | * due to the zeroing of userspace memory on failure. This needs | ||
| 141 | * to be tested in LKDTM instead, since this test module does not | ||
| 142 | * expect to explode. | ||
| 143 | */ | ||
| 75 | ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, | 144 | ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, |
| 76 | PAGE_SIZE), | 145 | PAGE_SIZE), |
| 77 | "illegal reversed copy_from_user passed"); | 146 | "illegal reversed copy_from_user passed"); |
| 147 | #endif | ||
| 78 | ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, | 148 | ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, |
| 79 | PAGE_SIZE), | 149 | PAGE_SIZE), |
| 80 | "illegal all-kernel copy_to_user passed"); | 150 | "illegal all-kernel copy_to_user passed"); |
| 81 | ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, | 151 | ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, |
| 82 | PAGE_SIZE), | 152 | PAGE_SIZE), |
| 83 | "illegal reversed copy_to_user passed"); | 153 | "illegal reversed copy_to_user passed"); |
| 84 | ret |= test(!get_user(value, (unsigned long __user *)kmem), | 154 | |
| 85 | "illegal get_user passed"); | 155 | #define test_illegal(size, check) \ |
| 86 | ret |= test(!put_user(value, (unsigned long __user *)kmem), | 156 | do { \ |
| 87 | "illegal put_user passed"); | 157 | val_##size = (check); \ |
| 158 | ret |= test(!get_user(val_##size, (size __user *)kmem), \ | ||
| 159 | "illegal get_user (" #size ") passed"); \ | ||
| 160 | ret |= test(val_##size != (size)0, \ | ||
| 161 | "zeroing failure for illegal get_user (" #size ")"); \ | ||
| 162 | if (val_##size != (size)0) { \ | ||
| 163 | pr_info("0x%llx != 0\n", \ | ||
| 164 | (unsigned long long)val_##size); \ | ||
| 165 | } \ | ||
| 166 | ret |= test(!put_user(val_##size, (size __user *)kmem), \ | ||
| 167 | "illegal put_user (" #size ") passed"); \ | ||
| 168 | } while (0) | ||
| 169 | |||
| 170 | test_illegal(u8, 0x5a); | ||
| 171 | test_illegal(u16, 0x5a5b); | ||
| 172 | test_illegal(u32, 0x5a5b5c5d); | ||
| 173 | #ifdef TEST_U64 | ||
| 174 | test_illegal(u64, 0x5a5b5c5d6a6b6c6d); | ||
| 175 | #endif | ||
| 176 | #undef test_illegal | ||
| 88 | 177 | ||
| 89 | vm_munmap(user_addr, PAGE_SIZE * 2); | 178 | vm_munmap(user_addr, PAGE_SIZE * 2); |
| 90 | kfree(kmem); | 179 | kfree(kmem); |
diff --git a/lib/timerqueue.c b/lib/timerqueue.c index adc6ee0a5126..4a720ed4fdaf 100644 --- a/lib/timerqueue.c +++ b/lib/timerqueue.c | |||
| @@ -80,8 +80,7 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) | |||
| 80 | if (head->next == node) { | 80 | if (head->next == node) { |
| 81 | struct rb_node *rbn = rb_next(&node->node); | 81 | struct rb_node *rbn = rb_next(&node->node); |
| 82 | 82 | ||
| 83 | head->next = rbn ? | 83 | head->next = rb_entry_safe(rbn, struct timerqueue_node, node); |
| 84 | rb_entry(rbn, struct timerqueue_node, node) : NULL; | ||
| 85 | } | 84 | } |
| 86 | rb_erase(&node->node, &head->head); | 85 | rb_erase(&node->node, &head->head); |
| 87 | RB_CLEAR_NODE(&node->node); | 86 | RB_CLEAR_NODE(&node->node); |
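rb_entry_safe() used above is simply a NULL-tolerant rb_entry(): it evaluates the node pointer once and returns NULL instead of applying container_of() to a NULL pointer. A user-space sketch of an equivalent helper (hypothetical entry_safe()/struct item names, GNU C typeof):

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative only: a NULL-tolerant container_of(), mirroring what
     * rb_entry_safe() does for rb_node pointers.
     */
    struct node {
        int dummy;
    };

    struct item {
        long expires;
        struct node node;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define entry_safe(ptr, type, member)                          \
    ({                                                             \
        typeof(ptr) ____ptr = (ptr);                               \
        ____ptr ? container_of(____ptr, type, member) : NULL;      \
    })

    int main(void)
    {
        struct item it = { .expires = 42 };
        struct node *n = &it.node;
        struct item *found;

        found = entry_safe(n, struct item, node);
        printf("expires=%ld\n", found ? found->expires : -1);

        n = NULL;
        found = entry_safe(n, struct item, node);   /* stays NULL, no bogus pointer */
        printf("found=%p\n", (void *)found);
        return 0;
    }

That is why the timerqueue_del() hunk can drop the open-coded ternary without changing behaviour.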
