Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                |  20
-rw-r--r--  lib/Kconfig.ubsan                |  11
-rw-r--r--  lib/Makefile                     |   3
-rw-r--r--  lib/cpu-notifier-error-inject.c  |  46
-rw-r--r--  lib/dma-debug.c                  |  88
-rw-r--r--  lib/iov_iter.c                   |  24
-rw-r--r--  lib/irq_poll.c                   |  26
-rw-r--r--  lib/radix-tree.c                 |  22
-rw-r--r--  lib/raid6/.gitignore             |   1
-rw-r--r--  lib/raid6/Makefile               |   6
-rw-r--r--  lib/raid6/algos.c                |   6
-rw-r--r--  lib/raid6/recov_s390xc.c         | 116
-rw-r--r--  lib/raid6/s390vx.uc              | 168
-rw-r--r--  lib/random32.c                   |   4
-rw-r--r--  lib/rhashtable.c                 | 320
-rw-r--r--  lib/syscall.c                    |  15
-rw-r--r--  lib/test_bpf.c                   |   1
-rw-r--r--  lib/test_hash.c                  |  26
-rw-r--r--  lib/test_rhashtable.c            |   2
-rw-r--r--  lib/ucs2_string.c                |   2
-rw-r--r--  lib/usercopy.c                   |   9
-rw-r--r--  lib/win_minmax.c                 |  98
22 files changed, 817 insertions, 197 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2307d7c89dac..cab7405f48d2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -821,7 +821,7 @@ config DETECT_HUNG_TASK | |||
| 821 | help | 821 | help |
| 822 | Say Y here to enable the kernel to detect "hung tasks", | 822 | Say Y here to enable the kernel to detect "hung tasks", |
| 823 | which are bugs that cause the task to be stuck in | 823 | which are bugs that cause the task to be stuck in |
| 824 | uninterruptible "D" state indefinitiley. | 824 | uninterruptible "D" state indefinitely. |
| 825 | 825 | ||
| 826 | When a hung task is detected, the kernel will print the | 826 | When a hung task is detected, the kernel will print the |
| 827 | current stack trace (which you should report), but the | 827 | current stack trace (which you should report), but the |
| @@ -1686,24 +1686,6 @@ config LATENCYTOP | |||
| 1686 | Enable this option if you want to use the LatencyTOP tool | 1686 | Enable this option if you want to use the LatencyTOP tool |
| 1687 | to find out which userspace is blocking on what kernel operations. | 1687 | to find out which userspace is blocking on what kernel operations. |
| 1688 | 1688 | ||
| 1689 | config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS | ||
| 1690 | bool | ||
| 1691 | |||
| 1692 | config DEBUG_STRICT_USER_COPY_CHECKS | ||
| 1693 | bool "Strict user copy size checks" | ||
| 1694 | depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS | ||
| 1695 | depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING | ||
| 1696 | help | ||
| 1697 | Enabling this option turns a certain set of sanity checks for user | ||
| 1698 | copy operations into compile time failures. | ||
| 1699 | |||
| 1700 | The copy_from_user() etc checks are there to help test if there | ||
| 1701 | are sufficient security checks on the length argument of | ||
| 1702 | the copy operation, by having gcc prove that the argument is | ||
| 1703 | within bounds. | ||
| 1704 | |||
| 1705 | If unsure, say N. | ||
| 1706 | |||
| 1707 | source kernel/trace/Kconfig | 1689 | source kernel/trace/Kconfig |
| 1708 | 1690 | ||
| 1709 | menu "Runtime Testing" | 1691 | menu "Runtime Testing" |
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 39494af9a84a..bc6e651df68c 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
| @@ -1,6 +1,9 @@ | |||
| 1 | config ARCH_HAS_UBSAN_SANITIZE_ALL | 1 | config ARCH_HAS_UBSAN_SANITIZE_ALL |
| 2 | bool | 2 | bool |
| 3 | 3 | ||
| 4 | config ARCH_WANTS_UBSAN_NO_NULL | ||
| 5 | def_bool n | ||
| 6 | |||
| 4 | config UBSAN | 7 | config UBSAN |
| 5 | bool "Undefined behaviour sanity checker" | 8 | bool "Undefined behaviour sanity checker" |
| 6 | help | 9 | help |
| @@ -34,3 +37,11 @@ config UBSAN_ALIGNMENT | |||
| 34 | This option enables detection of unaligned memory accesses. | 37 | This option enables detection of unaligned memory accesses. |
| 35 | Enabling this option on architectures that support unaligned | 38 | Enabling this option on architectures that support unaligned |
| 36 | accesses may produce a lot of false positives. | 39 | accesses may produce a lot of false positives. |
| 40 | |||
| 41 | config UBSAN_NULL | ||
| 42 | bool "Enable checking of null pointers" | ||
| 43 | depends on UBSAN | ||
| 44 | default y if !ARCH_WANTS_UBSAN_NO_NULL | ||
| 45 | help | ||
| 46 | This option enables detection of memory accesses via a | ||
| 47 | null pointer. | ||
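
As a rough illustration (not part of this patch), UBSAN_NULL corresponds to the compiler's -fsanitize=null instrumentation: loads, stores and member accesses through a pointer get a runtime null check, and a hit produces a UBSAN report instead of relying on the fault handler. A minimal example of what it catches:

        /* Hypothetical snippet -- CONFIG_UBSAN_NULL would report the access
         * below as "member access within null pointer" when p == NULL. */
        struct foo {
                int a;
        };

        static int read_member(struct foo *p)
        {
                return p->a;    /* instrumented null-pointer check */
        }

The new ARCH_WANTS_UBSAN_NO_NULL switch simply lets an architecture opt out of the "default y" above.
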
diff --git a/lib/Makefile b/lib/Makefile
index cfa68eb269e4..df747e5eeb7a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -22,9 +22,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
| 22 | sha1.o chacha20.o md5.o irq_regs.o argv_split.o \ | 22 | sha1.o chacha20.o md5.o irq_regs.o argv_split.o \ |
| 23 | flex_proportions.o ratelimit.o show_mem.o \ | 23 | flex_proportions.o ratelimit.o show_mem.o \ |
| 24 | is_single_threaded.o plist.o decompress.o kobject_uevent.o \ | 24 | is_single_threaded.o plist.o decompress.o kobject_uevent.o \ |
| 25 | earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o | 25 | earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o |
| 26 | 26 | ||
| 27 | obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o | ||
| 28 | lib-$(CONFIG_MMU) += ioremap.o | 27 | lib-$(CONFIG_MMU) += ioremap.o |
| 29 | lib-$(CONFIG_SMP) += cpumask.o | 28 | lib-$(CONFIG_SMP) += cpumask.o |
| 30 | lib-$(CONFIG_HAS_DMA) += dma-noop.o | 29 | lib-$(CONFIG_HAS_DMA) += dma-noop.o |
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
index 707ca24f7b18..0e2c9a1e958a 100644
--- a/lib/cpu-notifier-error-inject.c
+++ b/lib/cpu-notifier-error-inject.c
| @@ -8,16 +8,47 @@ static int priority; | |||
| 8 | module_param(priority, int, 0); | 8 | module_param(priority, int, 0); |
| 9 | MODULE_PARM_DESC(priority, "specify cpu notifier priority"); | 9 | MODULE_PARM_DESC(priority, "specify cpu notifier priority"); |
| 10 | 10 | ||
| 11 | #define UP_PREPARE 0 | ||
| 12 | #define UP_PREPARE_FROZEN 0 | ||
| 13 | #define DOWN_PREPARE 0 | ||
| 14 | #define DOWN_PREPARE_FROZEN 0 | ||
| 15 | |||
| 11 | static struct notifier_err_inject cpu_notifier_err_inject = { | 16 | static struct notifier_err_inject cpu_notifier_err_inject = { |
| 12 | .actions = { | 17 | .actions = { |
| 13 | { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE) }, | 18 | { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE) }, |
| 14 | { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE_FROZEN) }, | 19 | { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE_FROZEN) }, |
| 15 | { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE) }, | 20 | { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE) }, |
| 16 | { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE_FROZEN) }, | 21 | { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE_FROZEN) }, |
| 17 | {} | 22 | {} |
| 18 | } | 23 | } |
| 19 | }; | 24 | }; |
| 20 | 25 | ||
| 26 | static int notf_err_handle(struct notifier_err_inject_action *action) | ||
| 27 | { | ||
| 28 | int ret; | ||
| 29 | |||
| 30 | ret = action->error; | ||
| 31 | if (ret) | ||
| 32 | pr_info("Injecting error (%d) to %s\n", ret, action->name); | ||
| 33 | return ret; | ||
| 34 | } | ||
| 35 | |||
| 36 | static int notf_err_inj_up_prepare(unsigned int cpu) | ||
| 37 | { | ||
| 38 | if (!cpuhp_tasks_frozen) | ||
| 39 | return notf_err_handle(&cpu_notifier_err_inject.actions[0]); | ||
| 40 | else | ||
| 41 | return notf_err_handle(&cpu_notifier_err_inject.actions[1]); | ||
| 42 | } | ||
| 43 | |||
| 44 | static int notf_err_inj_dead(unsigned int cpu) | ||
| 45 | { | ||
| 46 | if (!cpuhp_tasks_frozen) | ||
| 47 | return notf_err_handle(&cpu_notifier_err_inject.actions[2]); | ||
| 48 | else | ||
| 49 | return notf_err_handle(&cpu_notifier_err_inject.actions[3]); | ||
| 50 | } | ||
| 51 | |||
| 21 | static struct dentry *dir; | 52 | static struct dentry *dir; |
| 22 | 53 | ||
| 23 | static int err_inject_init(void) | 54 | static int err_inject_init(void) |
| @@ -29,7 +60,10 @@ static int err_inject_init(void) | |||
| 29 | if (IS_ERR(dir)) | 60 | if (IS_ERR(dir)) |
| 30 | return PTR_ERR(dir); | 61 | return PTR_ERR(dir); |
| 31 | 62 | ||
| 32 | err = register_hotcpu_notifier(&cpu_notifier_err_inject.nb); | 63 | err = cpuhp_setup_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE, |
| 64 | "cpu-err-notif:prepare", | ||
| 65 | notf_err_inj_up_prepare, | ||
| 66 | notf_err_inj_dead); | ||
| 33 | if (err) | 67 | if (err) |
| 34 | debugfs_remove_recursive(dir); | 68 | debugfs_remove_recursive(dir); |
| 35 | 69 | ||
| @@ -38,7 +72,7 @@ static int err_inject_init(void) | |||
| 38 | 72 | ||
| 39 | static void err_inject_exit(void) | 73 | static void err_inject_exit(void) |
| 40 | { | 74 | { |
| 41 | unregister_hotcpu_notifier(&cpu_notifier_err_inject.nb); | 75 | cpuhp_remove_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE); |
| 42 | debugfs_remove_recursive(dir); | 76 | debugfs_remove_recursive(dir); |
| 43 | } | 77 | } |
| 44 | 78 | ||
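
For comparison (a minimal sketch with made-up names, not taken from this patch), a driver that does not need a fixed slot in enum cpuhp_state can do the same notifier-to-state-machine conversion with a dynamically allocated state:

        static int mydrv_hp_state;

        static int mydrv_cpu_online(unsigned int cpu)
        {
                /* bring up per-CPU resources; return 0 or -errno */
                return 0;
        }

        static int mydrv_cpu_offline(unsigned int cpu)
        {
                /* tear down per-CPU resources */
                return 0;
        }

        static int __init mydrv_init(void)
        {
                int ret;

                /* CPUHP_AP_ONLINE_DYN hands back the allocated state number. */
                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                        mydrv_cpu_online, mydrv_cpu_offline);
                if (ret < 0)
                        return ret;
                mydrv_hp_state = ret;
                return 0;
        }

        static void __exit mydrv_exit(void)
        {
                cpuhp_remove_state(mydrv_hp_state);
        }

The error-injection module above instead claims the fixed CPUHP_NOTF_ERR_INJ_PREPARE state, so the injected failure still hits the prepare stage exactly where the old CPU_UP_PREPARE/CPU_DOWN_PREPARE notifier actions fired.
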
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index fcfa1939ac41..8971370bfb16 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/stacktrace.h> | 22 | #include <linux/stacktrace.h> |
| 23 | #include <linux/dma-debug.h> | 23 | #include <linux/dma-debug.h> |
| 24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
| 25 | #include <linux/vmalloc.h> | ||
| 25 | #include <linux/debugfs.h> | 26 | #include <linux/debugfs.h> |
| 26 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
| 27 | #include <linux/export.h> | 28 | #include <linux/export.h> |
| @@ -43,6 +44,7 @@ enum { | |||
| 43 | dma_debug_page, | 44 | dma_debug_page, |
| 44 | dma_debug_sg, | 45 | dma_debug_sg, |
| 45 | dma_debug_coherent, | 46 | dma_debug_coherent, |
| 47 | dma_debug_resource, | ||
| 46 | }; | 48 | }; |
| 47 | 49 | ||
| 48 | enum map_err_types { | 50 | enum map_err_types { |
| @@ -150,8 +152,9 @@ static const char *const maperr2str[] = { | |||
| 150 | [MAP_ERR_CHECKED] = "dma map error checked", | 152 | [MAP_ERR_CHECKED] = "dma map error checked", |
| 151 | }; | 153 | }; |
| 152 | 154 | ||
| 153 | static const char *type2name[4] = { "single", "page", | 155 | static const char *type2name[5] = { "single", "page", |
| 154 | "scather-gather", "coherent" }; | 156 | "scather-gather", "coherent", |
| 157 | "resource" }; | ||
| 155 | 158 | ||
| 156 | static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", | 159 | static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", |
| 157 | "DMA_FROM_DEVICE", "DMA_NONE" }; | 160 | "DMA_FROM_DEVICE", "DMA_NONE" }; |
| @@ -399,6 +402,9 @@ static void hash_bucket_del(struct dma_debug_entry *entry) | |||
| 399 | 402 | ||
| 400 | static unsigned long long phys_addr(struct dma_debug_entry *entry) | 403 | static unsigned long long phys_addr(struct dma_debug_entry *entry) |
| 401 | { | 404 | { |
| 405 | if (entry->type == dma_debug_resource) | ||
| 406 | return __pfn_to_phys(entry->pfn) + entry->offset; | ||
| 407 | |||
| 402 | return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; | 408 | return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; |
| 403 | } | 409 | } |
| 404 | 410 | ||
| @@ -1164,11 +1170,32 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
| 1164 | put_hash_bucket(bucket, &flags); | 1170 | put_hash_bucket(bucket, &flags); |
| 1165 | } | 1171 | } |
| 1166 | 1172 | ||
| 1167 | static void check_for_stack(struct device *dev, void *addr) | 1173 | static void check_for_stack(struct device *dev, |
| 1174 | struct page *page, size_t offset) | ||
| 1168 | { | 1175 | { |
| 1169 | if (object_is_on_stack(addr)) | 1176 | void *addr; |
| 1170 | err_printk(dev, NULL, "DMA-API: device driver maps memory from " | 1177 | struct vm_struct *stack_vm_area = task_stack_vm_area(current); |
| 1171 | "stack [addr=%p]\n", addr); | 1178 | |
| 1179 | if (!stack_vm_area) { | ||
| 1180 | /* Stack is direct-mapped. */ | ||
| 1181 | if (PageHighMem(page)) | ||
| 1182 | return; | ||
| 1183 | addr = page_address(page) + offset; | ||
| 1184 | if (object_is_on_stack(addr)) | ||
| 1185 | err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr); | ||
| 1186 | } else { | ||
| 1187 | /* Stack is vmalloced. */ | ||
| 1188 | int i; | ||
| 1189 | |||
| 1190 | for (i = 0; i < stack_vm_area->nr_pages; i++) { | ||
| 1191 | if (page != stack_vm_area->pages[i]) | ||
| 1192 | continue; | ||
| 1193 | |||
| 1194 | addr = (u8 *)current->stack + i * PAGE_SIZE + offset; | ||
| 1195 | err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr); | ||
| 1196 | break; | ||
| 1197 | } | ||
| 1198 | } | ||
| 1172 | } | 1199 | } |
| 1173 | 1200 | ||
| 1174 | static inline bool overlap(void *addr, unsigned long len, void *start, void *end) | 1201 | static inline bool overlap(void *addr, unsigned long len, void *start, void *end) |
| @@ -1291,10 +1318,11 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | |||
| 1291 | if (map_single) | 1318 | if (map_single) |
| 1292 | entry->type = dma_debug_single; | 1319 | entry->type = dma_debug_single; |
| 1293 | 1320 | ||
| 1321 | check_for_stack(dev, page, offset); | ||
| 1322 | |||
| 1294 | if (!PageHighMem(page)) { | 1323 | if (!PageHighMem(page)) { |
| 1295 | void *addr = page_address(page) + offset; | 1324 | void *addr = page_address(page) + offset; |
| 1296 | 1325 | ||
| 1297 | check_for_stack(dev, addr); | ||
| 1298 | check_for_illegal_area(dev, addr, size); | 1326 | check_for_illegal_area(dev, addr, size); |
| 1299 | } | 1327 | } |
| 1300 | 1328 | ||
| @@ -1386,8 +1414,9 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 1386 | entry->sg_call_ents = nents; | 1414 | entry->sg_call_ents = nents; |
| 1387 | entry->sg_mapped_ents = mapped_ents; | 1415 | entry->sg_mapped_ents = mapped_ents; |
| 1388 | 1416 | ||
| 1417 | check_for_stack(dev, sg_page(s), s->offset); | ||
| 1418 | |||
| 1389 | if (!PageHighMem(sg_page(s))) { | 1419 | if (!PageHighMem(sg_page(s))) { |
| 1390 | check_for_stack(dev, sg_virt(s)); | ||
| 1391 | check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); | 1420 | check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); |
| 1392 | } | 1421 | } |
| 1393 | 1422 | ||
| @@ -1495,6 +1524,49 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1495 | } | 1524 | } |
| 1496 | EXPORT_SYMBOL(debug_dma_free_coherent); | 1525 | EXPORT_SYMBOL(debug_dma_free_coherent); |
| 1497 | 1526 | ||
| 1527 | void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, | ||
| 1528 | int direction, dma_addr_t dma_addr) | ||
| 1529 | { | ||
| 1530 | struct dma_debug_entry *entry; | ||
| 1531 | |||
| 1532 | if (unlikely(dma_debug_disabled())) | ||
| 1533 | return; | ||
| 1534 | |||
| 1535 | entry = dma_entry_alloc(); | ||
| 1536 | if (!entry) | ||
| 1537 | return; | ||
| 1538 | |||
| 1539 | entry->type = dma_debug_resource; | ||
| 1540 | entry->dev = dev; | ||
| 1541 | entry->pfn = PHYS_PFN(addr); | ||
| 1542 | entry->offset = offset_in_page(addr); | ||
| 1543 | entry->size = size; | ||
| 1544 | entry->dev_addr = dma_addr; | ||
| 1545 | entry->direction = direction; | ||
| 1546 | entry->map_err_type = MAP_ERR_NOT_CHECKED; | ||
| 1547 | |||
| 1548 | add_dma_entry(entry); | ||
| 1549 | } | ||
| 1550 | EXPORT_SYMBOL(debug_dma_map_resource); | ||
| 1551 | |||
| 1552 | void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, | ||
| 1553 | size_t size, int direction) | ||
| 1554 | { | ||
| 1555 | struct dma_debug_entry ref = { | ||
| 1556 | .type = dma_debug_resource, | ||
| 1557 | .dev = dev, | ||
| 1558 | .dev_addr = dma_addr, | ||
| 1559 | .size = size, | ||
| 1560 | .direction = direction, | ||
| 1561 | }; | ||
| 1562 | |||
| 1563 | if (unlikely(dma_debug_disabled())) | ||
| 1564 | return; | ||
| 1565 | |||
| 1566 | check_unmap(&ref); | ||
| 1567 | } | ||
| 1568 | EXPORT_SYMBOL(debug_dma_unmap_resource); | ||
| 1569 | |||
| 1498 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 1570 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
| 1499 | size_t size, int direction) | 1571 | size_t size, int direction) |
| 1500 | { | 1572 | { |
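
The two new entry points mirror dma_map_resource()/dma_unmap_resource(). A hedged sketch of the driver-side usage these hooks would track (dev and res are assumed to come from the caller; names are illustrative only):

        #include <linux/ioport.h>
        #include <linux/dma-mapping.h>

        /* Map a physical/MMIO region (e.g. another device's BAR) for DMA.
         * With CONFIG_DMA_API_DEBUG the mapping is recorded as a
         * dma_debug_resource entry and the later unmap is checked against it. */
        static int demo_map_mmio(struct device *dev, struct resource *res,
                                 dma_addr_t *handle)
        {
                dma_addr_t dma;

                dma = dma_map_resource(dev, res->start, resource_size(res),
                                       DMA_BIDIRECTIONAL, 0);
                if (dma_mapping_error(dev, dma))
                        return -ENOMEM;

                *handle = dma;
                return 0;
        }

        static void demo_unmap_mmio(struct device *dev, struct resource *res,
                                    dma_addr_t handle)
        {
                dma_unmap_resource(dev, handle, resource_size(res),
                                   DMA_BIDIRECTIONAL, 0);
        }

Unlike page mappings, a resource mapping is not backed by a struct page, which is why phys_addr() above now special-cases dma_debug_resource entries.
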
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9e8c7386b3a0..7e3138cfc8c9 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
| @@ -291,33 +291,13 @@ done: | |||
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | /* | 293 | /* |
| 294 | * Fault in the first iovec of the given iov_iter, to a maximum length | ||
| 295 | * of bytes. Returns 0 on success, or non-zero if the memory could not be | ||
| 296 | * accessed (ie. because it is an invalid address). | ||
| 297 | * | ||
| 298 | * writev-intensive code may want this to prefault several iovecs -- that | ||
| 299 | * would be possible (callers must not rely on the fact that _only_ the | ||
| 300 | * first iovec will be faulted with the current implementation). | ||
| 301 | */ | ||
| 302 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) | ||
| 303 | { | ||
| 304 | if (!(i->type & (ITER_BVEC|ITER_KVEC))) { | ||
| 305 | char __user *buf = i->iov->iov_base + i->iov_offset; | ||
| 306 | bytes = min(bytes, i->iov->iov_len - i->iov_offset); | ||
| 307 | return fault_in_pages_readable(buf, bytes); | ||
| 308 | } | ||
| 309 | return 0; | ||
| 310 | } | ||
| 311 | EXPORT_SYMBOL(iov_iter_fault_in_readable); | ||
| 312 | |||
| 313 | /* | ||
| 314 | * Fault in one or more iovecs of the given iov_iter, to a maximum length of | 294 | * Fault in one or more iovecs of the given iov_iter, to a maximum length of |
| 315 | * bytes. For each iovec, fault in each page that constitutes the iovec. | 295 | * bytes. For each iovec, fault in each page that constitutes the iovec. |
| 316 | * | 296 | * |
| 317 | * Return 0 on success, or non-zero if the memory could not be accessed (i.e. | 297 | * Return 0 on success, or non-zero if the memory could not be accessed (i.e. |
| 318 | * because it is an invalid address). | 298 | * because it is an invalid address). |
| 319 | */ | 299 | */ |
| 320 | int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes) | 300 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) |
| 321 | { | 301 | { |
| 322 | size_t skip = i->iov_offset; | 302 | size_t skip = i->iov_offset; |
| 323 | const struct iovec *iov; | 303 | const struct iovec *iov; |
| @@ -334,7 +314,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes) | |||
| 334 | } | 314 | } |
| 335 | return 0; | 315 | return 0; |
| 336 | } | 316 | } |
| 337 | EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable); | 317 | EXPORT_SYMBOL(iov_iter_fault_in_readable); |
| 338 | 318 | ||
| 339 | void iov_iter_init(struct iov_iter *i, int direction, | 319 | void iov_iter_init(struct iov_iter *i, int direction, |
| 340 | const struct iovec *iov, unsigned long nr_segs, | 320 | const struct iovec *iov, unsigned long nr_segs, |
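
For callers only the name changes: the single-iovec fast path is removed and the multi-iovec walk now answers to iov_iter_fault_in_readable(). A sketch of the usual write-path pattern (abridged and loosely modelled on generic_perform_write(); surrounding variables are assumed):

        /* Pre-fault the next chunk of the user buffer before taking the page
         * lock, so the atomic copy below cannot deadlock on a page fault. */
        while (iov_iter_count(i)) {
                size_t bytes = min_t(size_t, iov_iter_count(i), PAGE_SIZE);

                if (unlikely(iov_iter_fault_in_readable(i, bytes)))
                        return -EFAULT;

                /* ... find and lock the page, then: */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
                iov_iter_advance(i, copied);
                /* ... unlock, mark dirty, advance offset ... */
        }
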
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 836f7db4e548..2be55692aa43 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
| @@ -184,30 +184,21 @@ void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn) | |||
| 184 | } | 184 | } |
| 185 | EXPORT_SYMBOL(irq_poll_init); | 185 | EXPORT_SYMBOL(irq_poll_init); |
| 186 | 186 | ||
| 187 | static int irq_poll_cpu_notify(struct notifier_block *self, | 187 | static int irq_poll_cpu_dead(unsigned int cpu) |
| 188 | unsigned long action, void *hcpu) | ||
| 189 | { | 188 | { |
| 190 | /* | 189 | /* |
| 191 | * If a CPU goes away, splice its entries to the current CPU | 190 | * If a CPU goes away, splice its entries to the current CPU |
| 192 | * and trigger a run of the softirq | 191 | * and trigger a run of the softirq |
| 193 | */ | 192 | */ |
| 194 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | 193 | local_irq_disable(); |
| 195 | int cpu = (unsigned long) hcpu; | 194 | list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), |
| 196 | 195 | this_cpu_ptr(&blk_cpu_iopoll)); | |
| 197 | local_irq_disable(); | 196 | __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); |
| 198 | list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), | 197 | local_irq_enable(); |
| 199 | this_cpu_ptr(&blk_cpu_iopoll)); | ||
| 200 | __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); | ||
| 201 | local_irq_enable(); | ||
| 202 | } | ||
| 203 | 198 | ||
| 204 | return NOTIFY_OK; | 199 | return 0; |
| 205 | } | 200 | } |
| 206 | 201 | ||
| 207 | static struct notifier_block irq_poll_cpu_notifier = { | ||
| 208 | .notifier_call = irq_poll_cpu_notify, | ||
| 209 | }; | ||
| 210 | |||
| 211 | static __init int irq_poll_setup(void) | 202 | static __init int irq_poll_setup(void) |
| 212 | { | 203 | { |
| 213 | int i; | 204 | int i; |
| @@ -216,7 +207,8 @@ static __init int irq_poll_setup(void) | |||
| 216 | INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); | 207 | INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); |
| 217 | 208 | ||
| 218 | open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq); | 209 | open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq); |
| 219 | register_hotcpu_notifier(&irq_poll_cpu_notifier); | 210 | cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL, |
| 211 | irq_poll_cpu_dead); | ||
| 220 | return 0; | 212 | return 0; |
| 221 | } | 213 | } |
| 222 | subsys_initcall(irq_poll_setup); | 214 | subsys_initcall(irq_poll_setup); |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1b7bf7314141..8e6d552c40dd 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
| @@ -105,10 +105,10 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent, | |||
| 105 | 105 | ||
| 106 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | 106 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 107 | if (radix_tree_is_internal_node(entry)) { | 107 | if (radix_tree_is_internal_node(entry)) { |
| 108 | unsigned long siboff = get_slot_offset(parent, entry); | 108 | if (is_sibling_entry(parent, entry)) { |
| 109 | if (siboff < RADIX_TREE_MAP_SIZE) { | 109 | void **sibentry = (void **) entry_to_node(entry); |
| 110 | offset = siboff; | 110 | offset = get_slot_offset(parent, sibentry); |
| 111 | entry = rcu_dereference_raw(parent->slots[offset]); | 111 | entry = rcu_dereference_raw(*sibentry); |
| 112 | } | 112 | } |
| 113 | } | 113 | } |
| 114 | #endif | 114 | #endif |
| @@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
| 1583 | } | 1583 | } |
| 1584 | EXPORT_SYMBOL(radix_tree_delete); | 1584 | EXPORT_SYMBOL(radix_tree_delete); |
| 1585 | 1585 | ||
| 1586 | struct radix_tree_node *radix_tree_replace_clear_tags( | 1586 | void radix_tree_clear_tags(struct radix_tree_root *root, |
| 1587 | struct radix_tree_root *root, | 1587 | struct radix_tree_node *node, |
| 1588 | unsigned long index, void *entry) | 1588 | void **slot) |
| 1589 | { | 1589 | { |
| 1590 | struct radix_tree_node *node; | ||
| 1591 | void **slot; | ||
| 1592 | |||
| 1593 | __radix_tree_lookup(root, index, &node, &slot); | ||
| 1594 | |||
| 1595 | if (node) { | 1590 | if (node) { |
| 1596 | unsigned int tag, offset = get_slot_offset(node, slot); | 1591 | unsigned int tag, offset = get_slot_offset(node, slot); |
| 1597 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) | 1592 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| @@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags( | |||
| 1600 | /* Clear root node tags */ | 1595 | /* Clear root node tags */ |
| 1601 | root->gfp_mask &= __GFP_BITS_MASK; | 1596 | root->gfp_mask &= __GFP_BITS_MASK; |
| 1602 | } | 1597 | } |
| 1603 | |||
| 1604 | radix_tree_replace_slot(slot, entry); | ||
| 1605 | return node; | ||
| 1606 | } | 1598 | } |
| 1607 | 1599 | ||
| 1608 | /** | 1600 | /** |
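
With radix_tree_replace_clear_tags() gone, a caller that wants the old behaviour now strings the primitives together itself. A rough sketch under that assumption (generic names, not from this patch):

        /* Under the tree lock: clear the tags of the entry at 'index' and
         * replace it, using the separated lookup/clear/replace primitives. */
        static void demo_replace_clear_tags(struct radix_tree_root *root,
                                            unsigned long index, void *new_entry)
        {
                struct radix_tree_node *node;
                void **slot;

                if (!__radix_tree_lookup(root, index, &node, &slot))
                        return;         /* nothing stored at this index */

                radix_tree_clear_tags(root, node, slot);
                radix_tree_replace_slot(slot, new_entry);
        }
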
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 0a7e494b2bcd..f01b1cb04f91 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
| @@ -3,3 +3,4 @@ altivec*.c | |||
| 3 | int*.c | 3 | int*.c |
| 4 | tables.c | 4 | tables.c |
| 5 | neon?.c | 5 | neon?.c |
| 6 | s390vx?.c | ||
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 3b10a48fa040..29f503ebfd60 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
| @@ -7,6 +7,7 @@ raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o | |||
| 7 | raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o | 7 | raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o |
| 8 | raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o | 8 | raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o |
| 9 | raid6_pq-$(CONFIG_TILEGX) += tilegx8.o | 9 | raid6_pq-$(CONFIG_TILEGX) += tilegx8.o |
| 10 | raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o | ||
| 10 | 11 | ||
| 11 | hostprogs-y += mktables | 12 | hostprogs-y += mktables |
| 12 | 13 | ||
| @@ -116,6 +117,11 @@ $(obj)/tilegx8.c: UNROLL := 8 | |||
| 116 | $(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE | 117 | $(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE |
| 117 | $(call if_changed,unroll) | 118 | $(call if_changed,unroll) |
| 118 | 119 | ||
| 120 | targets += s390vx8.c | ||
| 121 | $(obj)/s390vx8.c: UNROLL := 8 | ||
| 122 | $(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE | ||
| 123 | $(call if_changed,unroll) | ||
| 124 | |||
| 119 | quiet_cmd_mktable = TABLE $@ | 125 | quiet_cmd_mktable = TABLE $@ |
| 120 | cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) | 126 | cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) |
| 121 | 127 | ||
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 975c6e0434bd..592ff49df47d 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
| @@ -69,6 +69,9 @@ const struct raid6_calls * const raid6_algos[] = { | |||
| 69 | #if defined(CONFIG_TILEGX) | 69 | #if defined(CONFIG_TILEGX) |
| 70 | &raid6_tilegx8, | 70 | &raid6_tilegx8, |
| 71 | #endif | 71 | #endif |
| 72 | #if defined(CONFIG_S390) | ||
| 73 | &raid6_s390vx8, | ||
| 74 | #endif | ||
| 72 | &raid6_intx1, | 75 | &raid6_intx1, |
| 73 | &raid6_intx2, | 76 | &raid6_intx2, |
| 74 | &raid6_intx4, | 77 | &raid6_intx4, |
| @@ -95,6 +98,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = { | |||
| 95 | #ifdef CONFIG_AS_SSSE3 | 98 | #ifdef CONFIG_AS_SSSE3 |
| 96 | &raid6_recov_ssse3, | 99 | &raid6_recov_ssse3, |
| 97 | #endif | 100 | #endif |
| 101 | #ifdef CONFIG_S390 | ||
| 102 | &raid6_recov_s390xc, | ||
| 103 | #endif | ||
| 98 | &raid6_recov_intx1, | 104 | &raid6_recov_intx1, |
| 99 | NULL | 105 | NULL |
| 100 | }; | 106 | }; |
diff --git a/lib/raid6/recov_s390xc.c b/lib/raid6/recov_s390xc.c
new file mode 100644
index 000000000000..b042dac826cc
--- /dev/null
+++ b/lib/raid6/recov_s390xc.c
| @@ -0,0 +1,116 @@ | |||
| 1 | /* | ||
| 2 | * RAID-6 data recovery in dual failure mode based on the XC instruction. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2016 | ||
| 5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/export.h> | ||
| 9 | #include <linux/raid/pq.h> | ||
| 10 | |||
| 11 | static inline void xor_block(u8 *p1, u8 *p2) | ||
| 12 | { | ||
| 13 | typedef struct { u8 _[256]; } addrtype; | ||
| 14 | |||
| 15 | asm volatile( | ||
| 16 | " xc 0(256,%[p1]),0(%[p2])\n" | ||
| 17 | : "+m" (*(addrtype *) p1) : "m" (*(addrtype *) p2), | ||
| 18 | [p1] "a" (p1), [p2] "a" (p2) : "cc"); | ||
| 19 | } | ||
| 20 | |||
| 21 | /* Recover two failed data blocks. */ | ||
| 22 | static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila, | ||
| 23 | int failb, void **ptrs) | ||
| 24 | { | ||
| 25 | u8 *p, *q, *dp, *dq; | ||
| 26 | const u8 *pbmul; /* P multiplier table for B data */ | ||
| 27 | const u8 *qmul; /* Q multiplier table (for both) */ | ||
| 28 | int i; | ||
| 29 | |||
| 30 | p = (u8 *)ptrs[disks-2]; | ||
| 31 | q = (u8 *)ptrs[disks-1]; | ||
| 32 | |||
| 33 | /* Compute syndrome with zero for the missing data pages | ||
| 34 | Use the dead data pages as temporary storage for | ||
| 35 | delta p and delta q */ | ||
| 36 | dp = (u8 *)ptrs[faila]; | ||
| 37 | ptrs[faila] = (void *)raid6_empty_zero_page; | ||
| 38 | ptrs[disks-2] = dp; | ||
| 39 | dq = (u8 *)ptrs[failb]; | ||
| 40 | ptrs[failb] = (void *)raid6_empty_zero_page; | ||
| 41 | ptrs[disks-1] = dq; | ||
| 42 | |||
| 43 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
| 44 | |||
| 45 | /* Restore pointer table */ | ||
| 46 | ptrs[faila] = dp; | ||
| 47 | ptrs[failb] = dq; | ||
| 48 | ptrs[disks-2] = p; | ||
| 49 | ptrs[disks-1] = q; | ||
| 50 | |||
| 51 | /* Now, pick the proper data tables */ | ||
| 52 | pbmul = raid6_gfmul[raid6_gfexi[failb-faila]]; | ||
| 53 | qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]]; | ||
| 54 | |||
| 55 | /* Now do it... */ | ||
| 56 | while (bytes) { | ||
| 57 | xor_block(dp, p); | ||
| 58 | xor_block(dq, q); | ||
| 59 | for (i = 0; i < 256; i++) | ||
| 60 | dq[i] = pbmul[dp[i]] ^ qmul[dq[i]]; | ||
| 61 | xor_block(dp, dq); | ||
| 62 | p += 256; | ||
| 63 | q += 256; | ||
| 64 | dp += 256; | ||
| 65 | dq += 256; | ||
| 66 | bytes -= 256; | ||
| 67 | } | ||
| 68 | } | ||
| 69 | |||
| 70 | /* Recover failure of one data block plus the P block */ | ||
| 71 | static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila, | ||
| 72 | void **ptrs) | ||
| 73 | { | ||
| 74 | u8 *p, *q, *dq; | ||
| 75 | const u8 *qmul; /* Q multiplier table */ | ||
| 76 | int i; | ||
| 77 | |||
| 78 | p = (u8 *)ptrs[disks-2]; | ||
| 79 | q = (u8 *)ptrs[disks-1]; | ||
| 80 | |||
| 81 | /* Compute syndrome with zero for the missing data page | ||
| 82 | Use the dead data page as temporary storage for delta q */ | ||
| 83 | dq = (u8 *)ptrs[faila]; | ||
| 84 | ptrs[faila] = (void *)raid6_empty_zero_page; | ||
| 85 | ptrs[disks-1] = dq; | ||
| 86 | |||
| 87 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
| 88 | |||
| 89 | /* Restore pointer table */ | ||
| 90 | ptrs[faila] = dq; | ||
| 91 | ptrs[disks-1] = q; | ||
| 92 | |||
| 93 | /* Now, pick the proper data tables */ | ||
| 94 | qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]]; | ||
| 95 | |||
| 96 | /* Now do it... */ | ||
| 97 | while (bytes) { | ||
| 98 | xor_block(dq, q); | ||
| 99 | for (i = 0; i < 256; i++) | ||
| 100 | dq[i] = qmul[dq[i]]; | ||
| 101 | xor_block(p, dq); | ||
| 102 | p += 256; | ||
| 103 | q += 256; | ||
| 104 | dq += 256; | ||
| 105 | bytes -= 256; | ||
| 106 | } | ||
| 107 | } | ||
| 108 | |||
| 109 | |||
| 110 | const struct raid6_recov_calls raid6_recov_s390xc = { | ||
| 111 | .data2 = raid6_2data_recov_s390xc, | ||
| 112 | .datap = raid6_datap_recov_s390xc, | ||
| 113 | .valid = NULL, | ||
| 114 | .name = "s390xc", | ||
| 115 | .priority = 1, | ||
| 116 | }; | ||
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
new file mode 100644
index 000000000000..7b45191a655f
--- /dev/null
+++ b/lib/raid6/s390vx.uc
| @@ -0,0 +1,168 @@ | |||
| 1 | /* | ||
| 2 | * raid6_vx$#.c | ||
| 3 | * | ||
| 4 | * $#-way unrolled RAID6 gen/xor functions for s390 | ||
| 5 | * based on the vector facility | ||
| 6 | * | ||
| 7 | * Copyright IBM Corp. 2016 | ||
| 8 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
| 9 | * | ||
| 10 | * This file is postprocessed using unroll.awk. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/raid/pq.h> | ||
| 14 | #include <asm/fpu/api.h> | ||
| 15 | |||
| 16 | asm(".include \"asm/vx-insn.h\"\n"); | ||
| 17 | |||
| 18 | #define NSIZE 16 | ||
| 19 | |||
| 20 | static inline void LOAD_CONST(void) | ||
| 21 | { | ||
| 22 | asm volatile("VREPIB %v24,7"); | ||
| 23 | asm volatile("VREPIB %v25,0x1d"); | ||
| 24 | } | ||
| 25 | |||
| 26 | /* | ||
| 27 | * The SHLBYTE() operation shifts each of the 16 bytes in | ||
| 28 | * vector register y left by 1 bit and stores the result in | ||
| 29 | * vector register x. | ||
| 30 | */ | ||
| 31 | static inline void SHLBYTE(int x, int y) | ||
| 32 | { | ||
| 33 | asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y)); | ||
| 34 | } | ||
| 35 | |||
| 36 | /* | ||
| 37 | * For each of the 16 bytes in the vector register y the MASK() | ||
| 38 | * operation returns 0xFF if the high bit of the byte is 1, | ||
| 39 | * or 0x00 if the high bit is 0. The result is stored in vector | ||
| 40 | * register x. | ||
| 41 | */ | ||
| 42 | static inline void MASK(int x, int y) | ||
| 43 | { | ||
| 44 | asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y)); | ||
| 45 | } | ||
| 46 | |||
| 47 | static inline void AND(int x, int y, int z) | ||
| 48 | { | ||
| 49 | asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z)); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void XOR(int x, int y, int z) | ||
| 53 | { | ||
| 54 | asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z)); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void LOAD_DATA(int x, int n, u8 *ptr) | ||
| 58 | { | ||
| 59 | typedef struct { u8 _[16*n]; } addrtype; | ||
| 60 | register addrtype *__ptr asm("1") = (addrtype *) ptr; | ||
| 61 | |||
| 62 | asm volatile ("VLM %2,%3,0,%r1" | ||
| 63 | : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1)); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void STORE_DATA(int x, int n, u8 *ptr) | ||
| 67 | { | ||
| 68 | typedef struct { u8 _[16*n]; } addrtype; | ||
| 69 | register addrtype *__ptr asm("1") = (addrtype *) ptr; | ||
| 70 | |||
| 71 | asm volatile ("VSTM %2,%3,0,1" | ||
| 72 | : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1)); | ||
| 73 | } | ||
| 74 | |||
| 75 | static inline void COPY_VEC(int x, int y) | ||
| 76 | { | ||
| 77 | asm volatile ("VLR %0,%1" : : "i" (x), "i" (y)); | ||
| 78 | } | ||
| 79 | |||
| 80 | static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
| 81 | { | ||
| 82 | struct kernel_fpu vxstate; | ||
| 83 | u8 **dptr, *p, *q; | ||
| 84 | int d, z, z0; | ||
| 85 | |||
| 86 | kernel_fpu_begin(&vxstate, KERNEL_VXR); | ||
| 87 | LOAD_CONST(); | ||
| 88 | |||
| 89 | dptr = (u8 **) ptrs; | ||
| 90 | z0 = disks - 3; /* Highest data disk */ | ||
| 91 | p = dptr[z0 + 1]; /* XOR parity */ | ||
| 92 | q = dptr[z0 + 2]; /* RS syndrome */ | ||
| 93 | |||
| 94 | for (d = 0; d < bytes; d += $#*NSIZE) { | ||
| 95 | LOAD_DATA(0,$#,&dptr[z0][d]); | ||
| 96 | COPY_VEC(8+$$,0+$$); | ||
| 97 | for (z = z0 - 1; z >= 0; z--) { | ||
| 98 | MASK(16+$$,8+$$); | ||
| 99 | AND(16+$$,16+$$,25); | ||
| 100 | SHLBYTE(8+$$,8+$$); | ||
| 101 | XOR(8+$$,8+$$,16+$$); | ||
| 102 | LOAD_DATA(16,$#,&dptr[z][d]); | ||
| 103 | XOR(0+$$,0+$$,16+$$); | ||
| 104 | XOR(8+$$,8+$$,16+$$); | ||
| 105 | } | ||
| 106 | STORE_DATA(0,$#,&p[d]); | ||
| 107 | STORE_DATA(8,$#,&q[d]); | ||
| 108 | } | ||
| 109 | kernel_fpu_end(&vxstate, KERNEL_VXR); | ||
| 110 | } | ||
| 111 | |||
| 112 | static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop, | ||
| 113 | size_t bytes, void **ptrs) | ||
| 114 | { | ||
| 115 | struct kernel_fpu vxstate; | ||
| 116 | u8 **dptr, *p, *q; | ||
| 117 | int d, z, z0; | ||
| 118 | |||
| 119 | dptr = (u8 **) ptrs; | ||
| 120 | z0 = stop; /* P/Q right side optimization */ | ||
| 121 | p = dptr[disks - 2]; /* XOR parity */ | ||
| 122 | q = dptr[disks - 1]; /* RS syndrome */ | ||
| 123 | |||
| 124 | kernel_fpu_begin(&vxstate, KERNEL_VXR); | ||
| 125 | LOAD_CONST(); | ||
| 126 | |||
| 127 | for (d = 0; d < bytes; d += $#*NSIZE) { | ||
| 128 | /* P/Q data pages */ | ||
| 129 | LOAD_DATA(0,$#,&dptr[z0][d]); | ||
| 130 | COPY_VEC(8+$$,0+$$); | ||
| 131 | for (z = z0 - 1; z >= start; z--) { | ||
| 132 | MASK(16+$$,8+$$); | ||
| 133 | AND(16+$$,16+$$,25); | ||
| 134 | SHLBYTE(8+$$,8+$$); | ||
| 135 | XOR(8+$$,8+$$,16+$$); | ||
| 136 | LOAD_DATA(16,$#,&dptr[z][d]); | ||
| 137 | XOR(0+$$,0+$$,16+$$); | ||
| 138 | XOR(8+$$,8+$$,16+$$); | ||
| 139 | } | ||
| 140 | /* P/Q left side optimization */ | ||
| 141 | for (z = start - 1; z >= 0; z--) { | ||
| 142 | MASK(16+$$,8+$$); | ||
| 143 | AND(16+$$,16+$$,25); | ||
| 144 | SHLBYTE(8+$$,8+$$); | ||
| 145 | XOR(8+$$,8+$$,16+$$); | ||
| 146 | } | ||
| 147 | LOAD_DATA(16,$#,&p[d]); | ||
| 148 | XOR(16+$$,16+$$,0+$$); | ||
| 149 | STORE_DATA(16,$#,&p[d]); | ||
| 150 | LOAD_DATA(16,$#,&q[d]); | ||
| 151 | XOR(16+$$,16+$$,8+$$); | ||
| 152 | STORE_DATA(16,$#,&q[d]); | ||
| 153 | } | ||
| 154 | kernel_fpu_end(&vxstate, KERNEL_VXR); | ||
| 155 | } | ||
| 156 | |||
| 157 | static int raid6_s390vx$#_valid(void) | ||
| 158 | { | ||
| 159 | return MACHINE_HAS_VX; | ||
| 160 | } | ||
| 161 | |||
| 162 | const struct raid6_calls raid6_s390vx$# = { | ||
| 163 | raid6_s390vx$#_gen_syndrome, | ||
| 164 | raid6_s390vx$#_xor_syndrome, | ||
| 165 | raid6_s390vx$#_valid, | ||
| 166 | "vx128x$#", | ||
| 167 | 1 | ||
| 168 | }; | ||
diff --git a/lib/random32.c b/lib/random32.c
index 69ed593aab07..915982b304bb 100644
--- a/lib/random32.c
+++ b/lib/random32.c
| @@ -81,7 +81,7 @@ u32 prandom_u32(void) | |||
| 81 | u32 res; | 81 | u32 res; |
| 82 | 82 | ||
| 83 | res = prandom_u32_state(state); | 83 | res = prandom_u32_state(state); |
| 84 | put_cpu_var(state); | 84 | put_cpu_var(net_rand_state); |
| 85 | 85 | ||
| 86 | return res; | 86 | return res; |
| 87 | } | 87 | } |
| @@ -128,7 +128,7 @@ void prandom_bytes(void *buf, size_t bytes) | |||
| 128 | struct rnd_state *state = &get_cpu_var(net_rand_state); | 128 | struct rnd_state *state = &get_cpu_var(net_rand_state); |
| 129 | 129 | ||
| 130 | prandom_bytes_state(state, buf, bytes); | 130 | prandom_bytes_state(state, buf, bytes); |
| 131 | put_cpu_var(state); | 131 | put_cpu_var(net_rand_state); |
| 132 | } | 132 | } |
| 133 | EXPORT_SYMBOL(prandom_bytes); | 133 | EXPORT_SYMBOL(prandom_bytes); |
| 134 | 134 | ||
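
get_cpu_var() and put_cpu_var() are macros keyed on the per-CPU variable itself, so naming net_rand_state in both halves keeps the preempt-disable/enable pairing explicit. The canonical pattern, shown with a made-up per-CPU variable:

        /* Illustrative only. */
        struct demo_stats {
                unsigned long events;
        };
        static DEFINE_PER_CPU(struct demo_stats, demo_stats);

        static void demo_bump(void)
        {
                /* get_cpu_var() disables preemption and yields this CPU's copy */
                struct demo_stats *s = &get_cpu_var(demo_stats);

                s->events++;
                put_cpu_var(demo_stats);        /* re-enables preemption */
        }
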
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..32d0ad058380 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
| @@ -30,7 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | #define HASH_DEFAULT_SIZE 64UL | 31 | #define HASH_DEFAULT_SIZE 64UL |
| 32 | #define HASH_MIN_SIZE 4U | 32 | #define HASH_MIN_SIZE 4U |
| 33 | #define BUCKET_LOCKS_PER_CPU 128UL | 33 | #define BUCKET_LOCKS_PER_CPU 32UL |
| 34 | 34 | ||
| 35 | static u32 head_hashfn(struct rhashtable *ht, | 35 | static u32 head_hashfn(struct rhashtable *ht, |
| 36 | const struct bucket_table *tbl, | 36 | const struct bucket_table *tbl, |
| @@ -70,21 +70,25 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, | |||
| 70 | unsigned int nr_pcpus = num_possible_cpus(); | 70 | unsigned int nr_pcpus = num_possible_cpus(); |
| 71 | #endif | 71 | #endif |
| 72 | 72 | ||
| 73 | nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); | 73 | nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL); |
| 74 | size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); | 74 | size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); |
| 75 | 75 | ||
| 76 | /* Never allocate more than 0.5 locks per bucket */ | 76 | /* Never allocate more than 0.5 locks per bucket */ |
| 77 | size = min_t(unsigned int, size, tbl->size >> 1); | 77 | size = min_t(unsigned int, size, tbl->size >> 1); |
| 78 | 78 | ||
| 79 | if (sizeof(spinlock_t) != 0) { | 79 | if (sizeof(spinlock_t) != 0) { |
| 80 | tbl->locks = NULL; | ||
| 80 | #ifdef CONFIG_NUMA | 81 | #ifdef CONFIG_NUMA |
| 81 | if (size * sizeof(spinlock_t) > PAGE_SIZE && | 82 | if (size * sizeof(spinlock_t) > PAGE_SIZE && |
| 82 | gfp == GFP_KERNEL) | 83 | gfp == GFP_KERNEL) |
| 83 | tbl->locks = vmalloc(size * sizeof(spinlock_t)); | 84 | tbl->locks = vmalloc(size * sizeof(spinlock_t)); |
| 84 | else | ||
| 85 | #endif | 85 | #endif |
| 86 | tbl->locks = kmalloc_array(size, sizeof(spinlock_t), | 86 | if (gfp != GFP_KERNEL) |
| 87 | gfp); | 87 | gfp |= __GFP_NOWARN | __GFP_NORETRY; |
| 88 | |||
| 89 | if (!tbl->locks) | ||
| 90 | tbl->locks = kmalloc_array(size, sizeof(spinlock_t), | ||
| 91 | gfp); | ||
| 88 | if (!tbl->locks) | 92 | if (!tbl->locks) |
| 89 | return -ENOMEM; | 93 | return -ENOMEM; |
| 90 | for (i = 0; i < size; i++) | 94 | for (i = 0; i < size; i++) |
| @@ -321,12 +325,14 @@ static int rhashtable_expand(struct rhashtable *ht) | |||
| 321 | static int rhashtable_shrink(struct rhashtable *ht) | 325 | static int rhashtable_shrink(struct rhashtable *ht) |
| 322 | { | 326 | { |
| 323 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 327 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); |
| 324 | unsigned int size; | 328 | unsigned int nelems = atomic_read(&ht->nelems); |
| 329 | unsigned int size = 0; | ||
| 325 | int err; | 330 | int err; |
| 326 | 331 | ||
| 327 | ASSERT_RHT_MUTEX(ht); | 332 | ASSERT_RHT_MUTEX(ht); |
| 328 | 333 | ||
| 329 | size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); | 334 | if (nelems) |
| 335 | size = roundup_pow_of_two(nelems * 3 / 2); | ||
| 330 | if (size < ht->p.min_size) | 336 | if (size < ht->p.min_size) |
| 331 | size = ht->p.min_size; | 337 | size = ht->p.min_size; |
| 332 | 338 | ||
| @@ -372,22 +378,8 @@ static void rht_deferred_worker(struct work_struct *work) | |||
| 372 | schedule_work(&ht->run_work); | 378 | schedule_work(&ht->run_work); |
| 373 | } | 379 | } |
| 374 | 380 | ||
| 375 | static bool rhashtable_check_elasticity(struct rhashtable *ht, | 381 | static int rhashtable_insert_rehash(struct rhashtable *ht, |
| 376 | struct bucket_table *tbl, | 382 | struct bucket_table *tbl) |
| 377 | unsigned int hash) | ||
| 378 | { | ||
| 379 | unsigned int elasticity = ht->elasticity; | ||
| 380 | struct rhash_head *head; | ||
| 381 | |||
| 382 | rht_for_each(head, tbl, hash) | ||
| 383 | if (!--elasticity) | ||
| 384 | return true; | ||
| 385 | |||
| 386 | return false; | ||
| 387 | } | ||
| 388 | |||
| 389 | int rhashtable_insert_rehash(struct rhashtable *ht, | ||
| 390 | struct bucket_table *tbl) | ||
| 391 | { | 383 | { |
| 392 | struct bucket_table *old_tbl; | 384 | struct bucket_table *old_tbl; |
| 393 | struct bucket_table *new_tbl; | 385 | struct bucket_table *new_tbl; |
| @@ -433,61 +425,172 @@ fail: | |||
| 433 | 425 | ||
| 434 | return err; | 426 | return err; |
| 435 | } | 427 | } |
| 436 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); | ||
| 437 | 428 | ||
| 438 | struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, | 429 | static void *rhashtable_lookup_one(struct rhashtable *ht, |
| 439 | const void *key, | 430 | struct bucket_table *tbl, unsigned int hash, |
| 440 | struct rhash_head *obj, | 431 | const void *key, struct rhash_head *obj) |
| 441 | struct bucket_table *tbl) | ||
| 442 | { | 432 | { |
| 433 | struct rhashtable_compare_arg arg = { | ||
| 434 | .ht = ht, | ||
| 435 | .key = key, | ||
| 436 | }; | ||
| 437 | struct rhash_head __rcu **pprev; | ||
| 443 | struct rhash_head *head; | 438 | struct rhash_head *head; |
| 444 | unsigned int hash; | 439 | int elasticity; |
| 445 | int err; | ||
| 446 | 440 | ||
| 447 | tbl = rhashtable_last_table(ht, tbl); | 441 | elasticity = ht->elasticity; |
| 448 | hash = head_hashfn(ht, tbl, obj); | 442 | pprev = &tbl->buckets[hash]; |
| 449 | spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); | 443 | rht_for_each(head, tbl, hash) { |
| 444 | struct rhlist_head *list; | ||
| 445 | struct rhlist_head *plist; | ||
| 450 | 446 | ||
| 451 | err = -EEXIST; | 447 | elasticity--; |
| 452 | if (key && rhashtable_lookup_fast(ht, key, ht->p)) | 448 | if (!key || |
| 453 | goto exit; | 449 | (ht->p.obj_cmpfn ? |
| 450 | ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : | ||
| 451 | rhashtable_compare(&arg, rht_obj(ht, head)))) | ||
| 452 | continue; | ||
| 454 | 453 | ||
| 455 | err = -E2BIG; | 454 | if (!ht->rhlist) |
| 456 | if (unlikely(rht_grow_above_max(ht, tbl))) | 455 | return rht_obj(ht, head); |
| 457 | goto exit; | 456 | |
| 457 | list = container_of(obj, struct rhlist_head, rhead); | ||
| 458 | plist = container_of(head, struct rhlist_head, rhead); | ||
| 459 | |||
| 460 | RCU_INIT_POINTER(list->next, plist); | ||
| 461 | head = rht_dereference_bucket(head->next, tbl, hash); | ||
| 462 | RCU_INIT_POINTER(list->rhead.next, head); | ||
| 463 | rcu_assign_pointer(*pprev, obj); | ||
| 458 | 464 | ||
| 459 | err = -EAGAIN; | 465 | return NULL; |
| 460 | if (rhashtable_check_elasticity(ht, tbl, hash) || | 466 | } |
| 461 | rht_grow_above_100(ht, tbl)) | ||
| 462 | goto exit; | ||
| 463 | 467 | ||
| 464 | err = 0; | 468 | if (elasticity <= 0) |
| 469 | return ERR_PTR(-EAGAIN); | ||
| 470 | |||
| 471 | return ERR_PTR(-ENOENT); | ||
| 472 | } | ||
| 473 | |||
| 474 | static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, | ||
| 475 | struct bucket_table *tbl, | ||
| 476 | unsigned int hash, | ||
| 477 | struct rhash_head *obj, | ||
| 478 | void *data) | ||
| 479 | { | ||
| 480 | struct bucket_table *new_tbl; | ||
| 481 | struct rhash_head *head; | ||
| 482 | |||
| 483 | if (!IS_ERR_OR_NULL(data)) | ||
| 484 | return ERR_PTR(-EEXIST); | ||
| 485 | |||
| 486 | if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) | ||
| 487 | return ERR_CAST(data); | ||
| 488 | |||
| 489 | new_tbl = rcu_dereference(tbl->future_tbl); | ||
| 490 | if (new_tbl) | ||
| 491 | return new_tbl; | ||
| 492 | |||
| 493 | if (PTR_ERR(data) != -ENOENT) | ||
| 494 | return ERR_CAST(data); | ||
| 495 | |||
| 496 | if (unlikely(rht_grow_above_max(ht, tbl))) | ||
| 497 | return ERR_PTR(-E2BIG); | ||
| 498 | |||
| 499 | if (unlikely(rht_grow_above_100(ht, tbl))) | ||
| 500 | return ERR_PTR(-EAGAIN); | ||
| 465 | 501 | ||
| 466 | head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); | 502 | head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); |
| 467 | 503 | ||
| 468 | RCU_INIT_POINTER(obj->next, head); | 504 | RCU_INIT_POINTER(obj->next, head); |
| 505 | if (ht->rhlist) { | ||
| 506 | struct rhlist_head *list; | ||
| 507 | |||
| 508 | list = container_of(obj, struct rhlist_head, rhead); | ||
| 509 | RCU_INIT_POINTER(list->next, NULL); | ||
| 510 | } | ||
| 469 | 511 | ||
| 470 | rcu_assign_pointer(tbl->buckets[hash], obj); | 512 | rcu_assign_pointer(tbl->buckets[hash], obj); |
| 471 | 513 | ||
| 472 | atomic_inc(&ht->nelems); | 514 | atomic_inc(&ht->nelems); |
| 515 | if (rht_grow_above_75(ht, tbl)) | ||
| 516 | schedule_work(&ht->run_work); | ||
| 473 | 517 | ||
| 474 | exit: | 518 | return NULL; |
| 475 | spin_unlock(rht_bucket_lock(tbl, hash)); | 519 | } |
| 476 | 520 | ||
| 477 | if (err == 0) | 521 | static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, |
| 478 | return NULL; | 522 | struct rhash_head *obj) |
| 479 | else if (err == -EAGAIN) | 523 | { |
| 480 | return tbl; | 524 | struct bucket_table *new_tbl; |
| 481 | else | 525 | struct bucket_table *tbl; |
| 482 | return ERR_PTR(err); | 526 | unsigned int hash; |
| 527 | spinlock_t *lock; | ||
| 528 | void *data; | ||
| 529 | |||
| 530 | tbl = rcu_dereference(ht->tbl); | ||
| 531 | |||
| 532 | /* All insertions must grab the oldest table containing | ||
| 533 | * the hashed bucket that is yet to be rehashed. | ||
| 534 | */ | ||
| 535 | for (;;) { | ||
| 536 | hash = rht_head_hashfn(ht, tbl, obj, ht->p); | ||
| 537 | lock = rht_bucket_lock(tbl, hash); | ||
| 538 | spin_lock_bh(lock); | ||
| 539 | |||
| 540 | if (tbl->rehash <= hash) | ||
| 541 | break; | ||
| 542 | |||
| 543 | spin_unlock_bh(lock); | ||
| 544 | tbl = rcu_dereference(tbl->future_tbl); | ||
| 545 | } | ||
| 546 | |||
| 547 | data = rhashtable_lookup_one(ht, tbl, hash, key, obj); | ||
| 548 | new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); | ||
| 549 | if (PTR_ERR(new_tbl) != -EEXIST) | ||
| 550 | data = ERR_CAST(new_tbl); | ||
| 551 | |||
| 552 | while (!IS_ERR_OR_NULL(new_tbl)) { | ||
| 553 | tbl = new_tbl; | ||
| 554 | hash = rht_head_hashfn(ht, tbl, obj, ht->p); | ||
| 555 | spin_lock_nested(rht_bucket_lock(tbl, hash), | ||
| 556 | SINGLE_DEPTH_NESTING); | ||
| 557 | |||
| 558 | data = rhashtable_lookup_one(ht, tbl, hash, key, obj); | ||
| 559 | new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); | ||
| 560 | if (PTR_ERR(new_tbl) != -EEXIST) | ||
| 561 | data = ERR_CAST(new_tbl); | ||
| 562 | |||
| 563 | spin_unlock(rht_bucket_lock(tbl, hash)); | ||
| 564 | } | ||
| 565 | |||
| 566 | spin_unlock_bh(lock); | ||
| 567 | |||
| 568 | if (PTR_ERR(data) == -EAGAIN) | ||
| 569 | data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: | ||
| 570 | -EAGAIN); | ||
| 571 | |||
| 572 | return data; | ||
| 573 | } | ||
| 574 | |||
| 575 | void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, | ||
| 576 | struct rhash_head *obj) | ||
| 577 | { | ||
| 578 | void *data; | ||
| 579 | |||
| 580 | do { | ||
| 581 | rcu_read_lock(); | ||
| 582 | data = rhashtable_try_insert(ht, key, obj); | ||
| 583 | rcu_read_unlock(); | ||
| 584 | } while (PTR_ERR(data) == -EAGAIN); | ||
| 585 | |||
| 586 | return data; | ||
| 483 | } | 587 | } |
| 484 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); | 588 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); |
| 485 | 589 | ||
| 486 | /** | 590 | /** |
| 487 | * rhashtable_walk_init - Initialise an iterator | 591 | * rhashtable_walk_enter - Initialise an iterator |
| 488 | * @ht: Table to walk over | 592 | * @ht: Table to walk over |
| 489 | * @iter: Hash table Iterator | 593 | * @iter: Hash table Iterator |
| 490 | * @gfp: GFP flags for allocations | ||
| 491 | * | 594 | * |
| 492 | * This function prepares a hash table walk. | 595 | * This function prepares a hash table walk. |
| 493 | * | 596 | * |
| @@ -502,30 +605,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); | |||
| 502 | * This function may sleep so you must not call it from interrupt | 605 | * This function may sleep so you must not call it from interrupt |
| 503 | * context or with spin locks held. | 606 | * context or with spin locks held. |
| 504 | * | 607 | * |
| 505 | * You must call rhashtable_walk_exit if this function returns | 608 | * You must call rhashtable_walk_exit after this function returns. |
| 506 | * successfully. | ||
| 507 | */ | 609 | */ |
| 508 | int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, | 610 | void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) |
| 509 | gfp_t gfp) | ||
| 510 | { | 611 | { |
| 511 | iter->ht = ht; | 612 | iter->ht = ht; |
| 512 | iter->p = NULL; | 613 | iter->p = NULL; |
| 513 | iter->slot = 0; | 614 | iter->slot = 0; |
| 514 | iter->skip = 0; | 615 | iter->skip = 0; |
| 515 | 616 | ||
| 516 | iter->walker = kmalloc(sizeof(*iter->walker), gfp); | ||
| 517 | if (!iter->walker) | ||
| 518 | return -ENOMEM; | ||
| 519 | |||
| 520 | spin_lock(&ht->lock); | 617 | spin_lock(&ht->lock); |
| 521 | iter->walker->tbl = | 618 | iter->walker.tbl = |
| 522 | rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); | 619 | rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); |
| 523 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); | 620 | list_add(&iter->walker.list, &iter->walker.tbl->walkers); |
| 524 | spin_unlock(&ht->lock); | 621 | spin_unlock(&ht->lock); |
| 525 | |||
| 526 | return 0; | ||
| 527 | } | 622 | } |
| 528 | EXPORT_SYMBOL_GPL(rhashtable_walk_init); | 623 | EXPORT_SYMBOL_GPL(rhashtable_walk_enter); |
| 529 | 624 | ||
| 530 | /** | 625 | /** |
| 531 | * rhashtable_walk_exit - Free an iterator | 626 | * rhashtable_walk_exit - Free an iterator |
| @@ -536,10 +631,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); | |||
| 536 | void rhashtable_walk_exit(struct rhashtable_iter *iter) | 631 | void rhashtable_walk_exit(struct rhashtable_iter *iter) |
| 537 | { | 632 | { |
| 538 | spin_lock(&iter->ht->lock); | 633 | spin_lock(&iter->ht->lock); |
| 539 | if (iter->walker->tbl) | 634 | if (iter->walker.tbl) |
| 540 | list_del(&iter->walker->list); | 635 | list_del(&iter->walker.list); |
| 541 | spin_unlock(&iter->ht->lock); | 636 | spin_unlock(&iter->ht->lock); |
| 542 | kfree(iter->walker); | ||
| 543 | } | 637 | } |
| 544 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | 638 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); |
| 545 | 639 | ||
| @@ -565,12 +659,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) | |||
| 565 | rcu_read_lock(); | 659 | rcu_read_lock(); |
| 566 | 660 | ||
| 567 | spin_lock(&ht->lock); | 661 | spin_lock(&ht->lock); |
| 568 | if (iter->walker->tbl) | 662 | if (iter->walker.tbl) |
| 569 | list_del(&iter->walker->list); | 663 | list_del(&iter->walker.list); |
| 570 | spin_unlock(&ht->lock); | 664 | spin_unlock(&ht->lock); |
| 571 | 665 | ||
| 572 | if (!iter->walker->tbl) { | 666 | if (!iter->walker.tbl) { |
| 573 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); | 667 | iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); |
| 574 | return -EAGAIN; | 668 | return -EAGAIN; |
| 575 | } | 669 | } |
| 576 | 670 | ||
| @@ -592,12 +686,17 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start); | |||
| 592 | */ | 686 | */ |
| 593 | void *rhashtable_walk_next(struct rhashtable_iter *iter) | 687 | void *rhashtable_walk_next(struct rhashtable_iter *iter) |
| 594 | { | 688 | { |
| 595 | struct bucket_table *tbl = iter->walker->tbl; | 689 | struct bucket_table *tbl = iter->walker.tbl; |
| 690 | struct rhlist_head *list = iter->list; | ||
| 596 | struct rhashtable *ht = iter->ht; | 691 | struct rhashtable *ht = iter->ht; |
| 597 | struct rhash_head *p = iter->p; | 692 | struct rhash_head *p = iter->p; |
| 693 | bool rhlist = ht->rhlist; | ||
| 598 | 694 | ||
| 599 | if (p) { | 695 | if (p) { |
| 600 | p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); | 696 | if (!rhlist || !(list = rcu_dereference(list->next))) { |
| 697 | p = rcu_dereference(p->next); | ||
| 698 | list = container_of(p, struct rhlist_head, rhead); | ||
| 699 | } | ||
| 601 | goto next; | 700 | goto next; |
| 602 | } | 701 | } |
| 603 | 702 | ||
| @@ -605,6 +704,18 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter) | |||
| 605 | int skip = iter->skip; | 704 | int skip = iter->skip; |
| 606 | 705 | ||
| 607 | rht_for_each_rcu(p, tbl, iter->slot) { | 706 | rht_for_each_rcu(p, tbl, iter->slot) { |
| 707 | if (rhlist) { | ||
| 708 | list = container_of(p, struct rhlist_head, | ||
| 709 | rhead); | ||
| 710 | do { | ||
| 711 | if (!skip) | ||
| 712 | goto next; | ||
| 713 | skip--; | ||
| 714 | list = rcu_dereference(list->next); | ||
| 715 | } while (list); | ||
| 716 | |||
| 717 | continue; | ||
| 718 | } | ||
| 608 | if (!skip) | 719 | if (!skip) |
| 609 | break; | 720 | break; |
| 610 | skip--; | 721 | skip--; |
| @@ -614,7 +725,8 @@ next: | |||
| 614 | if (!rht_is_a_nulls(p)) { | 725 | if (!rht_is_a_nulls(p)) { |
| 615 | iter->skip++; | 726 | iter->skip++; |
| 616 | iter->p = p; | 727 | iter->p = p; |
| 617 | return rht_obj(ht, p); | 728 | iter->list = list; |
| 729 | return rht_obj(ht, rhlist ? &list->rhead : p); | ||
| 618 | } | 730 | } |
| 619 | 731 | ||
| 620 | iter->skip = 0; | 732 | iter->skip = 0; |
| @@ -625,8 +737,8 @@ next: | |||
| 625 | /* Ensure we see any new tables. */ | 737 | /* Ensure we see any new tables. */ |
| 626 | smp_rmb(); | 738 | smp_rmb(); |
| 627 | 739 | ||
| 628 | iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); | 740 | iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); |
| 629 | if (iter->walker->tbl) { | 741 | if (iter->walker.tbl) { |
| 630 | iter->slot = 0; | 742 | iter->slot = 0; |
| 631 | iter->skip = 0; | 743 | iter->skip = 0; |
| 632 | return ERR_PTR(-EAGAIN); | 744 | return ERR_PTR(-EAGAIN); |
| @@ -646,7 +758,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) | |||
| 646 | __releases(RCU) | 758 | __releases(RCU) |
| 647 | { | 759 | { |
| 648 | struct rhashtable *ht; | 760 | struct rhashtable *ht; |
| 649 | struct bucket_table *tbl = iter->walker->tbl; | 761 | struct bucket_table *tbl = iter->walker.tbl; |
| 650 | 762 | ||
| 651 | if (!tbl) | 763 | if (!tbl) |
| 652 | goto out; | 764 | goto out; |
| @@ -655,9 +767,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) | |||
| 655 | 767 | ||
| 656 | spin_lock(&ht->lock); | 768 | spin_lock(&ht->lock); |
| 657 | if (tbl->rehash < tbl->size) | 769 | if (tbl->rehash < tbl->size) |
| 658 | list_add(&iter->walker->list, &tbl->walkers); | 770 | list_add(&iter->walker.list, &tbl->walkers); |
| 659 | else | 771 | else |
| 660 | iter->walker->tbl = NULL; | 772 | iter->walker.tbl = NULL; |
| 661 | spin_unlock(&ht->lock); | 773 | spin_unlock(&ht->lock); |
| 662 | 774 | ||
| 663 | iter->p = NULL; | 775 | iter->p = NULL; |
| @@ -803,6 +915,48 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 803 | EXPORT_SYMBOL_GPL(rhashtable_init); | 915 | EXPORT_SYMBOL_GPL(rhashtable_init); |
| 804 | 916 | ||
| 805 | /** | 917 | /** |
| 918 | * rhltable_init - initialize a new hash list table | ||
| 919 | * @hlt: hash list table to be initialized | ||
| 920 | * @params: configuration parameters | ||
| 921 | * | ||
| 922 | * Initializes a new hash list table. | ||
| 923 | * | ||
| 924 | * See documentation for rhashtable_init. | ||
| 925 | */ | ||
| 926 | int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) | ||
| 927 | { | ||
| 928 | int err; | ||
| 929 | |||
| 930 | /* No rhlist NULLs marking for now. */ | ||
| 931 | if (params->nulls_base) | ||
| 932 | return -EINVAL; | ||
| 933 | |||
| 934 | err = rhashtable_init(&hlt->ht, params); | ||
| 935 | hlt->ht.rhlist = true; | ||
| 936 | return err; | ||
| 937 | } | ||
| 938 | EXPORT_SYMBOL_GPL(rhltable_init); | ||
| 939 | |||
| 940 | static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, | ||
| 941 | void (*free_fn)(void *ptr, void *arg), | ||
| 942 | void *arg) | ||
| 943 | { | ||
| 944 | struct rhlist_head *list; | ||
| 945 | |||
| 946 | if (!ht->rhlist) { | ||
| 947 | free_fn(rht_obj(ht, obj), arg); | ||
| 948 | return; | ||
| 949 | } | ||
| 950 | |||
| 951 | list = container_of(obj, struct rhlist_head, rhead); | ||
| 952 | do { | ||
| 953 | obj = &list->rhead; | ||
| 954 | list = rht_dereference(list->next, ht); | ||
| 955 | free_fn(rht_obj(ht, obj), arg); | ||
| 956 | } while (list); | ||
| 957 | } | ||
| 958 | |||
| 959 | /** | ||
| 806 | * rhashtable_free_and_destroy - free elements and destroy hash table | 960 | * rhashtable_free_and_destroy - free elements and destroy hash table |
| 807 | * @ht: the hash table to destroy | 961 | * @ht: the hash table to destroy |
| 808 | * @free_fn: callback to release resources of element | 962 | * @free_fn: callback to release resources of element |
| @@ -839,7 +993,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, | |||
| 839 | pos = next, | 993 | pos = next, |
| 840 | next = !rht_is_a_nulls(pos) ? | 994 | next = !rht_is_a_nulls(pos) ? |
| 841 | rht_dereference(pos->next, ht) : NULL) | 995 | rht_dereference(pos->next, ht) : NULL) |
| 842 | free_fn(rht_obj(ht, pos), arg); | 996 | rhashtable_free_one(ht, pos, free_fn, arg); |
| 843 | } | 997 | } |
| 844 | } | 998 | } |
| 845 | 999 | ||
diff --git a/lib/syscall.c b/lib/syscall.c index e30e03932480..63239e097b13 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
| @@ -7,9 +7,19 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 7 | unsigned long args[6], unsigned int maxargs, | 7 | unsigned long args[6], unsigned int maxargs, |
| 8 | unsigned long *sp, unsigned long *pc) | 8 | unsigned long *sp, unsigned long *pc) |
| 9 | { | 9 | { |
| 10 | struct pt_regs *regs = task_pt_regs(target); | 10 | struct pt_regs *regs; |
| 11 | if (unlikely(!regs)) | 11 | |
| 12 | if (!try_get_task_stack(target)) { | ||
| 13 | /* Task has no stack, so the task isn't in a syscall. */ | ||
| 14 | *callno = -1; | ||
| 15 | return 0; | ||
| 16 | } | ||
| 17 | |||
| 18 | regs = task_pt_regs(target); | ||
| 19 | if (unlikely(!regs)) { | ||
| 20 | put_task_stack(target); | ||
| 12 | return -EAGAIN; | 21 | return -EAGAIN; |
| 22 | } | ||
| 13 | 23 | ||
| 14 | *sp = user_stack_pointer(regs); | 24 | *sp = user_stack_pointer(regs); |
| 15 | *pc = instruction_pointer(regs); | 25 | *pc = instruction_pointer(regs); |
| @@ -18,6 +28,7 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 18 | if (*callno != -1L && maxargs > 0) | 28 | if (*callno != -1L && maxargs > 0) |
| 19 | syscall_get_arguments(target, regs, 0, maxargs, args); | 29 | syscall_get_arguments(target, regs, 0, maxargs, args); |
| 20 | 30 | ||
| 31 | put_task_stack(target); | ||
| 21 | return 0; | 32 | return 0; |
| 22 | } | 33 | } |
| 23 | 34 | ||
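
The collect_syscall() change above pins the target's stack before touching its pt_regs, since the task may exit and free its stack at any moment. A minimal sketch of the same pattern in an imaginary caller (inspect_task() is hypothetical; try_get_task_stack()/put_task_stack() are the helpers used in the hunk):

	#include <linux/sched.h>
	#include <linux/ptrace.h>

	static int inspect_task(struct task_struct *task)
	{
		struct pt_regs *regs;
		int ret = 0;

		/* Pin the stack; a dead task may have none, which is not an error. */
		if (!try_get_task_stack(task))
			return 0;

		regs = task_pt_regs(task);
		if (regs)
			pr_info("pc=%lx sp=%lx\n",
				instruction_pointer(regs),
				user_stack_pointer(regs));
		else
			ret = -EAGAIN;		/* registers not available yet */

		put_task_stack(task);		/* drop the reference taken above */
		return ret;
	}
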
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 93f45011a59d..94346b4d8984 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -5485,6 +5485,7 @@ static struct sk_buff *populate_skb(char *buf, int size) | |||
| 5485 | skb->hash = SKB_HASH; | 5485 | skb->hash = SKB_HASH; |
| 5486 | skb->queue_mapping = SKB_QUEUE_MAP; | 5486 | skb->queue_mapping = SKB_QUEUE_MAP; |
| 5487 | skb->vlan_tci = SKB_VLAN_TCI; | 5487 | skb->vlan_tci = SKB_VLAN_TCI; |
| 5488 | skb->vlan_proto = htons(ETH_P_IP); | ||
| 5488 | skb->dev = &dev; | 5489 | skb->dev = &dev; |
| 5489 | skb->dev->ifindex = SKB_DEV_IFINDEX; | 5490 | skb->dev->ifindex = SKB_DEV_IFINDEX; |
| 5490 | skb->dev->type = SKB_DEV_TYPE; | 5491 | skb->dev->type = SKB_DEV_TYPE; |
diff --git a/lib/test_hash.c b/lib/test_hash.c index 66c5fc8351e8..cac20c5fb304 100644 --- a/lib/test_hash.c +++ b/lib/test_hash.c | |||
| @@ -143,7 +143,7 @@ static int __init | |||
| 143 | test_hash_init(void) | 143 | test_hash_init(void) |
| 144 | { | 144 | { |
| 145 | char buf[SIZE+1]; | 145 | char buf[SIZE+1]; |
| 146 | u32 string_or = 0, hash_or[2][33] = { 0 }; | 146 | u32 string_or = 0, hash_or[2][33] = { { 0, } }; |
| 147 | unsigned tests = 0; | 147 | unsigned tests = 0; |
| 148 | unsigned long long h64 = 0; | 148 | unsigned long long h64 = 0; |
| 149 | int i, j; | 149 | int i, j; |
| @@ -219,21 +219,27 @@ test_hash_init(void) | |||
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | /* Issue notices about skipped tests. */ | 221 | /* Issue notices about skipped tests. */ |
| 222 | #ifndef HAVE_ARCH__HASH_32 | 222 | #ifdef HAVE_ARCH__HASH_32 |
| 223 | pr_info("__hash_32() has no arch implementation to test."); | 223 | #if HAVE_ARCH__HASH_32 != 1 |
| 224 | #elif HAVE_ARCH__HASH_32 != 1 | ||
| 225 | pr_info("__hash_32() is arch-specific; not compared to generic."); | 224 | pr_info("__hash_32() is arch-specific; not compared to generic."); |
| 226 | #endif | 225 | #endif |
| 227 | #ifndef HAVE_ARCH_HASH_32 | 226 | #else |
| 228 | pr_info("hash_32() has no arch implementation to test."); | 227 | pr_info("__hash_32() has no arch implementation to test."); |
| 229 | #elif HAVE_ARCH_HASH_32 != 1 | 228 | #endif |
| 229 | #ifdef HAVE_ARCH_HASH_32 | ||
| 230 | #if HAVE_ARCH_HASH_32 != 1 | ||
| 230 | pr_info("hash_32() is arch-specific; not compared to generic."); | 231 | pr_info("hash_32() is arch-specific; not compared to generic."); |
| 231 | #endif | 232 | #endif |
| 232 | #ifndef HAVE_ARCH_HASH_64 | 233 | #else |
| 233 | pr_info("hash_64() has no arch implementation to test."); | 234 | pr_info("hash_32() has no arch implementation to test."); |
| 234 | #elif HAVE_ARCH_HASH_64 != 1 | 235 | #endif |
| 236 | #ifdef HAVE_ARCH_HASH_64 | ||
| 237 | #if HAVE_ARCH_HASH_64 != 1 | ||
| 235 | pr_info("hash_64() is arch-specific; not compared to generic."); | 238 | pr_info("hash_64() is arch-specific; not compared to generic."); |
| 236 | #endif | 239 | #endif |
| 240 | #else | ||
| 241 | pr_info("hash_64() has no arch implementation to test."); | ||
| 242 | #endif | ||
| 237 | 243 | ||
| 238 | pr_notice("%u tests passed.", tests); | 244 | pr_notice("%u tests passed.", tests); |
| 239 | 245 | ||
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 297fdb5e74bd..64e899b63337 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
| @@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); | |||
| 38 | 38 | ||
| 39 | static int max_size = 0; | 39 | static int max_size = 0; |
| 40 | module_param(max_size, int, 0); | 40 | module_param(max_size, int, 0); |
| 41 | MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)"); | 41 | MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)"); |
| 42 | 42 | ||
| 43 | static bool shrinking = false; | 43 | static bool shrinking = false; |
| 44 | module_param(shrinking, bool, 0); | 44 | module_param(shrinking, bool, 0); |
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c index f0b323abb4c6..ae8d2491133c 100644 --- a/lib/ucs2_string.c +++ b/lib/ucs2_string.c | |||
| @@ -56,7 +56,7 @@ ucs2_utf8size(const ucs2_char_t *src) | |||
| 56 | unsigned long i; | 56 | unsigned long i; |
| 57 | unsigned long j = 0; | 57 | unsigned long j = 0; |
| 58 | 58 | ||
| 59 | for (i = 0; i < ucs2_strlen(src); i++) { | 59 | for (i = 0; src[i]; i++) { |
| 60 | u16 c = src[i]; | 60 | u16 c = src[i]; |
| 61 | 61 | ||
| 62 | if (c >= 0x800) | 62 | if (c >= 0x800) |
diff --git a/lib/usercopy.c b/lib/usercopy.c deleted file mode 100644 index 4f5b1ddbcd25..000000000000 --- a/lib/usercopy.c +++ /dev/null | |||
| @@ -1,9 +0,0 @@ | |||
| 1 | #include <linux/export.h> | ||
| 2 | #include <linux/bug.h> | ||
| 3 | #include <linux/uaccess.h> | ||
| 4 | |||
| 5 | void copy_from_user_overflow(void) | ||
| 6 | { | ||
| 7 | WARN(1, "Buffer overflow detected!\n"); | ||
| 8 | } | ||
| 9 | EXPORT_SYMBOL(copy_from_user_overflow); | ||
diff --git a/lib/win_minmax.c b/lib/win_minmax.c new file mode 100644 index 000000000000..c8420d404926 --- /dev/null +++ b/lib/win_minmax.c | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | /** | ||
| 2 | * lib/win_minmax.c: windowed min/max tracker | ||
| 3 | * | ||
| 4 | * Kathleen Nichols' algorithm for tracking the minimum (or maximum) | ||
| 5 | * value of a data stream over some fixed time interval. (E.g., | ||
| 6 | * the minimum RTT over the past five minutes.) It uses constant | ||
| 7 | * space and constant time per update yet almost always delivers | ||
| 8 | * the same minimum as an implementation that has to keep all the | ||
| 9 | * data in the window. | ||
| 10 | * | ||
| 11 | * The algorithm keeps track of the best, 2nd best & 3rd best min | ||
| 12 | * values, maintaining an invariant that the measurement time of | ||
| 13 | * the n'th best >= n-1'th best. It also makes sure that the three | ||
| 14 | * values are widely separated in the time window since that bounds | ||
| 15 | the worst case error when that data is monotonically increasing | ||
| 16 | * over the window. | ||
| 17 | * | ||
| 18 | * Upon getting a new min, we can forget everything earlier because | ||
| 19 | * it has no value - the new min is <= everything else in the window | ||
| 20 | * by definition and it's the most recent. So we restart fresh on | ||
| 21 | every new min and overwrite 2nd & 3rd choices. The same property | ||
| 22 | * holds for 2nd & 3rd best. | ||
| 23 | */ | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/win_minmax.h> | ||
| 26 | |||
| 27 | /* As time advances, update the 1st, 2nd, and 3rd choices. */ | ||
| 28 | static u32 minmax_subwin_update(struct minmax *m, u32 win, | ||
| 29 | const struct minmax_sample *val) | ||
| 30 | { | ||
| 31 | u32 dt = val->t - m->s[0].t; | ||
| 32 | |||
| 33 | if (unlikely(dt > win)) { | ||
| 34 | /* | ||
| 35 | * Passed entire window without a new val so make 2nd | ||
| 36 | * choice the new val & 3rd choice the new 2nd choice. | ||
| 37 | * We may have to iterate this since our 2nd choice | ||
| 38 | * may also be outside the window (we checked on entry | ||
| 39 | * that the third choice was in the window). | ||
| 40 | */ | ||
| 41 | m->s[0] = m->s[1]; | ||
| 42 | m->s[1] = m->s[2]; | ||
| 43 | m->s[2] = *val; | ||
| 44 | if (unlikely(val->t - m->s[0].t > win)) { | ||
| 45 | m->s[0] = m->s[1]; | ||
| 46 | m->s[1] = m->s[2]; | ||
| 47 | m->s[2] = *val; | ||
| 48 | } | ||
| 49 | } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) { | ||
| 50 | /* | ||
| 51 | * We've passed a quarter of the window without a new val | ||
| 52 | * so take a 2nd choice from the 2nd quarter of the window. | ||
| 53 | */ | ||
| 54 | m->s[2] = m->s[1] = *val; | ||
| 55 | } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) { | ||
| 56 | /* | ||
| 57 | * We've passed half the window without finding a new val | ||
| 58 | * so take a 3rd choice from the last half of the window | ||
| 59 | */ | ||
| 60 | m->s[2] = *val; | ||
| 61 | } | ||
| 62 | return m->s[0].v; | ||
| 63 | } | ||
| 64 | |||
| 65 | /* Check if new measurement updates the 1st, 2nd or 3rd choice max. */ | ||
| 66 | u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas) | ||
| 67 | { | ||
| 68 | struct minmax_sample val = { .t = t, .v = meas }; | ||
| 69 | |||
| 70 | if (unlikely(val.v >= m->s[0].v) || /* found new max? */ | ||
| 71 | unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ | ||
| 72 | return minmax_reset(m, t, meas); /* forget earlier samples */ | ||
| 73 | |||
| 74 | if (unlikely(val.v >= m->s[1].v)) | ||
| 75 | m->s[2] = m->s[1] = val; | ||
| 76 | else if (unlikely(val.v >= m->s[2].v)) | ||
| 77 | m->s[2] = val; | ||
| 78 | |||
| 79 | return minmax_subwin_update(m, win, &val); | ||
| 80 | } | ||
| 81 | EXPORT_SYMBOL(minmax_running_max); | ||
| 82 | |||
| 83 | /* Check if new measurement updates the 1st, 2nd or 3rd choice min. */ | ||
| 84 | u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas) | ||
| 85 | { | ||
| 86 | struct minmax_sample val = { .t = t, .v = meas }; | ||
| 87 | |||
| 88 | if (unlikely(val.v <= m->s[0].v) || /* found new min? */ | ||
| 89 | unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ | ||
| 90 | return minmax_reset(m, t, meas); /* forget earlier samples */ | ||
| 91 | |||
| 92 | if (unlikely(val.v <= m->s[1].v)) | ||
| 93 | m->s[2] = m->s[1] = val; | ||
| 94 | else if (unlikely(val.v <= m->s[2].v)) | ||
| 95 | m->s[2] = val; | ||
| 96 | |||
| 97 | return minmax_subwin_update(m, win, &val); | ||
| 98 | } | ||
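
For context, a minimal sketch of how the windowed tracker above might be used, e.g. to keep the smallest RTT sample seen over the last ten seconds. The names rtt_min, rtt_init, rtt_sample and current_min_rtt are hypothetical; minmax_reset() and minmax_get() come from include/linux/win_minmax.h introduced alongside this file:

	#include <linux/jiffies.h>
	#include <linux/win_minmax.h>

	static struct minmax rtt_min;

	static void rtt_init(u32 first_rtt_us)
	{
		/* Start the window with the first measurement. */
		minmax_reset(&rtt_min, (u32)jiffies, first_rtt_us);
	}

	static void rtt_sample(u32 rtt_us)
	{
		/* Time and window share the same unit (jiffies); 10 s window. */
		minmax_running_min(&rtt_min, 10 * HZ, (u32)jiffies, rtt_us);
	}

	static u32 current_min_rtt(void)
	{
		/* Best (smallest) sample whose timestamp is still in the window. */
		return minmax_get(&rtt_min);
	}
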
