Diffstat (limited to 'lib')
 -rw-r--r--  lib/Kconfig                      |  46
 -rw-r--r--  lib/Kconfig.debug                |  22
 -rw-r--r--  lib/Makefile                     |   4
 -rw-r--r--  lib/bitmap.c                     |   5
 -rw-r--r--  lib/bucket_locks.c               |   5
 -rw-r--r--  lib/dma-debug.c                  |  65
 -rw-r--r--  lib/dma-direct.c                 |  29
 -rw-r--r--  lib/dma-noncoherent.c            | 102
 -rw-r--r--  lib/idr.c                        |  10
 -rw-r--r--  lib/iommu-common.c               | 267
 -rw-r--r--  lib/iommu-helper.c               |  14
 -rw-r--r--  lib/iov_iter.c                   |  61
 -rw-r--r--  lib/kobject_uevent.c             | 178
 -rw-r--r--  lib/mpi/mpi-internal.h           |  75
 -rw-r--r--  lib/percpu_ida.c                 |  63
 -rw-r--r--  lib/reed_solomon/decode_rs.c     |  34
 -rw-r--r--  lib/reed_solomon/encode_rs.c     |  15
 -rw-r--r--  lib/reed_solomon/reed_solomon.c  | 240
 -rw-r--r--  lib/rhashtable.c                 |  51
 -rw-r--r--  lib/sbitmap.c                    | 113
 -rw-r--r--  lib/swiotlb.c                    |  11
 -rw-r--r--  lib/test_bpf.c                   | 595
 -rw-r--r--  lib/test_overflow.c              | 417
 -rw-r--r--  lib/test_printf.c                |   2
 -rw-r--r--  lib/ucs2_string.c                |   2
 -rw-r--r--  lib/vsprintf.c                   | 133
26 files changed, 1620 insertions(+), 939 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 5fe577673b98..abc111eb5054 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -429,15 +429,50 @@ config SGL_ALLOC
 	bool
 	default n
 
+config NEED_SG_DMA_LENGTH
+	bool
+
+config NEED_DMA_MAP_STATE
+	bool
+
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config IOMMU_HELPER
+	bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+	bool
+	select NEED_DMA_MAP_STATE
+
 config DMA_DIRECT_OPS
 	bool
-	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
-	default n
+	depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+	bool
+	depends on HAS_DMA
+	select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+	bool
+	depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+	bool
+	depends on DMA_NONCOHERENT_OPS
 
 config DMA_VIRT_OPS
 	bool
-	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
-	default n
+	depends on HAS_DMA
+
+config SWIOTLB
+	bool
+	select DMA_DIRECT_OPS
+	select NEED_DMA_MAP_STATE
 
 config CHECK_SIGNATURE
 	bool
@@ -586,6 +621,9 @@ config ARCH_HAS_PMEM_API
 config ARCH_HAS_UACCESS_FLUSHCACHE
 	bool
 
+config ARCH_HAS_UACCESS_MCSAFE
+	bool
+
 config STACKDEPOT
 	bool
 	select STACKTRACE
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c40c7b734cd1..eb885942eb0f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1634,7 +1634,7 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 config DMA_API_DEBUG
 	bool "Enable debugging of DMA-API usage"
-	depends on HAVE_DMA_API_DEBUG
+	select NEED_DMA_MAP_STATE
 	help
 	  Enable this option to debug the use of the DMA API by device drivers.
 	  With this option you will be able to detect common bugs in device
@@ -1651,6 +1651,23 @@ config DMA_API_DEBUG
 
 	  If unsure, say N.
 
+config DMA_API_DEBUG_SG
+	bool "Debug DMA scatter-gather usage"
+	default y
+	depends on DMA_API_DEBUG
+	help
+	  Perform extra checking that callers of dma_map_sg() have respected the
+	  appropriate segment length/boundary limits for the given device when
+	  preparing DMA scatterlists.
+
+	  This is particularly likely to have been overlooked in cases where the
+	  dma_map_sg() API is used for general bulk mapping of pages rather than
+	  preparing literal scatter-gather descriptors, where there is a risk of
+	  unexpected behaviour from DMA API implementations if the scatterlist
+	  is technically out-of-spec.
+
+	  If unsure, say N.
+
 menuconfig RUNTIME_TESTING_MENU
 	bool "Runtime Testing"
 	def_bool y
@@ -1785,6 +1802,9 @@ config TEST_BITMAP
 config TEST_UUID
 	tristate "Test functions located in the uuid module at runtime"
 
+config TEST_OVERFLOW
+	tristate "Test check_*_overflow() functions at runtime"
+
 config TEST_RHASHTABLE
 	tristate "Perform selftest on resizable hash table"
 	default n
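The new DMA_API_DEBUG_SG option validates each scatterlist segment against the limits a driver declares through dev->dma_parms. As a minimal sketch of the declaring side: dma_set_max_seg_size() and dma_set_seg_boundary() are the real helpers from <linux/dma-mapping.h>, while the probe function and the 64K/4G limits here are hypothetical.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int my_probe(struct device *dev)	/* hypothetical driver probe */
{
	/* no segment handed to dma_map_sg() may exceed 64K ... */
	if (dma_set_max_seg_size(dev, SZ_64K))
		return -EIO;	/* fails if the bus did not allocate dma_parms */
	/* ... or cross a 4G boundary */
	if (dma_set_seg_boundary(dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}
```

With these set, a segment violating either limit trips the err_printk() checks added to lib/dma-debug.c below.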
diff --git a/lib/Makefile b/lib/Makefile
index ce20696d5a92..84c6dcb31fbb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
+lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
 lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
@@ -59,6 +60,7 @@ UBSAN_SANITIZE_test_ubsan.o := y
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
 obj-$(CONFIG_TEST_SORT) += test_sort.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
@@ -147,7 +149,7 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
-obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
 obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index a42eff7e8c48..58f9750e49c6 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -64,12 +64,9 @@ EXPORT_SYMBOL(__bitmap_equal);
 
 void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
 {
-	unsigned int k, lim = bits/BITS_PER_LONG;
+	unsigned int k, lim = BITS_TO_LONGS(bits);
 	for (k = 0; k < lim; ++k)
 		dst[k] = ~src[k];
-
-	if (bits % BITS_PER_LONG)
-		dst[k] = ~src[k];
 }
 EXPORT_SYMBOL(__bitmap_complement);
 
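The rewritten __bitmap_complement() leans on BITS_TO_LONGS() rounding up to whole words, so a partial last word is already covered by the loop and the separate tail copy becomes dead weight. A standalone sketch of that arithmetic:

```c
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	/* on a 64-bit host, 100 bits = 1 full word plus a 36-bit tail */
	printf("%zu\n", BITS_TO_LONGS(100));		/* 2: tail word included */
	printf("%zu\n", (size_t)(100 / BITS_PER_LONG));	/* 1: the old loop bound */
	return 0;
}
```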
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index 266a97c5708b..ade3ce6c4af6 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -30,10 +30,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
 	}
 
 	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
 		if (!tlocks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
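kvmalloc_array() both guards the size * sizeof(spinlock_t) multiplication against overflow (returning NULL rather than wrapping) and chooses the kmalloc or vmalloc path internally, which is why the explicit gfpflags_allow_blocking() branch could be dropped. The resulting calling pattern, as a sketch with an illustrative wrapper:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* hypothetical helper; mirrors the allocation pattern above */
static spinlock_t *alloc_lock_array(unsigned int nlocks, gfp_t gfp)
{
	/* overflow in nlocks * sizeof(spinlock_t) yields NULL, not a wrap */
	spinlock_t *locks = kvmalloc_array(nlocks, sizeof(spinlock_t), gfp);
	unsigned int i;

	if (!locks)
		return NULL;
	for (i = 0; i < nlocks; i++)
		spin_lock_init(&locks[i]);
	return locks;
}
```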
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 7f5cdc1e6b29..c007d25bee09 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -41,6 +41,11 @@
 #define HASH_FN_SHIFT   13
 #define HASH_FN_MASK    (HASH_SIZE - 1)
 
+/* allow architectures to override this if absolutely required */
+#ifndef PREALLOC_DMA_DEBUG_ENTRIES
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+#endif
+
 enum {
 	dma_debug_single,
 	dma_debug_page,
@@ -127,7 +132,7 @@ static u32 min_free_entries;
 static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
-static u32 req_entries;
+static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
 
 /* debugfs dentry's for the stuff above */
 static struct dentry *dma_debug_dent __read_mostly;
@@ -439,7 +444,6 @@ void debug_dma_dump_mappings(struct device *dev)
 		spin_unlock_irqrestore(&bucket->lock, flags);
 	}
 }
-EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
  * For each mapping (initial cacheline in the case of
@@ -748,7 +752,6 @@ int dma_debug_resize_entries(u32 num_entries)
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_debug_resize_entries);
 
 /*
  * DMA-API debugging init code
@@ -1004,10 +1007,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 	bus_register_notifier(bus, nb);
 }
 
-/*
- * Let the architectures decide how many entries should be preallocated.
- */
-void dma_debug_init(u32 num_entries)
+static int dma_debug_init(void)
 {
 	int i;
 
@@ -1015,7 +1015,7 @@ void dma_debug_init(u32 num_entries)
 	 * called to set dma_debug_initialized
 	 */
 	if (global_disable)
-		return;
+		return 0;
 
 	for (i = 0; i < HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
@@ -1026,17 +1026,14 @@ void dma_debug_init(u32 num_entries)
 		pr_err("DMA-API: error creating debugfs entries - disabling\n");
 		global_disable = true;
 
-		return;
+		return 0;
 	}
 
-	if (req_entries)
-		num_entries = req_entries;
-
-	if (prealloc_memory(num_entries) != 0) {
+	if (prealloc_memory(nr_prealloc_entries) != 0) {
 		pr_err("DMA-API: debugging out of memory error - disabled\n");
 		global_disable = true;
 
-		return;
+		return 0;
 	}
 
 	nr_total_entries = num_free_entries;
@@ -1044,7 +1041,9 @@ void dma_debug_init(u32 num_entries)
 	dma_debug_initialized = true;
 
 	pr_info("DMA-API: debugging enabled by kernel config\n");
+	return 0;
 }
+core_initcall(dma_debug_init);
 
 static __init int dma_debug_cmdline(char *str)
 {
@@ -1061,16 +1060,10 @@ static __init int dma_debug_cmdline(char *str)
 
 static __init int dma_debug_entries_cmdline(char *str)
 {
-	int res;
-
 	if (!str)
 		return -EINVAL;
-
-	res = get_option(&str, &req_entries);
-
-	if (!res)
-		req_entries = 0;
-
+	if (!get_option(&str, &nr_prealloc_entries))
+		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
 	return 0;
 }
 
@@ -1293,6 +1286,32 @@ out:
 	put_hash_bucket(bucket, &flags);
 }
 
+static void check_sg_segment(struct device *dev, struct scatterlist *sg)
+{
+#ifdef CONFIG_DMA_API_DEBUG_SG
+	unsigned int max_seg = dma_get_max_seg_size(dev);
+	u64 start, end, boundary = dma_get_seg_boundary(dev);
+
+	/*
+	 * Either the driver forgot to set dma_parms appropriately, or
+	 * whoever generated the list forgot to check them.
+	 */
+	if (sg->length > max_seg)
+		err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
+			   sg->length, max_seg);
+	/*
+	 * In some cases this could potentially be the DMA API
+	 * implementation's fault, but it would usually imply that
+	 * the scatterlist was built inappropriately to begin with.
+	 */
+	start = sg_dma_address(sg);
+	end = start + sg_dma_len(sg) - 1;
+	if ((start ^ end) & ~boundary)
+		err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
+			   start, end, boundary);
+#endif
+}
+
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 			size_t size, int direction, dma_addr_t dma_addr,
 			bool map_single)
@@ -1423,6 +1442,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
 		}
 
+		check_sg_segment(dev, s);
+
 		add_dma_entry(entry);
 	}
 }
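The boundary test in check_sg_segment() works because dma_get_seg_boundary() returns a low-bits mask: two addresses sit in the same window exactly when their bits above the mask agree, so a nonzero (start ^ end) & ~boundary means the segment straddles a window edge. A standalone worked example, assuming a 64K boundary:

```c
#include <assert.h>
#include <stdint.h>

static int crosses(uint64_t start, uint64_t end, uint64_t boundary)
{
	/* nonzero iff start and end differ above the boundary mask */
	return ((start ^ end) & ~boundary) != 0;
}

int main(void)
{
	uint64_t mask = 0xffff;	/* what dma_get_seg_boundary() would
				 * return for a 64K limit */

	assert(!crosses(0x10000, 0x1ffff, mask)); /* one 64K window */
	assert(crosses(0x1fffe, 0x20001, mask));  /* straddles 0x20000 */
	return 0;
}
```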
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index bbfb229aa067..8be8106270c2 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -34,6 +34,13 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		const char *caller)
 {
 	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+		if (!dev->dma_mask) {
+			dev_err(dev,
+				"%s: call on device without dma_mask\n",
+				caller);
+			return false;
+		}
+
 		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
 			dev_err(dev,
 				"%s: overflow %pad+%zu of device mask %llx\n",
@@ -84,6 +91,13 @@ again:
 		__free_pages(page, page_order);
 		page = NULL;
 
+		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
+		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
+			gfp |= GFP_DMA32;
+			goto again;
+		}
+
 		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
 		    !(gfp & GFP_DMA)) {
@@ -121,7 +135,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		free_pages((unsigned long)cpu_addr, page_order);
 }
 
-static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
@@ -132,8 +146,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	return dma_addr;
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	int i;
 	struct scatterlist *sg;
@@ -165,10 +179,16 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < DMA_BIT_MASK(32))
 		return 0;
 #endif
+	/*
+	 * Various PCI/PCIe bridges have broken support for > 32bit DMA even
+	 * if the device itself might support it.
+	 */
+	if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
+		return 0;
 	return 1;
 }
 
-static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr == DIRECT_MAPPING_ERROR;
 }
@@ -180,6 +200,5 @@ const struct dma_map_ops dma_direct_ops = {
 	.map_sg			= dma_direct_map_sg,
 	.dma_supported		= dma_direct_supported,
 	.mapping_error		= dma_direct_mapping_error,
-	.is_phys		= 1,
 };
 EXPORT_SYMBOL(dma_direct_ops);
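The fallback cascade above keys off coherent_dma_mask, where DMA_BIT_MASK(n) is simply the highest address reachable with n address bits; any mask below DMA_BIT_MASK(64) means some memory is out of reach, so a failed allocation is retried from a lower zone. The macro restated standalone (definition copied from <linux/dma-mapping.h>):

```c
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("%llx\n", DMA_BIT_MASK(32));	/* ffffffff: 4G limit       */
	printf("%llx\n", DMA_BIT_MASK(24));	/* ffffff: 16M (ISA-style)  */
	return 0;
}
```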
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
new file mode 100644
index 000000000000..79e9a757387f
--- /dev/null
+++ b/lib/dma-noncoherent.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without providing cache
+ * coherence.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/scatterlist.h>
+
+static void dma_noncoherent_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	dma_addr_t addr;
+
+	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
+				size, dir);
+	return addr;
+}
+
+static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
+	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
+	return nents;
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
+const struct dma_map_ops dma_noncoherent_ops = {
+	.alloc			= arch_dma_alloc,
+	.free			= arch_dma_free,
+	.mmap			= arch_dma_mmap,
+	.sync_single_for_device	= dma_noncoherent_sync_single_for_device,
+	.sync_sg_for_device	= dma_noncoherent_sync_sg_for_device,
+	.map_page		= dma_noncoherent_map_page,
+	.map_sg			= dma_noncoherent_map_sg,
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+	.sync_single_for_cpu	= dma_noncoherent_sync_single_for_cpu,
+	.sync_sg_for_cpu	= dma_noncoherent_sync_sg_for_cpu,
+	.unmap_page		= dma_noncoherent_unmap_page,
+	.unmap_sg		= dma_noncoherent_unmap_sg,
+#endif
+	.dma_supported		= dma_direct_supported,
+	.mapping_error		= dma_direct_mapping_error,
+	.cache_sync		= arch_dma_cache_sync,
+};
+EXPORT_SYMBOL(dma_noncoherent_ops);
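dma_noncoherent_ops delegates allocation and cache maintenance to per-architecture hooks. A hypothetical sketch of one such hook: the signature follows the calls made above, but the two cache primitives are invented stand-ins for whatever writeback/invalidate instructions the architecture really has.

```c
#include <linux/dma-noncoherent.h>

/* hypothetical stand-ins for the arch's real cache primitives */
extern void my_cache_wback(phys_addr_t paddr, size_t size);
extern void my_cache_wback_inv(phys_addr_t paddr, size_t size);

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		my_cache_wback(paddr, size);	 /* device will read: flush */
	else
		my_cache_wback_inv(paddr, size); /* device may write: flush
						  * and discard stale lines */
}
```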
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -4,9 +4,9 @@
 #include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/xarray.h>
 
 DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-static DEFINE_SPINLOCK(simple_ida_lock);
 
 /**
  * idr_alloc_u32() - Allocate an ID.
@@ -581,7 +581,7 @@ again:
 	if (!ida_pre_get(ida, gfp_mask))
 		return -ENOMEM;
 
-	spin_lock_irqsave(&simple_ida_lock, flags);
+	xa_lock_irqsave(&ida->ida_rt, flags);
 	ret = ida_get_new_above(ida, start, &id);
 	if (!ret) {
 		if (id > max) {
@@ -591,7 +591,7 @@ again:
 			ret = id;
 		}
 	}
-	spin_unlock_irqrestore(&simple_ida_lock, flags);
+	xa_unlock_irqrestore(&ida->ida_rt, flags);
 
 	if (unlikely(ret == -EAGAIN))
 		goto again;
@@ -615,8 +615,8 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
 	unsigned long flags;
 
 	BUG_ON((int)id < 0);
-	spin_lock_irqsave(&simple_ida_lock, flags);
+	xa_lock_irqsave(&ida->ida_rt, flags);
 	ida_remove(ida, id);
-	spin_unlock_irqrestore(&simple_ida_lock, flags);
+	xa_unlock_irqrestore(&ida->ida_rt, flags);
 }
 EXPORT_SYMBOL(ida_simple_remove);
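The ida_simple_* entry points keep their interface; only the serialization moved from a file-local spinlock into the xa_lock embedded in the IDA itself. A usage sketch (the my_ida instance and the ID range are illustrative):

```c
#include <linux/idr.h>

static DEFINE_IDA(my_ida);

static int alloc_id(void)
{
	/* IDs from [0, 100); now serialized by the IDA's own xa_lock */
	return ida_simple_get(&my_ida, 0, 100, GFP_KERNEL);
}

static void free_id(int id)
{
	ida_simple_remove(&my_ida, id);
}
```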
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
deleted file mode 100644
index 55b00de106b5..000000000000
--- a/lib/iommu-common.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IOMMU mmap management and range allocation functions.
- * Based almost entirely upon the powerpc iommu allocator.
- */
-
-#include <linux/export.h>
-#include <linux/bitmap.h>
-#include <linux/bug.h>
-#include <linux/iommu-helper.h>
-#include <linux/iommu-common.h>
-#include <linux/dma-mapping.h>
-#include <linux/hash.h>
-
-static unsigned long iommu_large_alloc = 15;
-
-static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
-
-static inline bool need_flush(struct iommu_map_table *iommu)
-{
-	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
-}
-
-static inline void set_flush(struct iommu_map_table *iommu)
-{
-	iommu->flags |= IOMMU_NEED_FLUSH;
-}
-
-static inline void clear_flush(struct iommu_map_table *iommu)
-{
-	iommu->flags &= ~IOMMU_NEED_FLUSH;
-}
-
-static void setup_iommu_pool_hash(void)
-{
-	unsigned int i;
-	static bool do_once;
-
-	if (do_once)
-		return;
-	do_once = true;
-	for_each_possible_cpu(i)
-		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
-/*
- * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
- * is the number of table entries. If `large_pool' is set to true,
- * the top 1/4 of the table will be set aside for pool allocations
- * of more than iommu_large_alloc pages.
- */
-void iommu_tbl_pool_init(struct iommu_map_table *iommu,
-			 unsigned long num_entries,
-			 u32 table_shift,
-			 void (*lazy_flush)(struct iommu_map_table *),
-			 bool large_pool, u32 npools,
-			 bool skip_span_boundary_check)
-{
-	unsigned int start, i;
-	struct iommu_pool *p = &(iommu->large_pool);
-
-	setup_iommu_pool_hash();
-	if (npools == 0)
-		iommu->nr_pools = IOMMU_NR_POOLS;
-	else
-		iommu->nr_pools = npools;
-	BUG_ON(npools > IOMMU_NR_POOLS);
-
-	iommu->table_shift = table_shift;
-	iommu->lazy_flush = lazy_flush;
-	start = 0;
-	if (skip_span_boundary_check)
-		iommu->flags |= IOMMU_NO_SPAN_BOUND;
-	if (large_pool)
-		iommu->flags |= IOMMU_HAS_LARGE_POOL;
-
-	if (!large_pool)
-		iommu->poolsize = num_entries/iommu->nr_pools;
-	else
-		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
-	for (i = 0; i < iommu->nr_pools; i++) {
-		spin_lock_init(&(iommu->pools[i].lock));
-		iommu->pools[i].start = start;
-		iommu->pools[i].hint = start;
-		start += iommu->poolsize; /* start for next pool */
-		iommu->pools[i].end = start - 1;
-	}
-	if (!large_pool)
-		return;
-	/* initialize large_pool */
-	spin_lock_init(&(p->lock));
-	p->start = start;
-	p->hint = p->start;
-	p->end = num_entries;
-}
-EXPORT_SYMBOL(iommu_tbl_pool_init);
-
-unsigned long iommu_tbl_range_alloc(struct device *dev,
-				struct iommu_map_table *iommu,
-				unsigned long npages,
-				unsigned long *handle,
-				unsigned long mask,
-				unsigned int align_order)
-{
-	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
-	unsigned long n, end, start, limit, boundary_size;
-	struct iommu_pool *pool;
-	int pass = 0;
-	unsigned int pool_nr;
-	unsigned int npools = iommu->nr_pools;
-	unsigned long flags;
-	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
-	bool largealloc = (large_pool && npages > iommu_large_alloc);
-	unsigned long shift;
-	unsigned long align_mask = 0;
-
-	if (align_order > 0)
-		align_mask = ~0ul >> (BITS_PER_LONG - align_order);
-
-	/* Sanity check */
-	if (unlikely(npages == 0)) {
-		WARN_ON_ONCE(1);
-		return IOMMU_ERROR_CODE;
-	}
-
-	if (largealloc) {
-		pool = &(iommu->large_pool);
-		pool_nr = 0; /* to keep compiler happy */
-	} else {
-		/* pick out pool_nr */
-		pool_nr = pool_hash & (npools - 1);
-		pool = &(iommu->pools[pool_nr]);
-	}
-	spin_lock_irqsave(&pool->lock, flags);
-
- again:
-	if (pass == 0 && handle && *handle &&
-	    (*handle >= pool->start) && (*handle < pool->end))
-		start = *handle;
-	else
-		start = pool->hint;
-
-	limit = pool->end;
-
-	/* The case below can happen if we have a small segment appended
-	 * to a large, or when the previous alloc was at the very end of
-	 * the available space. If so, go back to the beginning. If a
-	 * flush is needed, it will get done based on the return value
-	 * from iommu_area_alloc() below.
-	 */
-	if (start >= limit)
-		start = pool->start;
-	shift = iommu->table_map_base >> iommu->table_shift;
-	if (limit + shift > mask) {
-		limit = mask - shift + 1;
-		/* If we're constrained on address range, first try
-		 * at the masked hint to avoid O(n) search complexity,
-		 * but on second pass, start at 0 in pool 0.
-		 */
-		if ((start & mask) >= limit || pass > 0) {
-			spin_unlock(&(pool->lock));
-			pool = &(iommu->pools[0]);
-			spin_lock(&(pool->lock));
-			start = pool->start;
-		} else {
-			start &= mask;
-		}
-	}
-
-	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << iommu->table_shift);
-	else
-		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
-
-	boundary_size = boundary_size >> iommu->table_shift;
-	/*
-	 * if the skip_span_boundary_check had been set during init, we set
-	 * things up so that iommu_is_span_boundary() merely checks if the
-	 * (index + npages) < num_tsb_entries
-	 */
-	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
-		shift = 0;
-		boundary_size = iommu->poolsize * iommu->nr_pools;
-	}
-	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
-			     boundary_size, align_mask);
-	if (n == -1) {
-		if (likely(pass == 0)) {
-			/* First failure, rescan from the beginning. */
-			pool->hint = pool->start;
-			set_flush(iommu);
-			pass++;
-			goto again;
-		} else if (!largealloc && pass <= iommu->nr_pools) {
-			spin_unlock(&(pool->lock));
-			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
-			pool = &(iommu->pools[pool_nr]);
-			spin_lock(&(pool->lock));
-			pool->hint = pool->start;
-			set_flush(iommu);
-			pass++;
-			goto again;
-		} else {
-			/* give up */
-			n = IOMMU_ERROR_CODE;
-			goto bail;
-		}
-	}
-	if (iommu->lazy_flush &&
-	    (n < pool->hint || need_flush(iommu))) {
-		clear_flush(iommu);
-		iommu->lazy_flush(iommu);
-	}
-
-	end = n + npages;
-	pool->hint = end;
-
-	/* Update handle for SG allocations */
-	if (handle)
-		*handle = end;
- bail:
-	spin_unlock_irqrestore(&(pool->lock), flags);
-
-	return n;
-}
-EXPORT_SYMBOL(iommu_tbl_range_alloc);
-
-static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
-				   unsigned long entry)
-{
-	struct iommu_pool *p;
-	unsigned long largepool_start = tbl->large_pool.start;
-	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
-
-	/* The large pool is the last pool at the top of the table */
-	if (large_pool && entry >= largepool_start) {
-		p = &tbl->large_pool;
-	} else {
-		unsigned int pool_nr = entry / tbl->poolsize;
-
-		BUG_ON(pool_nr >= tbl->nr_pools);
-		p = &tbl->pools[pool_nr];
-	}
-	return p;
-}
-
-/* Caller supplies the index of the entry into the iommu map table
- * itself when the mapping from dma_addr to the entry is not the
- * default addr->entry mapping below.
- */
-void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
-			  unsigned long npages, unsigned long entry)
-{
-	struct iommu_pool *pool;
-	unsigned long flags;
-	unsigned long shift = iommu->table_shift;
-
-	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
-		entry = (dma_addr - iommu->table_map_base) >> shift;
-	pool = get_pool(iommu, entry);
-
-	spin_lock_irqsave(&(pool->lock), flags);
-	bitmap_clear(iommu->map, entry, npages);
-	spin_unlock_irqrestore(&(pool->lock), flags);
-}
-EXPORT_SYMBOL(iommu_tbl_range_free);
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 23633c0fda4a..92a9f243c0e2 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,19 +3,8 @@
  * IOMMU helper functions for the free area management
  */
 
-#include <linux/export.h>
 #include <linux/bitmap.h>
-#include <linux/bug.h>
-
-int iommu_is_span_boundary(unsigned int index, unsigned int nr,
-			   unsigned long shift,
-			   unsigned long boundary_size)
-{
-	BUG_ON(!is_power_of_2(boundary_size));
-
-	shift = (shift + index) & (boundary_size - 1);
-	return shift + nr > boundary_size;
-}
+#include <linux/iommu-helper.h>
 
 unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 			       unsigned long start, unsigned int nr,
@@ -38,4 +27,3 @@ again:
 	}
 	return -1;
 }
-EXPORT_SYMBOL(iommu_area_alloc);
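For reference, the removed iommu_is_span_boundary() rejects allocations of nr entries that would straddle a power-of-two boundary; its body is gone from this file, presumably reachable instead through the newly included <linux/iommu-helper.h>. The logic restated as standalone C with a worked 16-entry example:

```c
#include <assert.h>

static int is_span_boundary(unsigned int index, unsigned int nr,
			    unsigned long shift, unsigned long boundary_size)
{
	/* offset of the allocation start within its boundary window */
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	/* 16-entry boundary: [12,15] fits exactly, [14,17] straddles it */
	assert(!is_span_boundary(12, 4, 0, 16));
	assert(is_span_boundary(14, 4, 0, 16));
	return 0;
}
```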
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index fdae394172fa..7e43cd54c84c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_to_iter);
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = copy_to_user_mcsafe((__force void *) to, from, n);
+	}
+	return n;
+}
+
+static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+		const char *from, size_t len)
+{
+	unsigned long ret;
+	char *to;
+
+	to = kmap_atomic(page);
+	ret = memcpy_mcsafe(to + offset, from, len);
+	kunmap_atomic(to);
+
+	return ret;
+}
+
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+{
+	const char *from = addr;
+	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return 0;
+	}
+	if (iter_is_iovec(i))
+		might_fault();
+	iterate_and_advance(i, bytes, v,
+		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+		({
+		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+			       (from += v.bv_len) - v.bv_len, v.bv_len);
+		if (rem) {
+			curr_addr = (unsigned long) from;
+			bytes = curr_addr - s_addr - rem;
+			return bytes;
+		}
+		}),
+		({
+		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+				v.iov_len);
+		if (rem) {
+			curr_addr = (unsigned long) from;
+			bytes = curr_addr - s_addr - rem;
+			return bytes;
+		}
+		})
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
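_copy_to_iter_mcsafe() returns the number of bytes copied before hitting a machine-check-poisoned region instead of faulting, so a short return signals poison. A sketch of a caller honoring that contract; the function name and the -EIO policy are illustrative, not mandated by this code:

```c
#include <linux/uio.h>

/* hypothetical: push "len" bytes of possibly-poisoned pmem into an iter */
static ssize_t write_from_pmem(struct iov_iter *iter, const void *src,
			       size_t len)
{
	size_t copied = _copy_to_iter_mcsafe(src, len, iter);

	if (copied != len)
		return -EIO;	/* stopped early at a poisoned cacheline */
	return copied;
}
```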
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 15ea216a67ce..63d0816ab23b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -22,6 +22,7 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/uidgid.h>
 #include <linux/uuid.h>
 #include <linux/ctype.h>
 #include <net/sock.h>
@@ -231,30 +232,6 @@ out:
 	return r;
 }
 
-#ifdef CONFIG_NET
-static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
-{
-	struct kobject *kobj = data, *ksobj;
-	const struct kobj_ns_type_operations *ops;
-
-	ops = kobj_ns_ops(kobj);
-	if (!ops && kobj->kset) {
-		ksobj = &kobj->kset->kobj;
-		if (ksobj->parent != NULL)
-			ops = kobj_ns_ops(ksobj->parent);
-	}
-
-	if (ops && ops->netlink_ns && kobj->ktype->namespace) {
-		const void *sock_ns, *ns;
-		ns = kobj->ktype->namespace(kobj);
-		sock_ns = ops->netlink_ns(dsk);
-		return sock_ns != ns;
-	}
-
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_UEVENT_HELPER
 static int kobj_usermode_filter(struct kobject *kobj)
 {
@@ -296,15 +273,44 @@ static void cleanup_uevent_env(struct subprocess_info *info)
 }
 #endif
 
-static int kobject_uevent_net_broadcast(struct kobject *kobj,
-					struct kobj_uevent_env *env,
-					const char *action_string,
-					const char *devpath)
+#ifdef CONFIG_NET
+static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
+					const char *action_string,
+					const char *devpath)
 {
-	int retval = 0;
-#if defined(CONFIG_NET)
+	struct netlink_skb_parms *parms;
+	struct sk_buff *skb = NULL;
+	char *scratch;
+	size_t len;
+
+	/* allocate message with maximum possible size */
+	len = strlen(action_string) + strlen(devpath) + 2;
+	skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	/* add header */
+	scratch = skb_put(skb, len);
+	sprintf(scratch, "%s@%s", action_string, devpath);
+
+	skb_put_data(skb, env->buf, env->buflen);
+
+	parms = &NETLINK_CB(skb);
+	parms->creds.uid = GLOBAL_ROOT_UID;
+	parms->creds.gid = GLOBAL_ROOT_GID;
+	parms->dst_group = 1;
+	parms->portid = 0;
+
+	return skb;
+}
+
+static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
+					 const char *action_string,
+					 const char *devpath)
+{
 	struct sk_buff *skb = NULL;
 	struct uevent_sock *ue_sk;
+	int retval = 0;
 
 	/* send netlink message */
 	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
@@ -314,37 +320,99 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 			continue;
 
 		if (!skb) {
-			/* allocate message with the maximum possible size */
-			size_t len = strlen(action_string) + strlen(devpath) + 2;
-			char *scratch;
-
 			retval = -ENOMEM;
-			skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+			skb = alloc_uevent_skb(env, action_string, devpath);
 			if (!skb)
 				continue;
-
-			/* add header */
-			scratch = skb_put(skb, len);
-			sprintf(scratch, "%s@%s", action_string, devpath);
-
-			skb_put_data(skb, env->buf, env->buflen);
-
-			NETLINK_CB(skb).dst_group = 1;
 		}
 
-		retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb),
-						    0, 1, GFP_KERNEL,
-						    kobj_bcast_filter,
-						    kobj);
+		retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
+					   GFP_KERNEL);
 		/* ENOBUFS should be handled in userspace */
 		if (retval == -ENOBUFS || retval == -ESRCH)
 			retval = 0;
 	}
 	consume_skb(skb);
-#endif
+
 	return retval;
 }
 
+static int uevent_net_broadcast_tagged(struct sock *usk,
+				       struct kobj_uevent_env *env,
+				       const char *action_string,
+				       const char *devpath)
+{
+	struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
+	struct sk_buff *skb = NULL;
+	int ret = 0;
+
+	skb = alloc_uevent_skb(env, action_string, devpath);
+	if (!skb)
+		return -ENOMEM;
+
+	/* fix credentials */
+	if (owning_user_ns != &init_user_ns) {
+		struct netlink_skb_parms *parms = &NETLINK_CB(skb);
+		kuid_t root_uid;
+		kgid_t root_gid;
+
+		/* fix uid */
+		root_uid = make_kuid(owning_user_ns, 0);
+		if (uid_valid(root_uid))
+			parms->creds.uid = root_uid;
+
+		/* fix gid */
+		root_gid = make_kgid(owning_user_ns, 0);
+		if (gid_valid(root_gid))
+			parms->creds.gid = root_gid;
+	}
+
+	ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
+	/* ENOBUFS should be handled in userspace */
+	if (ret == -ENOBUFS || ret == -ESRCH)
+		ret = 0;
+
+	return ret;
+}
+#endif
+
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+					struct kobj_uevent_env *env,
+					const char *action_string,
+					const char *devpath)
+{
+	int ret = 0;
+
+#ifdef CONFIG_NET
+	const struct kobj_ns_type_operations *ops;
+	const struct net *net = NULL;
+
+	ops = kobj_ns_ops(kobj);
+	if (!ops && kobj->kset) {
+		struct kobject *ksobj = &kobj->kset->kobj;
+		if (ksobj->parent != NULL)
+			ops = kobj_ns_ops(ksobj->parent);
+	}
+
+	/* kobjects currently only carry network namespace tags and they
+	 * are the only tag relevant here since we want to decide which
+	 * network namespaces to broadcast the uevent into.
+	 */
+	if (ops && ops->netlink_ns && kobj->ktype->namespace)
+		if (ops->type == KOBJ_NS_TYPE_NET)
+			net = kobj->ktype->namespace(kobj);
+
+	if (!net)
+		ret = uevent_net_broadcast_untagged(env, action_string,
+						    devpath);
+	else
+		ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
+						  action_string, devpath);
+#endif
+
+	return ret;
+}
+
 static void zap_modalias_env(struct kobj_uevent_env *env)
 {
 	static const char modalias_prefix[] = "MODALIAS=";
@@ -703,9 +771,13 @@ static int uevent_net_init(struct net *net)
 
 	net->uevent_sock = ue_sk;
 
-	mutex_lock(&uevent_sock_mutex);
-	list_add_tail(&ue_sk->list, &uevent_sock_list);
-	mutex_unlock(&uevent_sock_mutex);
+	/* Restrict uevents to initial user namespace. */
+	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+		mutex_lock(&uevent_sock_mutex);
+		list_add_tail(&ue_sk->list, &uevent_sock_list);
+		mutex_unlock(&uevent_sock_mutex);
+	}
+
 	return 0;
 }
 
@@ -713,9 +785,11 @@ static void uevent_net_exit(struct net *net)
 {
 	struct uevent_sock *ue_sk = net->uevent_sock;
 
-	mutex_lock(&uevent_sock_mutex);
-	list_del(&ue_sk->list);
-	mutex_unlock(&uevent_sock_mutex);
+	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+		mutex_lock(&uevent_sock_mutex);
+		list_del(&ue_sk->list);
+		mutex_unlock(&uevent_sock_mutex);
+	}
 
 	netlink_kernel_release(ue_sk->sk);
 	kfree(ue_sk);
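Both the tagged and untagged paths above broadcast on multicast group 1 of NETLINK_KOBJECT_UEVENT, the same group udev listens on. A self-contained userspace sketch of the receiving side (error handling trimmed; each datagram is "action@devpath" followed by NUL-separated environment strings, of which this prints only the first):

```c
#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* the dst_group set above */
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf) - 1, 0);

		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("%s\n", buf);	/* e.g. "add@/devices/..." */
	}
	close(fd);
	return 0;
}
```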
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 7eceeddb3fb8..c2d6f4efcfbc 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -65,13 +65,6 @@
 typedef mpi_limb_t *mpi_ptr_t;	/* pointer to a limb */
 typedef int mpi_size_t;		/* (must be a signed type) */
 
-static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
-{
-	if (a->alloced < b)
-		return mpi_resize(a, b);
-	return 0;
-}
-
 /* Copy N limbs from S to D.  */
 #define MPN_COPY(d, s, n) \
 	do {					\
@@ -80,13 +73,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
 		(d)[_i] = (s)[_i];		\
 	} while (0)
 
-#define MPN_COPY_INCR(d, s, n) \
-	do {					\
-		mpi_size_t _i;			\
-		for (_i = 0; _i < (n); _i++)	\
-			(d)[_i] = (s)[_i];	\
-	} while (0)
-
 #define MPN_COPY_DECR(d, s, n) \
 	do {					\
 		mpi_size_t _i;			\
@@ -111,15 +97,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
 		}				\
 	} while (0)
 
-#define MPN_NORMALIZE_NOT_ZERO(d, n) \
-	do {					\
-		for (;;) {			\
-			if ((d)[(n)-1])		\
-				break;		\
-			(n)--;			\
-		}				\
-	} while (0)
-
 #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
 	do {						\
 		if ((size) < KARATSUBA_THRESHOLD)	\
@@ -128,46 +105,11 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
 			mul_n(prodp, up, vp, size, tspace);	\
 	} while (0);
 
-/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
- * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
- * If this would yield overflow, DI should be the largest possible number
- * (i.e., only ones). For correct operation, the most significant bit of D
- * has to be set. Put the quotient in Q and the remainder in R.
- */
-#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
-	do {								\
-		mpi_limb_t _q, _ql, _r;					\
-		mpi_limb_t _xh, _xl;					\
-		umul_ppmm(_q, _ql, (nh), (di));				\
-		_q += (nh);	/* DI is 2**BITS_PER_MPI_LIMB too small */ \
-		umul_ppmm(_xh, _xl, _q, (d));				\
-		sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl);		\
-		if (_xh) {						\
-			sub_ddmmss(_xh, _r, _xh, _r, 0, (d));		\
-			_q++;						\
-			if (_xh) {					\
-				sub_ddmmss(_xh, _r, _xh, _r, 0, (d));	\
-				_q++;					\
-			}						\
-		}							\
-		if (_r >= (d)) {					\
-			_r -= (d);					\
-			_q++;						\
-		}							\
-		(r) = _r;						\
-		(q) = _q;						\
-	} while (0)
-
 /*-- mpiutil.c --*/
 mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
 void mpi_free_limb_space(mpi_ptr_t a);
 void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs);
 
-/*-- mpi-bit.c --*/
-void mpi_rshift_limbs(MPI a, unsigned int count);
-int mpi_lshift_limbs(MPI a, unsigned int count);
-
-/*-- mpihelp-add.c --*/
 static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 				       mpi_size_t s1_size, mpi_limb_t s2_limb);
 mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -175,7 +117,6 @@ mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
 				     mpi_ptr_t s2_ptr, mpi_size_t s2_size);
 
-/*-- mpihelp-sub.c --*/
 static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 				       mpi_size_t s1_size, mpi_limb_t s2_limb);
 mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -183,10 +124,10 @@ mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
 				     mpi_ptr_t s2_ptr, mpi_size_t s2_size);
 
-/*-- mpihelp-cmp.c --*/
+/*-- mpih-cmp.c --*/
 int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size);
 
-/*-- mpihelp-mul.c --*/
+/*-- mpih-mul.c --*/
 
 struct karatsuba_ctx {
 	struct karatsuba_ctx *next;
@@ -202,7 +143,6 @@ mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 			    mpi_size_t s1_size, mpi_limb_t s2_limb);
 mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 			    mpi_size_t s1_size, mpi_limb_t s2_limb);
-int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
 int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
 		mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result);
 void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
@@ -214,21 +154,16 @@ int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
 			       mpi_ptr_t vp, mpi_size_t vsize,
 			       struct karatsuba_ctx *ctx);
 
-/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/
+/*-- generic_mpih-mul1.c --*/
 mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
 			 mpi_size_t s1_size, mpi_limb_t s2_limb);
 
-/*-- mpihelp-div.c --*/
-mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
-			 mpi_limb_t divisor_limb);
+/*-- mpih-div.c --*/
 mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
| 225 | mpi_ptr_t np, mpi_size_t nsize, | 163 | mpi_ptr_t np, mpi_size_t nsize, |
| 226 | mpi_ptr_t dp, mpi_size_t dsize); | 164 | mpi_ptr_t dp, mpi_size_t dsize); |
| 227 | mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr, | ||
| 228 | mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, | ||
| 229 | mpi_limb_t divisor_limb); | ||
| 230 | 165 | ||
| 231 | /*-- mpihelp-shift.c --*/ | 166 | /*-- generic_mpih-[lr]shift.c --*/ |
| 232 | mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, | 167 | mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, |
| 233 | unsigned cnt); | 168 | unsigned cnt); |
| 234 | mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, | 169 | mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, |
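The UDIV_QRNND_PREINV macro removed above divided the two-limb number (nh,nl) by d using the precomputed reciprocal di, so the hot loops in mpih-div.c could trade one hardware divide per limb for two multiplies. A standalone sketch of the same idea, assuming 32-bit limbs so the intermediates fit in uint64_t (the helper name and the fixed limb width are illustrative, not kernel API):

    #include <stdint.h>

    /*
     * Illustrative only: requires nh < d, d with its top bit set, and
     * di precomputed as floor(2^64 / d) - 2^32.
     */
    static void udiv_qrnnd_preinv32(uint32_t *q, uint32_t *r,
                                    uint32_t nh, uint32_t nl,
                                    uint32_t d, uint32_t di)
    {
            uint64_t n = ((uint64_t)nh << 32) | nl;
            /* Quotient estimate from the high limb and the reciprocal;
             * it never exceeds the true quotient. */
            uint32_t qest = (uint32_t)(((uint64_t)nh * di) >> 32) + nh;
            uint64_t rem = n - (uint64_t)qest * d;

            /* The estimate is only ever slightly too small; fix it up. */
            while (rem >= d) {
                    rem -= d;
                    qest++;
            }
            *q = qest;
            *r = (uint32_t)rem;
    }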
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 6016f1deb1f5..9bbd9c5d375a 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c | |||
| @@ -112,18 +112,6 @@ static inline void alloc_global_tags(struct percpu_ida *pool, | |||
| 112 | min(pool->nr_free, pool->percpu_batch_size)); | 112 | min(pool->nr_free, pool->percpu_batch_size)); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) | ||
| 116 | { | ||
| 117 | int tag = -ENOSPC; | ||
| 118 | |||
| 119 | spin_lock(&tags->lock); | ||
| 120 | if (tags->nr_free) | ||
| 121 | tag = tags->freelist[--tags->nr_free]; | ||
| 122 | spin_unlock(&tags->lock); | ||
| 123 | |||
| 124 | return tag; | ||
| 125 | } | ||
| 126 | |||
| 127 | /** | 115 | /** |
| 128 | * percpu_ida_alloc - allocate a tag | 116 | * percpu_ida_alloc - allocate a tag |
| 129 | * @pool: pool to allocate from | 117 | * @pool: pool to allocate from |
| @@ -147,20 +135,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) | |||
| 147 | DEFINE_WAIT(wait); | 135 | DEFINE_WAIT(wait); |
| 148 | struct percpu_ida_cpu *tags; | 136 | struct percpu_ida_cpu *tags; |
| 149 | unsigned long flags; | 137 | unsigned long flags; |
| 150 | int tag; | 138 | int tag = -ENOSPC; |
| 151 | 139 | ||
| 152 | local_irq_save(flags); | 140 | tags = raw_cpu_ptr(pool->tag_cpu); |
| 153 | tags = this_cpu_ptr(pool->tag_cpu); | 141 | spin_lock_irqsave(&tags->lock, flags); |
| 154 | 142 | ||
| 155 | /* Fastpath */ | 143 | /* Fastpath */ |
| 156 | tag = alloc_local_tag(tags); | 144 | if (likely(tags->nr_free)) { |
| 157 | if (likely(tag >= 0)) { | 145 | tag = tags->freelist[--tags->nr_free]; |
| 158 | local_irq_restore(flags); | 146 | spin_unlock_irqrestore(&tags->lock, flags); |
| 159 | return tag; | 147 | return tag; |
| 160 | } | 148 | } |
| 149 | spin_unlock_irqrestore(&tags->lock, flags); | ||
| 161 | 150 | ||
| 162 | while (1) { | 151 | while (1) { |
| 163 | spin_lock(&pool->lock); | 152 | spin_lock_irqsave(&pool->lock, flags); |
| 153 | tags = this_cpu_ptr(pool->tag_cpu); | ||
| 164 | 154 | ||
| 165 | /* | 155 | /* |
| 166 | * prepare_to_wait() must come before steal_tags(), in case | 156 | * prepare_to_wait() must come before steal_tags(), in case |
| @@ -184,8 +174,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) | |||
| 184 | &pool->cpus_have_tags); | 174 | &pool->cpus_have_tags); |
| 185 | } | 175 | } |
| 186 | 176 | ||
| 187 | spin_unlock(&pool->lock); | 177 | spin_unlock_irqrestore(&pool->lock, flags); |
| 188 | local_irq_restore(flags); | ||
| 189 | 178 | ||
| 190 | if (tag >= 0 || state == TASK_RUNNING) | 179 | if (tag >= 0 || state == TASK_RUNNING) |
| 191 | break; | 180 | break; |
| @@ -196,9 +185,6 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) | |||
| 196 | } | 185 | } |
| 197 | 186 | ||
| 198 | schedule(); | 187 | schedule(); |
| 199 | |||
| 200 | local_irq_save(flags); | ||
| 201 | tags = this_cpu_ptr(pool->tag_cpu); | ||
| 202 | } | 188 | } |
| 203 | if (state != TASK_RUNNING) | 189 | if (state != TASK_RUNNING) |
| 204 | finish_wait(&pool->wait, &wait); | 190 | finish_wait(&pool->wait, &wait); |
| @@ -222,28 +208,24 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag) | |||
| 222 | 208 | ||
| 223 | BUG_ON(tag >= pool->nr_tags); | 209 | BUG_ON(tag >= pool->nr_tags); |
| 224 | 210 | ||
| 225 | local_irq_save(flags); | 211 | tags = raw_cpu_ptr(pool->tag_cpu); |
| 226 | tags = this_cpu_ptr(pool->tag_cpu); | ||
| 227 | 212 | ||
| 228 | spin_lock(&tags->lock); | 213 | spin_lock_irqsave(&tags->lock, flags); |
| 229 | tags->freelist[tags->nr_free++] = tag; | 214 | tags->freelist[tags->nr_free++] = tag; |
| 230 | 215 | ||
| 231 | nr_free = tags->nr_free; | 216 | nr_free = tags->nr_free; |
| 232 | spin_unlock(&tags->lock); | ||
| 233 | 217 | ||
| 234 | if (nr_free == 1) { | 218 | if (nr_free == 1) { |
| 235 | cpumask_set_cpu(smp_processor_id(), | 219 | cpumask_set_cpu(smp_processor_id(), |
| 236 | &pool->cpus_have_tags); | 220 | &pool->cpus_have_tags); |
| 237 | wake_up(&pool->wait); | 221 | wake_up(&pool->wait); |
| 238 | } | 222 | } |
| 223 | spin_unlock_irqrestore(&tags->lock, flags); | ||
| 239 | 224 | ||
| 240 | if (nr_free == pool->percpu_max_size) { | 225 | if (nr_free == pool->percpu_max_size) { |
| 241 | spin_lock(&pool->lock); | 226 | spin_lock_irqsave(&pool->lock, flags); |
| 227 | spin_lock(&tags->lock); | ||
| 242 | 228 | ||
| 243 | /* | ||
| 244 | * Global lock held and irqs disabled, don't need percpu | ||
| 245 | * lock | ||
| 246 | */ | ||
| 247 | if (tags->nr_free == pool->percpu_max_size) { | 229 | if (tags->nr_free == pool->percpu_max_size) { |
| 248 | move_tags(pool->freelist, &pool->nr_free, | 230 | move_tags(pool->freelist, &pool->nr_free, |
| 249 | tags->freelist, &tags->nr_free, | 231 | tags->freelist, &tags->nr_free, |
| @@ -251,10 +233,9 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag) | |||
| 251 | 233 | ||
| 252 | wake_up(&pool->wait); | 234 | wake_up(&pool->wait); |
| 253 | } | 235 | } |
| 254 | spin_unlock(&pool->lock); | 236 | spin_unlock(&tags->lock); |
| 237 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 255 | } | 238 | } |
| 256 | |||
| 257 | local_irq_restore(flags); | ||
| 258 | } | 239 | } |
| 259 | EXPORT_SYMBOL_GPL(percpu_ida_free); | 240 | EXPORT_SYMBOL_GPL(percpu_ida_free); |
| 260 | 241 | ||
| @@ -346,29 +327,27 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, | |||
| 346 | struct percpu_ida_cpu *remote; | 327 | struct percpu_ida_cpu *remote; |
| 347 | unsigned cpu, i, err = 0; | 328 | unsigned cpu, i, err = 0; |
| 348 | 329 | ||
| 349 | local_irq_save(flags); | ||
| 350 | for_each_possible_cpu(cpu) { | 330 | for_each_possible_cpu(cpu) { |
| 351 | remote = per_cpu_ptr(pool->tag_cpu, cpu); | 331 | remote = per_cpu_ptr(pool->tag_cpu, cpu); |
| 352 | spin_lock(&remote->lock); | 332 | spin_lock_irqsave(&remote->lock, flags); |
| 353 | for (i = 0; i < remote->nr_free; i++) { | 333 | for (i = 0; i < remote->nr_free; i++) { |
| 354 | err = fn(remote->freelist[i], data); | 334 | err = fn(remote->freelist[i], data); |
| 355 | if (err) | 335 | if (err) |
| 356 | break; | 336 | break; |
| 357 | } | 337 | } |
| 358 | spin_unlock(&remote->lock); | 338 | spin_unlock_irqrestore(&remote->lock, flags); |
| 359 | if (err) | 339 | if (err) |
| 360 | goto out; | 340 | goto out; |
| 361 | } | 341 | } |
| 362 | 342 | ||
| 363 | spin_lock(&pool->lock); | 343 | spin_lock_irqsave(&pool->lock, flags); |
| 364 | for (i = 0; i < pool->nr_free; i++) { | 344 | for (i = 0; i < pool->nr_free; i++) { |
| 365 | err = fn(pool->freelist[i], data); | 345 | err = fn(pool->freelist[i], data); |
| 366 | if (err) | 346 | if (err) |
| 367 | break; | 347 | break; |
| 368 | } | 348 | } |
| 369 | spin_unlock(&pool->lock); | 349 | spin_unlock_irqrestore(&pool->lock, flags); |
| 370 | out: | 350 | out: |
| 371 | local_irq_restore(flags); | ||
| 372 | return err; | 351 | return err; |
| 373 | } | 352 | } |
| 374 | EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); | 353 | EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); |
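The recurring change in this file is mechanical: each open-coded local_irq_save() plus spin_lock() pair becomes a single spin_lock_irqsave(), which keeps the saved irq state tied to the lock it protects. A minimal sketch of the conversion (the lock and the critical section are placeholders, not symbols from this file):

    static DEFINE_SPINLOCK(example_lock);

    static void example_before(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* disable irqs first ... */
            spin_lock(&example_lock);       /* ... then take the lock */
            /* critical section */
            spin_unlock(&example_lock);
            local_irq_restore(flags);
    }

    static void example_after(void)
    {
            unsigned long flags;

            /* One helper does both and documents the irq context. */
            spin_lock_irqsave(&example_lock, flags);
            /* critical section */
            spin_unlock_irqrestore(&example_lock, flags);
    }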
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c index 0ec3f257ffdf..1db74eb098d0 100644 --- a/lib/reed_solomon/decode_rs.c +++ b/lib/reed_solomon/decode_rs.c | |||
| @@ -1,22 +1,16 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * lib/reed_solomon/decode_rs.c | 3 | * Generic Reed Solomon encoder / decoder library |
| 3 | * | ||
| 4 | * Overview: | ||
| 5 | * Generic Reed Solomon encoder / decoder library | ||
| 6 | * | 4 | * |
| 7 | * Copyright 2002, Phil Karn, KA9Q | 5 | * Copyright 2002, Phil Karn, KA9Q |
| 8 | * May be used under the terms of the GNU General Public License (GPL) | 6 | * May be used under the terms of the GNU General Public License (GPL) |
| 9 | * | 7 | * |
| 10 | * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de) | 8 | * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de) |
| 11 | * | 9 | * |
| 12 | * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ | 10 | * Generic data width independent code which is included by the wrappers. |
| 13 | * | ||
| 14 | */ | ||
| 15 | |||
| 16 | /* Generic data width independent code which is included by the | ||
| 17 | * wrappers. | ||
| 18 | */ | 11 | */ |
| 19 | { | 12 | { |
| 13 | struct rs_codec *rs = rsc->codec; | ||
| 20 | int deg_lambda, el, deg_omega; | 14 | int deg_lambda, el, deg_omega; |
| 21 | int i, j, r, k, pad; | 15 | int i, j, r, k, pad; |
| 22 | int nn = rs->nn; | 16 | int nn = rs->nn; |
| @@ -27,16 +21,22 @@ | |||
| 27 | uint16_t *alpha_to = rs->alpha_to; | 21 | uint16_t *alpha_to = rs->alpha_to; |
| 28 | uint16_t *index_of = rs->index_of; | 22 | uint16_t *index_of = rs->index_of; |
| 29 | uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; | 23 | uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; |
| 30 | /* Err+Eras Locator poly and syndrome poly. The maximum value | ||
| 31 | * of nroots is 8. So the necessary stack size will be about | ||
| 32 | * 220 bytes max. | ||
| 33 | */ | ||
| 34 | uint16_t lambda[nroots + 1], syn[nroots]; | ||
| 35 | uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1]; | ||
| 36 | uint16_t root[nroots], reg[nroots + 1], loc[nroots]; | ||
| 37 | int count = 0; | 24 | int count = 0; |
| 38 | uint16_t msk = (uint16_t) rs->nn; | 25 | uint16_t msk = (uint16_t) rs->nn; |
| 39 | 26 | ||
| 27 | /* | ||
| 28 | * The decoder buffers are in the rs control struct. They are | ||
| 29 | * arrays sized [nroots + 1] | ||
| 30 | */ | ||
| 31 | uint16_t *lambda = rsc->buffers + RS_DECODE_LAMBDA * (nroots + 1); | ||
| 32 | uint16_t *syn = rsc->buffers + RS_DECODE_SYN * (nroots + 1); | ||
| 33 | uint16_t *b = rsc->buffers + RS_DECODE_B * (nroots + 1); | ||
| 34 | uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1); | ||
| 35 | uint16_t *omega = rsc->buffers + RS_DECODE_OMEGA * (nroots + 1); | ||
| 36 | uint16_t *root = rsc->buffers + RS_DECODE_ROOT * (nroots + 1); | ||
| 37 | uint16_t *reg = rsc->buffers + RS_DECODE_REG * (nroots + 1); | ||
| 38 | uint16_t *loc = rsc->buffers + RS_DECODE_LOC * (nroots + 1); | ||
| 39 | |||
| 40 | /* Check length parameter for validity */ | 40 | /* Check length parameter for validity */ |
| 41 | pad = nn - nroots - len; | 41 | pad = nn - nroots - len; |
| 42 | BUG_ON(pad < 0 || pad >= nn); | 42 | BUG_ON(pad < 0 || pad >= nn); |
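The decoder state that used to live in variable-length arrays on the stack is now carved out of a single rsc->buffers allocation, one slot of nroots + 1 entries per RS_DECODE_* index. A sketch of the addressing scheme the pointer setup above relies on (the helper name is invented for illustration):

    /* Slot 'which' (one of the RS_DECODE_* indices) starts at a fixed
     * offset in the per-instance allocation; every slot is nroots + 1
     * uint16_t entries wide. */
    static inline uint16_t *rs_decode_buf(struct rs_control *rsc,
                                          int which, int nroots)
    {
            return rsc->buffers + which * (nroots + 1);
    }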
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c index 0b5b1a6728ec..9112d46e869e 100644 --- a/lib/reed_solomon/encode_rs.c +++ b/lib/reed_solomon/encode_rs.c | |||
| @@ -1,23 +1,16 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * lib/reed_solomon/encode_rs.c | 3 | * Generic Reed Solomon encoder / decoder library |
| 3 | * | ||
| 4 | * Overview: | ||
| 5 | * Generic Reed Solomon encoder / decoder library | ||
| 6 | * | 4 | * |
| 7 | * Copyright 2002, Phil Karn, KA9Q | 5 | * Copyright 2002, Phil Karn, KA9Q |
| 8 | * May be used under the terms of the GNU General Public License (GPL) | 6 | * May be used under the terms of the GNU General Public License (GPL) |
| 9 | * | 7 | * |
| 10 | * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de) | 8 | * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de) |
| 11 | * | 9 | * |
| 12 | * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $ | 10 | * Generic data width independent code which is included by the wrappers. |
| 13 | * | ||
| 14 | */ | ||
| 15 | |||
| 16 | /* Generic data width independent code which is included by the | ||
| 17 | * wrappers. | ||
| 18 | * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par) | ||
| 19 | */ | 11 | */ |
| 20 | { | 12 | { |
| 13 | struct rs_codec *rs = rsc->codec; | ||
| 21 | int i, j, pad; | 14 | int i, j, pad; |
| 22 | int nn = rs->nn; | 15 | int nn = rs->nn; |
| 23 | int nroots = rs->nroots; | 16 | int nroots = rs->nroots; |
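Both encode_rs.c and decode_rs.c are brace-enclosed function bodies rather than complete translation units; each data-width wrapper in reed_solomon.c supplies the prototype and pulls the body in with #include. A toy version of the pattern, with invented file and function names:

    /* sum_body.c: a bare function body; the including wrapper provides
     * the prototype, so 'data' and 'len' resolve to its parameters. */
    {
            int i, sum = 0;

            for (i = 0; i < len; i++)
                    sum += data[i];
            return sum;
    }

    /* sum8.c: the typed wrapper. */
    int sum8(const uint8_t *data, int len)
    {
    #include "sum_body.c"
    }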
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index 06d04cfa9339..dfcf54242fb9 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c | |||
| @@ -1,43 +1,34 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * lib/reed_solomon/reed_solomon.c | 3 | * Generic Reed Solomon encoder / decoder library |
| 3 | * | ||
| 4 | * Overview: | ||
| 5 | * Generic Reed Solomon encoder / decoder library | ||
| 6 | * | 4 | * |
| 7 | * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) | 5 | * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) |
| 8 | * | 6 | * |
| 9 | * Reed Solomon code lifted from reed solomon library written by Phil Karn | 7 | * Reed Solomon code lifted from reed solomon library written by Phil Karn |
| 10 | * Copyright 2002 Phil Karn, KA9Q | 8 | * Copyright 2002 Phil Karn, KA9Q |
| 11 | * | 9 | * |
| 12 | * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ | ||
| 13 | * | ||
| 14 | * This program is free software; you can redistribute it and/or modify | ||
| 15 | * it under the terms of the GNU General Public License version 2 as | ||
| 16 | * published by the Free Software Foundation. | ||
| 17 | * | ||
| 18 | * Description: | 10 | * Description: |
| 19 | * | 11 | * |
| 20 | * The generic Reed Solomon library provides runtime configurable | 12 | * The generic Reed Solomon library provides runtime configurable |
| 21 | * encoding / decoding of RS codes. | 13 | * encoding / decoding of RS codes. |
| 22 | * Each user must call init_rs to get a pointer to a rs_control | ||
| 23 | * structure for the given rs parameters. This structure is either | ||
| 24 | * generated or an already available matching control structure is used. | ||
| 25 | * If a structure is generated then the polynomial arrays for | ||
| 26 | * fast encoding / decoding are built. This can take some time so | ||
| 27 | * make sure not to call this function from a time critical path. | ||
| 28 | * Usually a module / driver should initialize the necessary | ||
| 29 | * rs_control structure on module / driver init and release it | ||
| 30 | * on exit. | ||
| 31 | * The encoding puts the calculated syndrome into a given syndrome | ||
| 32 | * buffer. | ||
| 33 | * The decoding is a two step process. The first step calculates | ||
| 34 | * the syndrome over the received (data + syndrome) and calls the | ||
| 35 | * second stage, which does the decoding / error correction itself. | ||
| 36 | * Many hw encoders provide a syndrome calculation over the received | ||
| 37 | * data + syndrome and can call the second stage directly. | ||
| 38 | * | 14 | * |
| 15 | * Each user must call init_rs to get a pointer to a rs_control structure | ||
| 16 | * for the given rs parameters. The control struct is unique per instance. | ||
| 17 | * It points to a codec which can be shared by multiple control structures. | ||
| 18 | * If a codec is newly allocated then the polynomial arrays for fast | ||
| 19 | * encoding / decoding are built. This can take some time so make sure not | ||
| 20 | * to call this function from a time critical path. Usually a module / | ||
| 21 | * driver should initialize the necessary rs_control structure on module / | ||
| 22 | * driver init and release it on exit. | ||
| 23 | * | ||
| 24 | * The encoding puts the calculated syndrome into a given syndrome buffer. | ||
| 25 | * | ||
| 26 | * The decoding is a two step process. The first step calculates the | ||
| 27 | * syndrome over the received (data + syndrome) and calls the second stage, | ||
| 28 | * which does the decoding / error correction itself. Many hw encoders | ||
| 29 | * provide a syndrome calculation over the received data + syndrome and can | ||
| 30 | * call the second stage directly. | ||
| 39 | */ | 31 | */ |
| 40 | |||
| 41 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
| 42 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
| 43 | #include <linux/init.h> | 34 | #include <linux/init.h> |
| @@ -46,32 +37,44 @@ | |||
| 46 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
| 47 | #include <linux/mutex.h> | 38 | #include <linux/mutex.h> |
| 48 | 39 | ||
| 49 | /* This list holds all currently allocated rs control structures */ | 40 | enum { |
| 50 | static LIST_HEAD (rslist); | 41 | RS_DECODE_LAMBDA, |
| 42 | RS_DECODE_SYN, | ||
| 43 | RS_DECODE_B, | ||
| 44 | RS_DECODE_T, | ||
| 45 | RS_DECODE_OMEGA, | ||
| 46 | RS_DECODE_ROOT, | ||
| 47 | RS_DECODE_REG, | ||
| 48 | RS_DECODE_LOC, | ||
| 49 | RS_DECODE_NUM_BUFFERS | ||
| 50 | }; | ||
| 51 | |||
| 52 | /* This list holds all currently allocated rs codec structures */ | ||
| 53 | static LIST_HEAD(codec_list); | ||
| 51 | /* Protection for the list */ | 54 | /* Protection for the list */ |
| 52 | static DEFINE_MUTEX(rslistlock); | 55 | static DEFINE_MUTEX(rslistlock); |
| 53 | 56 | ||
| 54 | /** | 57 | /** |
| 55 | * rs_init - Initialize a Reed-Solomon codec | 58 | * codec_init - Initialize a Reed-Solomon codec |
| 56 | * @symsize: symbol size, bits (1-8) | 59 | * @symsize: symbol size, bits (1-8) |
| 57 | * @gfpoly: Field generator polynomial coefficients | 60 | * @gfpoly: Field generator polynomial coefficients |
| 58 | * @gffunc: Field generator function | 61 | * @gffunc: Field generator function |
| 59 | * @fcr: first root of RS code generator polynomial, index form | 62 | * @fcr: first root of RS code generator polynomial, index form |
| 60 | * @prim: primitive element to generate polynomial roots | 63 | * @prim: primitive element to generate polynomial roots |
| 61 | * @nroots: RS code generator polynomial degree (number of roots) | 64 | * @nroots: RS code generator polynomial degree (number of roots) |
| 65 | * @gfp: GFP_ flags for allocations | ||
| 62 | * | 66 | * |
| 63 | * Allocate a control structure and the polynomial arrays for faster | 67 | * Allocate a codec structure and the polynomial arrays for faster |
| 64 | * en/decoding. Fill the arrays according to the given parameters. | 68 | * en/decoding. Fill the arrays according to the given parameters. |
| 65 | */ | 69 | */ |
| 66 | static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int), | 70 | static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int), |
| 67 | int fcr, int prim, int nroots) | 71 | int fcr, int prim, int nroots, gfp_t gfp) |
| 68 | { | 72 | { |
| 69 | struct rs_control *rs; | ||
| 70 | int i, j, sr, root, iprim; | 73 | int i, j, sr, root, iprim; |
| 74 | struct rs_codec *rs; | ||
| 71 | 75 | ||
| 72 | /* Allocate the control structure */ | 76 | rs = kzalloc(sizeof(*rs), gfp); |
| 73 | rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL); | 77 | if (!rs) |
| 74 | if (rs == NULL) | ||
| 75 | return NULL; | 78 | return NULL; |
| 76 | 79 | ||
| 77 | INIT_LIST_HEAD(&rs->list); | 80 | INIT_LIST_HEAD(&rs->list); |
| @@ -85,17 +88,17 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int), | |||
| 85 | rs->gffunc = gffunc; | 88 | rs->gffunc = gffunc; |
| 86 | 89 | ||
| 87 | /* Allocate the arrays */ | 90 | /* Allocate the arrays */ |
| 88 | rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL); | 91 | rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), gfp); |
| 89 | if (rs->alpha_to == NULL) | 92 | if (rs->alpha_to == NULL) |
| 90 | goto errrs; | 93 | goto err; |
| 91 | 94 | ||
| 92 | rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL); | 95 | rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), gfp); |
| 93 | if (rs->index_of == NULL) | 96 | if (rs->index_of == NULL) |
| 94 | goto erralp; | 97 | goto err; |
| 95 | 98 | ||
| 96 | rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL); | 99 | rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), gfp); |
| 97 | if(rs->genpoly == NULL) | 100 | if(rs->genpoly == NULL) |
| 98 | goto erridx; | 101 | goto err; |
| 99 | 102 | ||
| 100 | /* Generate Galois field lookup tables */ | 103 | /* Generate Galois field lookup tables */ |
| 101 | rs->index_of[0] = rs->nn; /* log(zero) = -inf */ | 104 | rs->index_of[0] = rs->nn; /* log(zero) = -inf */ |
| @@ -120,7 +123,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int), | |||
| 120 | } | 123 | } |
| 121 | /* If it's not primitive, exit */ | 124 | /* If it's not primitive, exit */ |
| 122 | if(sr != rs->alpha_to[0]) | 125 | if(sr != rs->alpha_to[0]) |
| 123 | goto errpol; | 126 | goto err; |
| 124 | 127 | ||
| 125 | /* Find prim-th root of 1, used in decoding */ | 128 | /* Find prim-th root of 1, used in decoding */ |
| 126 | for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn); | 129 | for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn); |
| @@ -148,42 +151,52 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int), | |||
| 148 | /* convert rs->genpoly[] to index form for quicker encoding */ | 151 | /* convert rs->genpoly[] to index form for quicker encoding */ |
| 149 | for (i = 0; i <= nroots; i++) | 152 | for (i = 0; i <= nroots; i++) |
| 150 | rs->genpoly[i] = rs->index_of[rs->genpoly[i]]; | 153 | rs->genpoly[i] = rs->index_of[rs->genpoly[i]]; |
| 154 | |||
| 155 | rs->users = 1; | ||
| 156 | list_add(&rs->list, &codec_list); | ||
| 151 | return rs; | 157 | return rs; |
| 152 | 158 | ||
| 153 | /* Error exit */ | 159 | err: |
| 154 | errpol: | ||
| 155 | kfree(rs->genpoly); | 160 | kfree(rs->genpoly); |
| 156 | erridx: | ||
| 157 | kfree(rs->index_of); | 161 | kfree(rs->index_of); |
| 158 | erralp: | ||
| 159 | kfree(rs->alpha_to); | 162 | kfree(rs->alpha_to); |
| 160 | errrs: | ||
| 161 | kfree(rs); | 163 | kfree(rs); |
| 162 | return NULL; | 164 | return NULL; |
| 163 | } | 165 | } |
| 164 | 166 | ||
| 165 | 167 | ||
| 166 | /** | 168 | /** |
| 167 | * free_rs - Free the rs control structure, if it is no longer used | 169 | * free_rs - Free the rs control structure |
| 168 | * @rs: the control structure which is no longer used by the | 170 | * @rs: The control structure which is no longer used by the |
| 169 | * caller | 171 | * caller |
| 172 | * | ||
| 173 | * Free the control structure. If @rs is the last user of the associated | ||
| 174 | * codec, free the codec as well. | ||
| 170 | */ | 175 | */ |
| 171 | void free_rs(struct rs_control *rs) | 176 | void free_rs(struct rs_control *rs) |
| 172 | { | 177 | { |
| 178 | struct rs_codec *cd; | ||
| 179 | |||
| 180 | if (!rs) | ||
| 181 | return; | ||
| 182 | |||
| 183 | cd = rs->codec; | ||
| 173 | mutex_lock(&rslistlock); | 184 | mutex_lock(&rslistlock); |
| 174 | rs->users--; | 185 | cd->users--; |
| 175 | if(!rs->users) { | 186 | if(!cd->users) { |
| 176 | list_del(&rs->list); | 187 | list_del(&cd->list); |
| 177 | kfree(rs->alpha_to); | 188 | kfree(cd->alpha_to); |
| 178 | kfree(rs->index_of); | 189 | kfree(cd->index_of); |
| 179 | kfree(rs->genpoly); | 190 | kfree(cd->genpoly); |
| 180 | kfree(rs); | 191 | kfree(cd); |
| 181 | } | 192 | } |
| 182 | mutex_unlock(&rslistlock); | 193 | mutex_unlock(&rslistlock); |
| 194 | kfree(rs); | ||
| 183 | } | 195 | } |
| 196 | EXPORT_SYMBOL_GPL(free_rs); | ||
| 184 | 197 | ||
| 185 | /** | 198 | /** |
| 186 | * init_rs_internal - Find a matching or allocate a new rs control structure | 199 | * init_rs_internal - Allocate rs control, find a matching codec or allocate a new one |
| 187 | * @symsize: the symbol size (number of bits) | 200 | * @symsize: the symbol size (number of bits) |
| 188 | * @gfpoly: the extended Galois field generator polynomial coefficients, | 201 | * @gfpoly: the extended Galois field generator polynomial coefficients, |
| 189 | * with the 0th coefficient in the low order bit. The polynomial | 202 | * with the 0th coefficient in the low order bit. The polynomial |
| @@ -191,55 +204,69 @@ void free_rs(struct rs_control *rs) | |||
| 191 | * @gffunc: pointer to function to generate the next field element, | 204 | * @gffunc: pointer to function to generate the next field element, |
| 192 | * or the multiplicative identity element if given 0. Used | 205 | * or the multiplicative identity element if given 0. Used |
| 193 | * instead of gfpoly if gfpoly is 0 | 206 | * instead of gfpoly if gfpoly is 0 |
| 194 | * @fcr: the first consecutive root of the rs code generator polynomial | 207 | * @fcr: the first consecutive root of the rs code generator polynomial |
| 195 | * in index form | 208 | * in index form |
| 196 | * @prim: primitive element to generate polynomial roots | 209 | * @prim: primitive element to generate polynomial roots |
| 197 | * @nroots: RS code generator polynomial degree (number of roots) | 210 | * @nroots: RS code generator polynomial degree (number of roots) |
| 211 | * @gfp: GFP_ flags for allocations | ||
| 198 | */ | 212 | */ |
| 199 | static struct rs_control *init_rs_internal(int symsize, int gfpoly, | 213 | static struct rs_control *init_rs_internal(int symsize, int gfpoly, |
| 200 | int (*gffunc)(int), int fcr, | 214 | int (*gffunc)(int), int fcr, |
| 201 | int prim, int nroots) | 215 | int prim, int nroots, gfp_t gfp) |
| 202 | { | 216 | { |
| 203 | struct list_head *tmp; | 217 | struct list_head *tmp; |
| 204 | struct rs_control *rs; | 218 | struct rs_control *rs; |
| 219 | unsigned int bsize; | ||
| 205 | 220 | ||
| 206 | /* Sanity checks */ | 221 | /* Sanity checks */ |
| 207 | if (symsize < 1) | 222 | if (symsize < 1) |
| 208 | return NULL; | 223 | return NULL; |
| 209 | if (fcr < 0 || fcr >= (1<<symsize)) | 224 | if (fcr < 0 || fcr >= (1<<symsize)) |
| 210 | return NULL; | 225 | return NULL; |
| 211 | if (prim <= 0 || prim >= (1<<symsize)) | 226 | if (prim <= 0 || prim >= (1<<symsize)) |
| 212 | return NULL; | 227 | return NULL; |
| 213 | if (nroots < 0 || nroots >= (1<<symsize)) | 228 | if (nroots < 0 || nroots >= (1<<symsize)) |
| 214 | return NULL; | 229 | return NULL; |
| 215 | 230 | ||
| 231 | /* | ||
| 232 | * The decoder needs buffers in each control struct instance to | ||
| 233 | * avoid variable size or large fixed size allocations on | ||
| 234 | * stack. Size the buffers to arrays of [nroots + 1]. | ||
| 235 | */ | ||
| 236 | bsize = sizeof(uint16_t) * RS_DECODE_NUM_BUFFERS * (nroots + 1); | ||
| 237 | rs = kzalloc(sizeof(*rs) + bsize, gfp); | ||
| 238 | if (!rs) | ||
| 239 | return NULL; | ||
| 240 | |||
| 216 | mutex_lock(&rslistlock); | 241 | mutex_lock(&rslistlock); |
| 217 | 242 | ||
| 218 | /* Walk through the list and look for a matching entry */ | 243 | /* Walk through the list and look for a matching entry */ |
| 219 | list_for_each(tmp, &rslist) { | 244 | list_for_each(tmp, &codec_list) { |
| 220 | rs = list_entry(tmp, struct rs_control, list); | 245 | struct rs_codec *cd = list_entry(tmp, struct rs_codec, list); |
| 221 | if (symsize != rs->mm) | 246 | |
| 247 | if (symsize != cd->mm) | ||
| 222 | continue; | 248 | continue; |
| 223 | if (gfpoly != rs->gfpoly) | 249 | if (gfpoly != cd->gfpoly) |
| 224 | continue; | 250 | continue; |
| 225 | if (gffunc != rs->gffunc) | 251 | if (gffunc != cd->gffunc) |
| 226 | continue; | 252 | continue; |
| 227 | if (fcr != rs->fcr) | 253 | if (fcr != cd->fcr) |
| 228 | continue; | 254 | continue; |
| 229 | if (prim != rs->prim) | 255 | if (prim != cd->prim) |
| 230 | continue; | 256 | continue; |
| 231 | if (nroots != rs->nroots) | 257 | if (nroots != cd->nroots) |
| 232 | continue; | 258 | continue; |
| 233 | /* We have a matching one already */ | 259 | /* We have a matching one already */ |
| 234 | rs->users++; | 260 | cd->users++; |
| 261 | rs->codec = cd; | ||
| 235 | goto out; | 262 | goto out; |
| 236 | } | 263 | } |
| 237 | 264 | ||
| 238 | /* Create a new one */ | 265 | /* Create a new one */ |
| 239 | rs = rs_init(symsize, gfpoly, gffunc, fcr, prim, nroots); | 266 | rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp); |
| 240 | if (rs) { | 267 | if (!rs->codec) { |
| 241 | rs->users = 1; | 268 | kfree(rs); |
| 242 | list_add(&rs->list, &rslist); | 269 | rs = NULL; |
| 243 | } | 270 | } |
| 244 | out: | 271 | out: |
| 245 | mutex_unlock(&rslistlock); | 272 | mutex_unlock(&rslistlock); |
| @@ -247,45 +274,48 @@ out: | |||
| 247 | } | 274 | } |
| 248 | 275 | ||
| 249 | /** | 276 | /** |
| 250 | * init_rs - Find a matching or allocate a new rs control structure | 277 | * init_rs_gfp - Create a RS control struct and initialize it |
| 251 | * @symsize: the symbol size (number of bits) | 278 | * @symsize: the symbol size (number of bits) |
| 252 | * @gfpoly: the extended Galois field generator polynomial coefficients, | 279 | * @gfpoly: the extended Galois field generator polynomial coefficients, |
| 253 | * with the 0th coefficient in the low order bit. The polynomial | 280 | * with the 0th coefficient in the low order bit. The polynomial |
| 254 | * must be primitive; | 281 | * must be primitive; |
| 255 | * @fcr: the first consecutive root of the rs code generator polynomial | 282 | * @fcr: the first consecutive root of the rs code generator polynomial |
| 256 | * in index form | 283 | * in index form |
| 257 | * @prim: primitive element to generate polynomial roots | 284 | * @prim: primitive element to generate polynomial roots |
| 258 | * @nroots: RS code generator polynomial degree (number of roots) | 285 | * @nroots: RS code generator polynomial degree (number of roots) |
| 286 | * @gfp: GFP_ flags for allocations | ||
| 259 | */ | 287 | */ |
| 260 | struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, | 288 | struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim, |
| 261 | int nroots) | 289 | int nroots, gfp_t gfp) |
| 262 | { | 290 | { |
| 263 | return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots); | 291 | return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp); |
| 264 | } | 292 | } |
| 293 | EXPORT_SYMBOL_GPL(init_rs_gfp); | ||
| 265 | 294 | ||
| 266 | /** | 295 | /** |
| 267 | * init_rs_non_canonical - Find a matching or allocate a new rs control | 296 | * init_rs_non_canonical - Allocate rs control struct for fields with |
| 268 | * structure, for fields with non-canonical | 297 | * non-canonical representation |
| 269 | * representation | ||
| 270 | * @symsize: the symbol size (number of bits) | 298 | * @symsize: the symbol size (number of bits) |
| 271 | * @gffunc: pointer to function to generate the next field element, | 299 | * @gffunc: pointer to function to generate the next field element, |
| 272 | * or the multiplicative identity element if given 0. Used | 300 | * or the multiplicative identity element if given 0. Used |
| 273 | * instead of gfpoly if gfpoly is 0 | 301 | * instead of gfpoly if gfpoly is 0 |
| 274 | * @fcr: the first consecutive root of the rs code generator polynomial | 302 | * @fcr: the first consecutive root of the rs code generator polynomial |
| 275 | * in index form | 303 | * in index form |
| 276 | * @prim: primitive element to generate polynomial roots | 304 | * @prim: primitive element to generate polynomial roots |
| 277 | * @nroots: RS code generator polynomial degree (number of roots) | 305 | * @nroots: RS code generator polynomial degree (number of roots) |
| 278 | */ | 306 | */ |
| 279 | struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), | 307 | struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), |
| 280 | int fcr, int prim, int nroots) | 308 | int fcr, int prim, int nroots) |
| 281 | { | 309 | { |
| 282 | return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots); | 310 | return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots, |
| 311 | GFP_KERNEL); | ||
| 283 | } | 312 | } |
| 313 | EXPORT_SYMBOL_GPL(init_rs_non_canonical); | ||
| 284 | 314 | ||
| 285 | #ifdef CONFIG_REED_SOLOMON_ENC8 | 315 | #ifdef CONFIG_REED_SOLOMON_ENC8 |
| 286 | /** | 316 | /** |
| 287 | * encode_rs8 - Calculate the parity for data values (8bit data width) | 317 | * encode_rs8 - Calculate the parity for data values (8bit data width) |
| 288 | * @rs: the rs control structure | 318 | * @rsc: the rs control structure |
| 289 | * @data: data field of a given type | 319 | * @data: data field of a given type |
| 290 | * @len: data length | 320 | * @len: data length |
| 291 | * @par: parity data, must be initialized by caller (usually all 0) | 321 | * @par: parity data, must be initialized by caller (usually all 0) |
| @@ -295,7 +325,7 @@ struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), | |||
| 295 | * symbol size > 8. The calling code must take care of encoding of the | 325 | * symbol size > 8. The calling code must take care of encoding of the |
| 296 | * syndrome result for storage itself. | 326 | * syndrome result for storage itself. |
| 297 | */ | 327 | */ |
| 298 | int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, | 328 | int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par, |
| 299 | uint16_t invmsk) | 329 | uint16_t invmsk) |
| 300 | { | 330 | { |
| 301 | #include "encode_rs.c" | 331 | #include "encode_rs.c" |
| @@ -306,7 +336,7 @@ EXPORT_SYMBOL_GPL(encode_rs8); | |||
| 306 | #ifdef CONFIG_REED_SOLOMON_DEC8 | 336 | #ifdef CONFIG_REED_SOLOMON_DEC8 |
| 307 | /** | 337 | /** |
| 308 | * decode_rs8 - Decode codeword (8bit data width) | 338 | * decode_rs8 - Decode codeword (8bit data width) |
| 309 | * @rs: the rs control structure | 339 | * @rsc: the rs control structure |
| 310 | * @data: data field of a given type | 340 | * @data: data field of a given type |
| 311 | * @par: received parity data field | 341 | * @par: received parity data field |
| 312 | * @len: data length | 342 | * @len: data length |
| @@ -319,9 +349,14 @@ EXPORT_SYMBOL_GPL(encode_rs8); | |||
| 319 | * The syndrome and parity uses a uint16_t data type to enable | 349 | * The syndrome and parity uses a uint16_t data type to enable |
| 320 | * symbol size > 8. The calling code must take care of decoding of the | 350 | * symbol size > 8. The calling code must take care of decoding of the |
| 321 | * syndrome result and the received parity before calling this code. | 351 | * syndrome result and the received parity before calling this code. |
| 352 | * | ||
| 353 | * Note: The rs_control struct @rsc contains buffers which are used for | ||
| 354 | * decoding, so the caller has to ensure that decoder invocations are | ||
| 355 | * serialized. | ||
| 356 | * | ||
| 322 | * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. | 357 | * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. |
| 323 | */ | 358 | */ |
| 324 | int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len, | 359 | int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len, |
| 325 | uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, | 360 | uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, |
| 326 | uint16_t *corr) | 361 | uint16_t *corr) |
| 327 | { | 362 | { |
| @@ -333,7 +368,7 @@ EXPORT_SYMBOL_GPL(decode_rs8); | |||
| 333 | #ifdef CONFIG_REED_SOLOMON_ENC16 | 368 | #ifdef CONFIG_REED_SOLOMON_ENC16 |
| 334 | /** | 369 | /** |
| 335 | * encode_rs16 - Calculate the parity for data values (16bit data width) | 370 | * encode_rs16 - Calculate the parity for data values (16bit data width) |
| 336 | * @rs: the rs control structure | 371 | * @rsc: the rs control structure |
| 337 | * @data: data field of a given type | 372 | * @data: data field of a given type |
| 338 | * @len: data length | 373 | * @len: data length |
| 339 | * @par: parity data, must be initialized by caller (usually all 0) | 374 | * @par: parity data, must be initialized by caller (usually all 0) |
| @@ -341,7 +376,7 @@ EXPORT_SYMBOL_GPL(decode_rs8); | |||
| 341 | * | 376 | * |
| 342 | * Each field in the data array contains up to symbol size bits of valid data. | 377 | * Each field in the data array contains up to symbol size bits of valid data. |
| 343 | */ | 378 | */ |
| 344 | int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par, | 379 | int encode_rs16(struct rs_control *rsc, uint16_t *data, int len, uint16_t *par, |
| 345 | uint16_t invmsk) | 380 | uint16_t invmsk) |
| 346 | { | 381 | { |
| 347 | #include "encode_rs.c" | 382 | #include "encode_rs.c" |
| @@ -352,7 +387,7 @@ EXPORT_SYMBOL_GPL(encode_rs16); | |||
| 352 | #ifdef CONFIG_REED_SOLOMON_DEC16 | 387 | #ifdef CONFIG_REED_SOLOMON_DEC16 |
| 353 | /** | 388 | /** |
| 354 | * decode_rs16 - Decode codeword (16bit data width) | 389 | * decode_rs16 - Decode codeword (16bit data width) |
| 355 | * @rs: the rs control structure | 390 | * @rsc: the rs control structure |
| 356 | * @data: data field of a given type | 391 | * @data: data field of a given type |
| 357 | * @par: received parity data field | 392 | * @par: received parity data field |
| 358 | * @len: data length | 393 | * @len: data length |
| @@ -363,9 +398,14 @@ EXPORT_SYMBOL_GPL(encode_rs16); | |||
| 363 | * @corr: buffer to store correction bitmask on eras_pos | 398 | * @corr: buffer to store correction bitmask on eras_pos |
| 364 | * | 399 | * |
| 365 | * Each field in the data array contains up to symbol size bits of valid data. | 400 | * Each field in the data array contains up to symbol size bits of valid data. |
| 401 | * | ||
| 402 | * Note: The rs_control struct @rsc contains buffers which are used for | ||
| 403 | * decoding, so the caller has to ensure that decoder invocations are | ||
| 404 | * serialized. | ||
| 405 | * | ||
| 366 | * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. | 406 | * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. |
| 367 | */ | 407 | */ |
| 368 | int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, | 408 | int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len, |
| 369 | uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, | 409 | uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, |
| 370 | uint16_t *corr) | 410 | uint16_t *corr) |
| 371 | { | 411 | { |
| @@ -374,10 +414,6 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, | |||
| 374 | EXPORT_SYMBOL_GPL(decode_rs16); | 414 | EXPORT_SYMBOL_GPL(decode_rs16); |
| 375 | #endif | 415 | #endif |
| 376 | 416 | ||
| 377 | EXPORT_SYMBOL_GPL(init_rs); | ||
| 378 | EXPORT_SYMBOL_GPL(init_rs_non_canonical); | ||
| 379 | EXPORT_SYMBOL_GPL(free_rs); | ||
| 380 | |||
| 381 | MODULE_LICENSE("GPL"); | 417 | MODULE_LICENSE("GPL"); |
| 382 | MODULE_DESCRIPTION("Reed Solomon encoder/decoder"); | 418 | MODULE_DESCRIPTION("Reed Solomon encoder/decoder"); |
| 383 | MODULE_AUTHOR("Phil Karn, Thomas Gleixner"); | 419 | MODULE_AUTHOR("Phil Karn, Thomas Gleixner"); |
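A possible caller-side view of the reworked API, with invented data and trimmed error handling; the parameters describe a common 8-bit setup with 32 parity symbols, so len may be at most 255 - 32 = 223:

    static int rs_example(uint8_t *data, int len)
    {
            uint16_t par[32] = { 0 };
            struct rs_control *rs;

            /* 8-bit symbols, primitive polynomial 0x11d, fcr 0, prim 1,
             * 32 roots. The codec behind this control struct is shared
             * with any other user asking for the same parameters. */
            rs = init_rs_gfp(8, 0x11d, 0, 1, 32, GFP_KERNEL);
            if (!rs)
                    return -ENOMEM;

            encode_rs8(rs, data, len, par, 0);  /* parity lands in par[] */

            /* The control struct carries the decode buffers, so calls to
             * decode_rs8() on the same 'rs' must be serialized. */

            free_rs(rs);    /* drop the codec reference */
            return 0;
    }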
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 2b2b79974b61..9427b5766134 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -668,8 +668,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); | |||
| 668 | * For a completely stable walk you should construct your own data | 668 | * For a completely stable walk you should construct your own data |
| 669 | * structure outside the hash table. | 669 | * structure outside the hash table. |
| 670 | * | 670 | * |
| 671 | * This function may sleep so you must not call it from interrupt | 671 | * This function may be called from any process context, including |
| 672 | * context or with spin locks held. | 672 | * non-preemptable context, but cannot be called from softirq or |
| 673 | * hardirq context. | ||
| 673 | * | 674 | * |
| 674 | * You must call rhashtable_walk_exit after this function returns. | 675 | * You must call rhashtable_walk_exit after this function returns. |
| 675 | */ | 676 | */ |
| @@ -726,6 +727,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter) | |||
| 726 | __acquires(RCU) | 727 | __acquires(RCU) |
| 727 | { | 728 | { |
| 728 | struct rhashtable *ht = iter->ht; | 729 | struct rhashtable *ht = iter->ht; |
| 730 | bool rhlist = ht->rhlist; | ||
| 729 | 731 | ||
| 730 | rcu_read_lock(); | 732 | rcu_read_lock(); |
| 731 | 733 | ||
| @@ -734,11 +736,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter) | |||
| 734 | list_del(&iter->walker.list); | 736 | list_del(&iter->walker.list); |
| 735 | spin_unlock(&ht->lock); | 737 | spin_unlock(&ht->lock); |
| 736 | 738 | ||
| 737 | if (!iter->walker.tbl && !iter->end_of_table) { | 739 | if (iter->end_of_table) |
| 740 | return 0; | ||
| 741 | if (!iter->walker.tbl) { | ||
| 738 | iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); | 742 | iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); |
| 743 | iter->slot = 0; | ||
| 744 | iter->skip = 0; | ||
| 739 | return -EAGAIN; | 745 | return -EAGAIN; |
| 740 | } | 746 | } |
| 741 | 747 | ||
| 748 | if (iter->p && !rhlist) { | ||
| 749 | /* | ||
| 750 | * We need to validate that 'p' is still in the table, and | ||
| 751 | * if so, update 'skip' | ||
| 752 | */ | ||
| 753 | struct rhash_head *p; | ||
| 754 | int skip = 0; | ||
| 755 | rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { | ||
| 756 | skip++; | ||
| 757 | if (p == iter->p) { | ||
| 758 | iter->skip = skip; | ||
| 759 | goto found; | ||
| 760 | } | ||
| 761 | } | ||
| 762 | iter->p = NULL; | ||
| 763 | } else if (iter->p && rhlist) { | ||
| 764 | /* Need to validate that 'list' is still in the table, and | ||
| 765 | * if so, update 'skip' and 'p'. | ||
| 766 | */ | ||
| 767 | struct rhash_head *p; | ||
| 768 | struct rhlist_head *list; | ||
| 769 | int skip = 0; | ||
| 770 | rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { | ||
| 771 | for (list = container_of(p, struct rhlist_head, rhead); | ||
| 772 | list; | ||
| 773 | list = rcu_dereference(list->next)) { | ||
| 774 | skip++; | ||
| 775 | if (list == iter->list) { | ||
| 776 | iter->p = p; | ||
| 777 | iter->skip = skip; | ||
| 778 | goto found; | ||
| 779 | } | ||
| 780 | } | ||
| 781 | } | ||
| 782 | iter->p = NULL; | ||
| 783 | } | ||
| 784 | found: | ||
| 742 | return 0; | 785 | return 0; |
| 743 | } | 786 | } |
| 744 | EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); | 787 | EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); |
| @@ -914,8 +957,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) | |||
| 914 | iter->walker.tbl = NULL; | 957 | iter->walker.tbl = NULL; |
| 915 | spin_unlock(&ht->lock); | 958 | spin_unlock(&ht->lock); |
| 916 | 959 | ||
| 917 | iter->p = NULL; | ||
| 918 | |||
| 919 | out: | 960 | out: |
| 920 | rcu_read_unlock(); | 961 | rcu_read_unlock(); |
| 921 | } | 962 | } |
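The behavioural consequence of the hunks above is that a walker which stops and restarts now resumes near the object it last returned instead of rescanning the bucket from slot 0. A minimal walk loop against the documented API (the table is assumed populated; its contents are invented for illustration):

    static void walk_all(struct rhashtable *ht)
    {
            struct rhashtable_iter iter;
            void *obj;

            rhashtable_walk_enter(ht, &iter);   /* ok with locks held */
            rhashtable_walk_start(&iter);       /* takes rcu_read_lock() */

            while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                    if (IS_ERR(obj)) {
                            if (PTR_ERR(obj) == -EAGAIN)
                                    continue;   /* table was resized */
                            break;
                    }
                    /* use obj; a stop/start here resumes near obj */
            }

            rhashtable_walk_stop(&iter);        /* drops rcu_read_lock() */
            rhashtable_walk_exit(&iter);
    }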
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index e6a9c06ec70c..6fdc6267f4a8 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c | |||
| @@ -270,18 +270,33 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m) | |||
| 270 | } | 270 | } |
| 271 | EXPORT_SYMBOL_GPL(sbitmap_bitmap_show); | 271 | EXPORT_SYMBOL_GPL(sbitmap_bitmap_show); |
| 272 | 272 | ||
| 273 | static unsigned int sbq_calc_wake_batch(unsigned int depth) | 273 | static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq, |
| 274 | unsigned int depth) | ||
| 274 | { | 275 | { |
| 275 | unsigned int wake_batch; | 276 | unsigned int wake_batch; |
| 277 | unsigned int shallow_depth; | ||
| 276 | 278 | ||
| 277 | /* | 279 | /* |
| 278 | * For each batch, we wake up one queue. We need to make sure that our | 280 | * For each batch, we wake up one queue. We need to make sure that our |
| 279 | * batch size is small enough that the full depth of the bitmap is | 281 | * batch size is small enough that the full depth of the bitmap, |
| 280 | * enough to wake up all of the queues. | 282 | * potentially limited by a shallow depth, is enough to wake up all of |
| 283 | * the queues. | ||
| 284 | * | ||
| 285 | * Each full word of the bitmap has bits_per_word bits, and there might | ||
| 286 | * be a partial word. There are depth / bits_per_word full words and | ||
| 287 | * depth % bits_per_word bits left over. In bitwise arithmetic: | ||
| 288 | * | ||
| 289 | * bits_per_word = 1 << shift | ||
| 290 | * depth / bits_per_word = depth >> shift | ||
| 291 | * depth % bits_per_word = depth & ((1 << shift) - 1) | ||
| 292 | * | ||
| 293 | * Each word can be limited to sbq->min_shallow_depth bits. | ||
| 281 | */ | 294 | */ |
| 282 | wake_batch = SBQ_WAKE_BATCH; | 295 | shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); |
| 283 | if (wake_batch > depth / SBQ_WAIT_QUEUES) | 296 | depth = ((depth >> sbq->sb.shift) * shallow_depth + |
| 284 | wake_batch = max(1U, depth / SBQ_WAIT_QUEUES); | 297 | min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); |
| 298 | wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1, | ||
| 299 | SBQ_WAKE_BATCH); | ||
| 285 | 300 | ||
| 286 | return wake_batch; | 301 | return wake_batch; |
| 287 | } | 302 | } |
| @@ -307,7 +322,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, | |||
| 307 | *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; | 322 | *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; |
| 308 | } | 323 | } |
| 309 | 324 | ||
| 310 | sbq->wake_batch = sbq_calc_wake_batch(depth); | 325 | sbq->min_shallow_depth = UINT_MAX; |
| 326 | sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); | ||
| 311 | atomic_set(&sbq->wake_index, 0); | 327 | atomic_set(&sbq->wake_index, 0); |
| 312 | 328 | ||
| 313 | sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); | 329 | sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); |
| @@ -327,21 +343,28 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, | |||
| 327 | } | 343 | } |
| 328 | EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); | 344 | EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); |
| 329 | 345 | ||
| 330 | void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) | 346 | static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, |
| 347 | unsigned int depth) | ||
| 331 | { | 348 | { |
| 332 | unsigned int wake_batch = sbq_calc_wake_batch(depth); | 349 | unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth); |
| 333 | int i; | 350 | int i; |
| 334 | 351 | ||
| 335 | if (sbq->wake_batch != wake_batch) { | 352 | if (sbq->wake_batch != wake_batch) { |
| 336 | WRITE_ONCE(sbq->wake_batch, wake_batch); | 353 | WRITE_ONCE(sbq->wake_batch, wake_batch); |
| 337 | /* | 354 | /* |
| 338 | * Pairs with the memory barrier in sbq_wake_up() to ensure that | 355 | * Pairs with the memory barrier in sbitmap_queue_wake_up() |
| 339 | * the batch size is updated before the wait counts. | 356 | * to ensure that the batch size is updated before the wait |
| 357 | * counts. | ||
| 340 | */ | 358 | */ |
| 341 | smp_mb__before_atomic(); | 359 | smp_mb__before_atomic(); |
| 342 | for (i = 0; i < SBQ_WAIT_QUEUES; i++) | 360 | for (i = 0; i < SBQ_WAIT_QUEUES; i++) |
| 343 | atomic_set(&sbq->ws[i].wait_cnt, 1); | 361 | atomic_set(&sbq->ws[i].wait_cnt, 1); |
| 344 | } | 362 | } |
| 363 | } | ||
| 364 | |||
| 365 | void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) | ||
| 366 | { | ||
| 367 | sbitmap_queue_update_wake_batch(sbq, depth); | ||
| 345 | sbitmap_resize(&sbq->sb, depth); | 368 | sbitmap_resize(&sbq->sb, depth); |
| 346 | } | 369 | } |
| 347 | EXPORT_SYMBOL_GPL(sbitmap_queue_resize); | 370 | EXPORT_SYMBOL_GPL(sbitmap_queue_resize); |
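Worked numbers for the shallow-depth arithmetic above, with invented inputs and the in-tree constants SBQ_WAIT_QUEUES = 8 and SBQ_WAKE_BATCH = 8:

    /*
     * depth = 256, shift = 6 (64 bits per word), min_shallow_depth = 8
     *
     *   shallow_depth = min(1 << 6, 8)              = 8
     *   full words    = depth >> shift = 256 >> 6   = 4
     *   leftover bits = depth & 63     = 256 & 63   = 0
     *   usable depth  = 4 * 8 + min(0, 8)           = 32
     *   wake_batch    = clamp(32 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH)
     *                 = clamp(32 / 8, 1, 8)         = 4
     */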
| @@ -380,6 +403,8 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, | |||
| 380 | unsigned int hint, depth; | 403 | unsigned int hint, depth; |
| 381 | int nr; | 404 | int nr; |
| 382 | 405 | ||
| 406 | WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); | ||
| 407 | |||
| 383 | hint = this_cpu_read(*sbq->alloc_hint); | 408 | hint = this_cpu_read(*sbq->alloc_hint); |
| 384 | depth = READ_ONCE(sbq->sb.depth); | 409 | depth = READ_ONCE(sbq->sb.depth); |
| 385 | if (unlikely(hint >= depth)) { | 410 | if (unlikely(hint >= depth)) { |
| @@ -403,6 +428,14 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, | |||
| 403 | } | 428 | } |
| 404 | EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); | 429 | EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); |
| 405 | 430 | ||
| 431 | void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, | ||
| 432 | unsigned int min_shallow_depth) | ||
| 433 | { | ||
| 434 | sbq->min_shallow_depth = min_shallow_depth; | ||
| 435 | sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); | ||
| 436 | } | ||
| 437 | EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth); | ||
| 438 | |||
| 406 | static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) | 439 | static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) |
| 407 | { | 440 | { |
| 408 | int i, wake_index; | 441 | int i, wake_index; |
| @@ -425,52 +458,67 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) | |||
| 425 | return NULL; | 458 | return NULL; |
| 426 | } | 459 | } |
| 427 | 460 | ||
| 428 | static void sbq_wake_up(struct sbitmap_queue *sbq) | 461 | static bool __sbq_wake_up(struct sbitmap_queue *sbq) |
| 429 | { | 462 | { |
| 430 | struct sbq_wait_state *ws; | 463 | struct sbq_wait_state *ws; |
| 431 | unsigned int wake_batch; | 464 | unsigned int wake_batch; |
| 432 | int wait_cnt; | 465 | int wait_cnt; |
| 433 | 466 | ||
| 434 | /* | ||
| 435 | * Pairs with the memory barrier in set_current_state() to ensure the | ||
| 436 | * proper ordering of clear_bit()/waitqueue_active() in the waker and | ||
| 437 | * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the | ||
| 438 | * waiter. See the comment on waitqueue_active(). This is __after_atomic | ||
| 439 | * because we just did clear_bit_unlock() in the caller. | ||
| 440 | */ | ||
| 441 | smp_mb__after_atomic(); | ||
| 442 | |||
| 443 | ws = sbq_wake_ptr(sbq); | 467 | ws = sbq_wake_ptr(sbq); |
| 444 | if (!ws) | 468 | if (!ws) |
| 445 | return; | 469 | return false; |
| 446 | 470 | ||
| 447 | wait_cnt = atomic_dec_return(&ws->wait_cnt); | 471 | wait_cnt = atomic_dec_return(&ws->wait_cnt); |
| 448 | if (wait_cnt <= 0) { | 472 | if (wait_cnt <= 0) { |
| 473 | int ret; | ||
| 474 | |||
| 449 | wake_batch = READ_ONCE(sbq->wake_batch); | 475 | wake_batch = READ_ONCE(sbq->wake_batch); |
| 476 | |||
| 450 | /* | 477 | /* |
| 451 | * Pairs with the memory barrier in sbitmap_queue_resize() to | 478 | * Pairs with the memory barrier in sbitmap_queue_resize() to |
| 452 | * ensure that we see the batch size update before the wait | 479 | * ensure that we see the batch size update before the wait |
| 453 | * count is reset. | 480 | * count is reset. |
| 454 | */ | 481 | */ |
| 455 | smp_mb__before_atomic(); | 482 | smp_mb__before_atomic(); |
| 483 | |||
| 456 | /* | 484 | /* |
| 457 | * If there are concurrent callers to sbq_wake_up(), the last | 485 | * For concurrent callers of this, the one that failed the |
| 458 | * one to decrement the wait count below zero will bump it back | 486 | * atomic_cmpxchg() race should call this function again |
| 459 | * up. If there is a concurrent resize, the count reset will | 487 | * to wake up a new batch on a different 'ws'. |
| 460 | * either cause the cmpxchg to fail or overwrite after the | ||
| 461 | * cmpxchg. | ||
| 462 | */ | 488 | */ |
| 463 | atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch); | 489 | ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch); |
| 464 | sbq_index_atomic_inc(&sbq->wake_index); | 490 | if (ret == wait_cnt) { |
| 465 | wake_up_nr(&ws->wait, wake_batch); | 491 | sbq_index_atomic_inc(&sbq->wake_index); |
| 492 | wake_up_nr(&ws->wait, wake_batch); | ||
| 493 | return false; | ||
| 494 | } | ||
| 495 | |||
| 496 | return true; | ||
| 466 | } | 497 | } |
| 498 | |||
| 499 | return false; | ||
| 500 | } | ||
| 501 | |||
| 502 | void sbitmap_queue_wake_up(struct sbitmap_queue *sbq) | ||
| 503 | { | ||
| 504 | while (__sbq_wake_up(sbq)) | ||
| 505 | ; | ||
| 467 | } | 506 | } |
| 507 | EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); | ||
| 468 | 508 | ||
| 469 | void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, | 509 | void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, |
| 470 | unsigned int cpu) | 510 | unsigned int cpu) |
| 471 | { | 511 | { |
| 472 | sbitmap_clear_bit_unlock(&sbq->sb, nr); | 512 | sbitmap_clear_bit_unlock(&sbq->sb, nr); |
| 473 | sbq_wake_up(sbq); | 513 | /* |
| 514 | * Pairs with the memory barrier in set_current_state() to ensure the | ||
| 515 | * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker | ||
| 516 | * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the | ||
| 517 | * waiter. See the comment on waitqueue_active(). | ||
| 518 | */ | ||
| 519 | smp_mb__after_atomic(); | ||
| 520 | sbitmap_queue_wake_up(sbq); | ||
| 521 | |||
| 474 | if (likely(!sbq->round_robin && nr < sbq->sb.depth)) | 522 | if (likely(!sbq->round_robin && nr < sbq->sb.depth)) |
| 475 | *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; | 523 | *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; |
| 476 | } | 524 | } |
| @@ -482,7 +530,7 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) | |||
| 482 | 530 | ||
| 483 | /* | 531 | /* |
| 484 | * Pairs with the memory barrier in set_current_state() like in | 532 | * Pairs with the memory barrier in set_current_state() like in |
| 485 | * sbq_wake_up(). | 533 | * sbitmap_queue_wake_up(). |
| 486 | */ | 534 | */ |
| 487 | smp_mb(); | 535 | smp_mb(); |
| 488 | wake_index = atomic_read(&sbq->wake_index); | 536 | wake_index = atomic_read(&sbq->wake_index); |
| @@ -528,5 +576,6 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) | |||
| 528 | seq_puts(m, "}\n"); | 576 | seq_puts(m, "}\n"); |
| 529 | 577 | ||
| 530 | seq_printf(m, "round_robin=%d\n", sbq->round_robin); | 578 | seq_printf(m, "round_robin=%d\n", sbq->round_robin); |
| 579 | seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); | ||
| 531 | } | 580 | } |
| 532 | EXPORT_SYMBOL_GPL(sbitmap_queue_show); | 581 | EXPORT_SYMBOL_GPL(sbitmap_queue_show); |
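The rewritten wake path above hinges on exactly one contender winning the atomic_cmpxchg() that resets ws->wait_cnt; every loser returns true so that sbitmap_queue_wake_up() retries against the next wait queue. A minimal user-space sketch of that retry pattern, substituting C11 atomics for the kernel's atomic_t (an assumption purely for illustration):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int wait_cnt = 4;
	static const int wake_batch = 4;

	/* Analogue of __sbq_wake_up(): true means the caller lost the
	 * compare-exchange and should try again, as in the patch above. */
	static bool wake_up_once(void)
	{
		int cnt = atomic_fetch_sub(&wait_cnt, 1) - 1; /* atomic_dec_return() */

		if (cnt <= 0) {
			int expected = cnt;

			/* Only the winner resets the count and issues wakeups. */
			if (atomic_compare_exchange_strong(&wait_cnt, &expected,
							   wake_batch))
				return false;
			return true;	/* lost the race: retry on another 'ws' */
		}
		return false;
	}

	int main(void)
	{
		while (wake_up_once())
			;
		printf("wait_cnt = %d\n", atomic_load(&wait_cnt));
		return 0;
	}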
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index cc640588f145..04b68d9dffac 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -593,9 +593,8 @@ found: | |||
| 593 | } | 593 | } |
| 594 | 594 | ||
| 595 | /* | 595 | /* |
| 596 | * Allocates bounce buffer and returns its kernel virtual address. | 596 | * Allocates bounce buffer and returns its physical address. |
| 597 | */ | 597 | */ |
| 598 | |||
| 599 | static phys_addr_t | 598 | static phys_addr_t |
| 600 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, | 599 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, |
| 601 | enum dma_data_direction dir, unsigned long attrs) | 600 | enum dma_data_direction dir, unsigned long attrs) |
| @@ -614,7 +613,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, | |||
| 614 | } | 613 | } |
| 615 | 614 | ||
| 616 | /* | 615 | /* |
| 617 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | 616 | * tlb_addr is the physical address of the bounce buffer to unmap. |
| 618 | */ | 617 | */ |
| 619 | void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, | 618 | void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, |
| 620 | size_t size, enum dma_data_direction dir, | 619 | size_t size, enum dma_data_direction dir, |
| @@ -692,7 +691,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, | |||
| 692 | } | 691 | } |
| 693 | } | 692 | } |
| 694 | 693 | ||
| 695 | #ifdef CONFIG_DMA_DIRECT_OPS | ||
| 696 | static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, | 694 | static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, |
| 697 | size_t size) | 695 | size_t size) |
| 698 | { | 696 | { |
| @@ -727,7 +725,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
| 727 | 725 | ||
| 728 | out_unmap: | 726 | out_unmap: |
| 729 | dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 727 | dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
| 730 | (unsigned long long)(dev ? dev->coherent_dma_mask : 0), | 728 | (unsigned long long)dev->coherent_dma_mask, |
| 731 | (unsigned long long)*dma_handle); | 729 | (unsigned long long)*dma_handle); |
| 732 | 730 | ||
| 733 | /* | 731 | /* |
| @@ -764,7 +762,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size, | |||
| 764 | DMA_ATTR_SKIP_CPU_SYNC); | 762 | DMA_ATTR_SKIP_CPU_SYNC); |
| 765 | return true; | 763 | return true; |
| 766 | } | 764 | } |
| 767 | #endif | ||
| 768 | 765 | ||
| 769 | static void | 766 | static void |
| 770 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, | 767 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, |
| @@ -1045,7 +1042,6 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask) | |||
| 1045 | return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; | 1042 | return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; |
| 1046 | } | 1043 | } |
| 1047 | 1044 | ||
| 1048 | #ifdef CONFIG_DMA_DIRECT_OPS | ||
| 1049 | void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | 1045 | void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, |
| 1050 | gfp_t gfp, unsigned long attrs) | 1046 | gfp_t gfp, unsigned long attrs) |
| 1051 | { | 1047 | { |
| @@ -1089,4 +1085,3 @@ const struct dma_map_ops swiotlb_dma_ops = { | |||
| 1089 | .unmap_page = swiotlb_unmap_page, | 1085 | .unmap_page = swiotlb_unmap_page, |
| 1090 | .dma_supported = dma_direct_supported, | 1086 | .dma_supported = dma_direct_supported, |
| 1091 | }; | 1087 | }; |
| 1092 | #endif /* CONFIG_DMA_DIRECT_OPS */ | ||
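The corrected comments make the bounce-buffer API consistently speak in physical addresses. Because the swiotlb pool sits in the kernel's linear mapping, code that must touch a buffer converts with phys_to_virt(); the helper below is hypothetical (the real copying lives in swiotlb_bounce()), shown only to illustrate the address handling:

	#include <linux/io.h>
	#include <linux/string.h>

	/* Hypothetical: copy data back out of a bounce buffer identified by
	 * the physical address that map_single() returned. */
	static void bounce_copy_out(phys_addr_t tlb_addr, void *orig, size_t size)
	{
		memcpy(orig, phys_to_virt(tlb_addr), size);
	}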
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 8e157806df7a..60aedc879361 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -356,29 +356,22 @@ static int bpf_fill_maxinsns11(struct bpf_test *self) | |||
| 356 | return __bpf_fill_ja(self, BPF_MAXINSNS, 68); | 356 | return __bpf_fill_ja(self, BPF_MAXINSNS, 68); |
| 357 | } | 357 | } |
| 358 | 358 | ||
| 359 | static int bpf_fill_ja(struct bpf_test *self) | 359 | static int bpf_fill_maxinsns12(struct bpf_test *self) |
| 360 | { | ||
| 361 | /* Hits exactly 11 passes on x86_64 JIT. */ | ||
| 362 | return __bpf_fill_ja(self, 12, 9); | ||
| 363 | } | ||
| 364 | |||
| 365 | static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self) | ||
| 366 | { | 360 | { |
| 367 | unsigned int len = BPF_MAXINSNS; | 361 | unsigned int len = BPF_MAXINSNS; |
| 368 | struct sock_filter *insn; | 362 | struct sock_filter *insn; |
| 369 | int i; | 363 | int i = 0; |
| 370 | 364 | ||
| 371 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); | 365 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| 372 | if (!insn) | 366 | if (!insn) |
| 373 | return -ENOMEM; | 367 | return -ENOMEM; |
| 374 | 368 | ||
| 375 | for (i = 0; i < len - 1; i += 2) { | 369 | insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0); |
| 376 | insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0); | ||
| 377 | insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, | ||
| 378 | SKF_AD_OFF + SKF_AD_CPU); | ||
| 379 | } | ||
| 380 | 370 | ||
| 381 | insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee); | 371 | for (i = 1; i < len - 1; i++) |
| 372 | insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0); | ||
| 373 | |||
| 374 | insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab); | ||
| 382 | 375 | ||
| 383 | self->u.ptr.insns = insn; | 376 | self->u.ptr.insns = insn; |
| 384 | self->u.ptr.len = len; | 377 | self->u.ptr.len = len; |
| @@ -386,50 +379,22 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self) | |||
| 386 | return 0; | 379 | return 0; |
| 387 | } | 380 | } |
| 388 | 381 | ||
| 389 | #define PUSH_CNT 68 | 382 | static int bpf_fill_maxinsns13(struct bpf_test *self) |
| 390 | /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */ | ||
| 391 | static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) | ||
| 392 | { | 383 | { |
| 393 | unsigned int len = BPF_MAXINSNS; | 384 | unsigned int len = BPF_MAXINSNS; |
| 394 | struct bpf_insn *insn; | 385 | struct sock_filter *insn; |
| 395 | int i = 0, j, k = 0; | 386 | int i = 0; |
| 396 | 387 | ||
| 397 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); | 388 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| 398 | if (!insn) | 389 | if (!insn) |
| 399 | return -ENOMEM; | 390 | return -ENOMEM; |
| 400 | 391 | ||
| 401 | insn[i++] = BPF_MOV64_REG(R6, R1); | 392 | for (i = 0; i < len - 3; i++) |
| 402 | loop: | 393 | insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0); |
| 403 | for (j = 0; j < PUSH_CNT; j++) { | ||
| 404 | insn[i++] = BPF_LD_ABS(BPF_B, 0); | ||
| 405 | insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2); | ||
| 406 | i++; | ||
| 407 | insn[i++] = BPF_MOV64_REG(R1, R6); | ||
| 408 | insn[i++] = BPF_MOV64_IMM(R2, 1); | ||
| 409 | insn[i++] = BPF_MOV64_IMM(R3, 2); | ||
| 410 | insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
| 411 | bpf_skb_vlan_push_proto.func - __bpf_call_base); | ||
| 412 | insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2); | ||
| 413 | i++; | ||
| 414 | } | ||
| 415 | |||
| 416 | for (j = 0; j < PUSH_CNT; j++) { | ||
| 417 | insn[i++] = BPF_LD_ABS(BPF_B, 0); | ||
| 418 | insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2); | ||
| 419 | i++; | ||
| 420 | insn[i++] = BPF_MOV64_REG(R1, R6); | ||
| 421 | insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
| 422 | bpf_skb_vlan_pop_proto.func - __bpf_call_base); | ||
| 423 | insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2); | ||
| 424 | i++; | ||
| 425 | } | ||
| 426 | if (++k < 5) | ||
| 427 | goto loop; | ||
| 428 | 394 | ||
| 429 | for (; i < len - 1; i++) | 395 | insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab); |
| 430 | insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef); | 396 | insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0); |
| 431 | 397 | insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); | |
| 432 | insn[len - 1] = BPF_EXIT_INSN(); | ||
| 433 | 398 | ||
| 434 | self->u.ptr.insns = insn; | 399 | self->u.ptr.insns = insn; |
| 435 | self->u.ptr.len = len; | 400 | self->u.ptr.len = len; |
| @@ -437,58 +402,29 @@ loop: | |||
| 437 | return 0; | 402 | return 0; |
| 438 | } | 403 | } |
| 439 | 404 | ||
| 440 | static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self) | 405 | static int bpf_fill_ja(struct bpf_test *self) |
| 441 | { | 406 | { |
| 442 | struct bpf_insn *insn; | 407 | /* Hits exactly 11 passes on x86_64 JIT. */ |
| 443 | 408 | return __bpf_fill_ja(self, 12, 9); | |
| 444 | insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL); | ||
| 445 | if (!insn) | ||
| 446 | return -ENOMEM; | ||
| 447 | |||
| 448 | /* Due to func address being non-const, we need to | ||
| 449 | * assemble this here. | ||
| 450 | */ | ||
| 451 | insn[0] = BPF_MOV64_REG(R6, R1); | ||
| 452 | insn[1] = BPF_LD_ABS(BPF_B, 0); | ||
| 453 | insn[2] = BPF_LD_ABS(BPF_H, 0); | ||
| 454 | insn[3] = BPF_LD_ABS(BPF_W, 0); | ||
| 455 | insn[4] = BPF_MOV64_REG(R7, R6); | ||
| 456 | insn[5] = BPF_MOV64_IMM(R6, 0); | ||
| 457 | insn[6] = BPF_MOV64_REG(R1, R7); | ||
| 458 | insn[7] = BPF_MOV64_IMM(R2, 1); | ||
| 459 | insn[8] = BPF_MOV64_IMM(R3, 2); | ||
| 460 | insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
| 461 | bpf_skb_vlan_push_proto.func - __bpf_call_base); | ||
| 462 | insn[10] = BPF_MOV64_REG(R6, R7); | ||
| 463 | insn[11] = BPF_LD_ABS(BPF_B, 0); | ||
| 464 | insn[12] = BPF_LD_ABS(BPF_H, 0); | ||
| 465 | insn[13] = BPF_LD_ABS(BPF_W, 0); | ||
| 466 | insn[14] = BPF_MOV64_IMM(R0, 42); | ||
| 467 | insn[15] = BPF_EXIT_INSN(); | ||
| 468 | |||
| 469 | self->u.ptr.insns = insn; | ||
| 470 | self->u.ptr.len = 16; | ||
| 471 | |||
| 472 | return 0; | ||
| 473 | } | 409 | } |
| 474 | 410 | ||
| 475 | static int bpf_fill_jump_around_ld_abs(struct bpf_test *self) | 411 | static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self) |
| 476 | { | 412 | { |
| 477 | unsigned int len = BPF_MAXINSNS; | 413 | unsigned int len = BPF_MAXINSNS; |
| 478 | struct bpf_insn *insn; | 414 | struct sock_filter *insn; |
| 479 | int i = 0; | 415 | int i; |
| 480 | 416 | ||
| 481 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); | 417 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); |
| 482 | if (!insn) | 418 | if (!insn) |
| 483 | return -ENOMEM; | 419 | return -ENOMEM; |
| 484 | 420 | ||
| 485 | insn[i++] = BPF_MOV64_REG(R6, R1); | 421 | for (i = 0; i < len - 1; i += 2) { |
| 486 | insn[i++] = BPF_LD_ABS(BPF_B, 0); | 422 | insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0); |
| 487 | insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2); | 423 | insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, |
| 488 | i++; | 424 | SKF_AD_OFF + SKF_AD_CPU); |
| 489 | while (i < len - 1) | 425 | } |
| 490 | insn[i++] = BPF_LD_ABS(BPF_B, 1); | 426 | |
| 491 | insn[i] = BPF_EXIT_INSN(); | 427 | insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee); |
| 492 | 428 | ||
| 493 | self->u.ptr.insns = insn; | 429 | self->u.ptr.insns = insn; |
| 494 | self->u.ptr.len = len; | 430 | self->u.ptr.len = len; |
| @@ -1988,40 +1924,6 @@ static struct bpf_test tests[] = { | |||
| 1988 | { { 0, -1 } } | 1924 | { { 0, -1 } } |
| 1989 | }, | 1925 | }, |
| 1990 | { | 1926 | { |
| 1991 | "INT: DIV + ABS", | ||
| 1992 | .u.insns_int = { | ||
| 1993 | BPF_ALU64_REG(BPF_MOV, R6, R1), | ||
| 1994 | BPF_LD_ABS(BPF_B, 3), | ||
| 1995 | BPF_ALU64_IMM(BPF_MOV, R2, 2), | ||
| 1996 | BPF_ALU32_REG(BPF_DIV, R0, R2), | ||
| 1997 | BPF_ALU64_REG(BPF_MOV, R8, R0), | ||
| 1998 | BPF_LD_ABS(BPF_B, 4), | ||
| 1999 | BPF_ALU64_REG(BPF_ADD, R8, R0), | ||
| 2000 | BPF_LD_IND(BPF_B, R8, -70), | ||
| 2001 | BPF_EXIT_INSN(), | ||
| 2002 | }, | ||
| 2003 | INTERNAL, | ||
| 2004 | { 10, 20, 30, 40, 50 }, | ||
| 2005 | { { 4, 0 }, { 5, 10 } } | ||
| 2006 | }, | ||
| 2007 | { | ||
| 2008 | /* This one doesn't go through verifier, but is just raw insn | ||
| 2009 | * as opposed to cBPF tests from here. Thus div by 0 tests are | ||
| 2010 | * done in test_verifier in BPF kselftests. | ||
| 2011 | */ | ||
| 2012 | "INT: DIV by -1", | ||
| 2013 | .u.insns_int = { | ||
| 2014 | BPF_ALU64_REG(BPF_MOV, R6, R1), | ||
| 2015 | BPF_ALU64_IMM(BPF_MOV, R7, -1), | ||
| 2016 | BPF_LD_ABS(BPF_B, 3), | ||
| 2017 | BPF_ALU32_REG(BPF_DIV, R0, R7), | ||
| 2018 | BPF_EXIT_INSN(), | ||
| 2019 | }, | ||
| 2020 | INTERNAL, | ||
| 2021 | { 10, 20, 30, 40, 50 }, | ||
| 2022 | { { 3, 0 }, { 4, 0 } } | ||
| 2023 | }, | ||
| 2024 | { | ||
| 2025 | "check: missing ret", | 1927 | "check: missing ret", |
| 2026 | .u.insns = { | 1928 | .u.insns = { |
| 2027 | BPF_STMT(BPF_LD | BPF_IMM, 1), | 1929 | BPF_STMT(BPF_LD | BPF_IMM, 1), |
| @@ -2383,50 +2285,6 @@ static struct bpf_test tests[] = { | |||
| 2383 | { }, | 2285 | { }, |
| 2384 | { { 0, 1 } } | 2286 | { { 0, 1 } } |
| 2385 | }, | 2287 | }, |
| 2386 | { | ||
| 2387 | "nmap reduced", | ||
| 2388 | .u.insns_int = { | ||
| 2389 | BPF_MOV64_REG(R6, R1), | ||
| 2390 | BPF_LD_ABS(BPF_H, 12), | ||
| 2391 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28), | ||
| 2392 | BPF_LD_ABS(BPF_H, 12), | ||
| 2393 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26), | ||
| 2394 | BPF_MOV32_IMM(R0, 18), | ||
| 2395 | BPF_STX_MEM(BPF_W, R10, R0, -64), | ||
| 2396 | BPF_LDX_MEM(BPF_W, R7, R10, -64), | ||
| 2397 | BPF_LD_IND(BPF_W, R7, 14), | ||
| 2398 | BPF_STX_MEM(BPF_W, R10, R0, -60), | ||
| 2399 | BPF_MOV32_IMM(R0, 280971478), | ||
| 2400 | BPF_STX_MEM(BPF_W, R10, R0, -56), | ||
| 2401 | BPF_LDX_MEM(BPF_W, R7, R10, -56), | ||
| 2402 | BPF_LDX_MEM(BPF_W, R0, R10, -60), | ||
| 2403 | BPF_ALU32_REG(BPF_SUB, R0, R7), | ||
| 2404 | BPF_JMP_IMM(BPF_JNE, R0, 0, 15), | ||
| 2405 | BPF_LD_ABS(BPF_H, 12), | ||
| 2406 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13), | ||
| 2407 | BPF_MOV32_IMM(R0, 22), | ||
| 2408 | BPF_STX_MEM(BPF_W, R10, R0, -56), | ||
| 2409 | BPF_LDX_MEM(BPF_W, R7, R10, -56), | ||
| 2410 | BPF_LD_IND(BPF_H, R7, 14), | ||
| 2411 | BPF_STX_MEM(BPF_W, R10, R0, -52), | ||
| 2412 | BPF_MOV32_IMM(R0, 17366), | ||
| 2413 | BPF_STX_MEM(BPF_W, R10, R0, -48), | ||
| 2414 | BPF_LDX_MEM(BPF_W, R7, R10, -48), | ||
| 2415 | BPF_LDX_MEM(BPF_W, R0, R10, -52), | ||
| 2416 | BPF_ALU32_REG(BPF_SUB, R0, R7), | ||
| 2417 | BPF_JMP_IMM(BPF_JNE, R0, 0, 2), | ||
| 2418 | BPF_MOV32_IMM(R0, 256), | ||
| 2419 | BPF_EXIT_INSN(), | ||
| 2420 | BPF_MOV32_IMM(R0, 0), | ||
| 2421 | BPF_EXIT_INSN(), | ||
| 2422 | }, | ||
| 2423 | INTERNAL, | ||
| 2424 | { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, | ||
| 2425 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 2426 | 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, | ||
| 2427 | { { 38, 256 } }, | ||
| 2428 | .stack_depth = 64, | ||
| 2429 | }, | ||
| 2430 | /* BPF_ALU | BPF_MOV | BPF_X */ | 2288 | /* BPF_ALU | BPF_MOV | BPF_X */ |
| 2431 | { | 2289 | { |
| 2432 | "ALU_MOV_X: dst = 2", | 2290 | "ALU_MOV_X: dst = 2", |
| @@ -5478,28 +5336,29 @@ static struct bpf_test tests[] = { | |||
| 5478 | .expected_errcode = -ENOTSUPP, | 5336 | .expected_errcode = -ENOTSUPP, |
| 5479 | }, | 5337 | }, |
| 5480 | { | 5338 | { |
| 5481 | "BPF_MAXINSNS: ld_abs+get_processor_id", | 5339 | "BPF_MAXINSNS: jump over MSH", |
| 5482 | { }, | ||
| 5483 | CLASSIC, | ||
| 5484 | { }, | 5340 | { }, |
| 5485 | { { 1, 0xbee } }, | 5341 | CLASSIC | FLAG_EXPECTED_FAIL, |
| 5486 | .fill_helper = bpf_fill_ld_abs_get_processor_id, | 5342 | { 0xfa, 0xfb, 0xfc, 0xfd, }, |
| 5343 | { { 4, 0xabababab } }, | ||
| 5344 | .fill_helper = bpf_fill_maxinsns12, | ||
| 5345 | .expected_errcode = -EINVAL, | ||
| 5487 | }, | 5346 | }, |
| 5488 | { | 5347 | { |
| 5489 | "BPF_MAXINSNS: ld_abs+vlan_push/pop", | 5348 | "BPF_MAXINSNS: exec all MSH", |
| 5490 | { }, | 5349 | { }, |
| 5491 | INTERNAL, | 5350 | CLASSIC, |
| 5492 | { 0x34 }, | 5351 | { 0xfa, 0xfb, 0xfc, 0xfd, }, |
| 5493 | { { ETH_HLEN, 0xbef } }, | 5352 | { { 4, 0xababab83 } }, |
| 5494 | .fill_helper = bpf_fill_ld_abs_vlan_push_pop, | 5353 | .fill_helper = bpf_fill_maxinsns13, |
| 5495 | }, | 5354 | }, |
| 5496 | { | 5355 | { |
| 5497 | "BPF_MAXINSNS: jump around ld_abs", | 5356 | "BPF_MAXINSNS: ld_abs+get_processor_id", |
| 5498 | { }, | 5357 | { }, |
| 5499 | INTERNAL, | 5358 | CLASSIC, |
| 5500 | { 10, 11 }, | 5359 | { }, |
| 5501 | { { 2, 10 } }, | 5360 | { { 1, 0xbee } }, |
| 5502 | .fill_helper = bpf_fill_jump_around_ld_abs, | 5361 | .fill_helper = bpf_fill_ld_abs_get_processor_id, |
| 5503 | }, | 5362 | }, |
| 5504 | /* | 5363 | /* |
| 5505 | * LD_IND / LD_ABS on fragmented SKBs | 5364 | * LD_IND / LD_ABS on fragmented SKBs |
| @@ -5683,6 +5542,53 @@ static struct bpf_test tests[] = { | |||
| 5683 | { {0x40, 0x05 } }, | 5542 | { {0x40, 0x05 } }, |
| 5684 | }, | 5543 | }, |
| 5685 | { | 5544 | { |
| 5545 | "LD_IND byte positive offset, all ff", | ||
| 5546 | .u.insns = { | ||
| 5547 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5548 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), | ||
| 5549 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5550 | }, | ||
| 5551 | CLASSIC, | ||
| 5552 | { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, | ||
| 5553 | { {0x40, 0xff } }, | ||
| 5554 | }, | ||
| 5555 | { | ||
| 5556 | "LD_IND byte positive offset, out of bounds", | ||
| 5557 | .u.insns = { | ||
| 5558 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5559 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), | ||
| 5560 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5561 | }, | ||
| 5562 | CLASSIC, | ||
| 5563 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5564 | { {0x3f, 0 }, }, | ||
| 5565 | }, | ||
| 5566 | { | ||
| 5567 | "LD_IND byte negative offset, out of bounds", | ||
| 5568 | .u.insns = { | ||
| 5569 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5570 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f), | ||
| 5571 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5572 | }, | ||
| 5573 | CLASSIC, | ||
| 5574 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5575 | { {0x3f, 0 } }, | ||
| 5576 | }, | ||
| 5577 | { | ||
| 5578 | "LD_IND byte negative offset, multiple calls", | ||
| 5579 | .u.insns = { | ||
| 5580 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), | ||
| 5581 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1), | ||
| 5582 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2), | ||
| 5583 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3), | ||
| 5584 | BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4), | ||
| 5585 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5586 | }, | ||
| 5587 | CLASSIC, | ||
| 5588 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5589 | { {0x40, 0x82 }, }, | ||
| 5590 | }, | ||
| 5591 | { | ||
| 5686 | "LD_IND halfword positive offset", | 5592 | "LD_IND halfword positive offset", |
| 5687 | .u.insns = { | 5593 | .u.insns = { |
| 5688 | BPF_STMT(BPF_LDX | BPF_IMM, 0x20), | 5594 | BPF_STMT(BPF_LDX | BPF_IMM, 0x20), |
| @@ -5731,6 +5637,39 @@ static struct bpf_test tests[] = { | |||
| 5731 | { {0x40, 0x66cc } }, | 5637 | { {0x40, 0x66cc } }, |
| 5732 | }, | 5638 | }, |
| 5733 | { | 5639 | { |
| 5640 | "LD_IND halfword positive offset, all ff", | ||
| 5641 | .u.insns = { | ||
| 5642 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3d), | ||
| 5643 | BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), | ||
| 5644 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5645 | }, | ||
| 5646 | CLASSIC, | ||
| 5647 | { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, | ||
| 5648 | { {0x40, 0xffff } }, | ||
| 5649 | }, | ||
| 5650 | { | ||
| 5651 | "LD_IND halfword positive offset, out of bounds", | ||
| 5652 | .u.insns = { | ||
| 5653 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5654 | BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), | ||
| 5655 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5656 | }, | ||
| 5657 | CLASSIC, | ||
| 5658 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5659 | { {0x3f, 0 }, }, | ||
| 5660 | }, | ||
| 5661 | { | ||
| 5662 | "LD_IND halfword negative offset, out of bounds", | ||
| 5663 | .u.insns = { | ||
| 5664 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5665 | BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f), | ||
| 5666 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5667 | }, | ||
| 5668 | CLASSIC, | ||
| 5669 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5670 | { {0x3f, 0 } }, | ||
| 5671 | }, | ||
| 5672 | { | ||
| 5734 | "LD_IND word positive offset", | 5673 | "LD_IND word positive offset", |
| 5735 | .u.insns = { | 5674 | .u.insns = { |
| 5736 | BPF_STMT(BPF_LDX | BPF_IMM, 0x20), | 5675 | BPF_STMT(BPF_LDX | BPF_IMM, 0x20), |
| @@ -5821,6 +5760,39 @@ static struct bpf_test tests[] = { | |||
| 5821 | { {0x40, 0x66cc77dd } }, | 5760 | { {0x40, 0x66cc77dd } }, |
| 5822 | }, | 5761 | }, |
| 5823 | { | 5762 | { |
| 5763 | "LD_IND word positive offset, all ff", | ||
| 5764 | .u.insns = { | ||
| 5765 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), | ||
| 5766 | BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), | ||
| 5767 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5768 | }, | ||
| 5769 | CLASSIC, | ||
| 5770 | { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, | ||
| 5771 | { {0x40, 0xffffffff } }, | ||
| 5772 | }, | ||
| 5773 | { | ||
| 5774 | "LD_IND word positive offset, out of bounds", | ||
| 5775 | .u.insns = { | ||
| 5776 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5777 | BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), | ||
| 5778 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5779 | }, | ||
| 5780 | CLASSIC, | ||
| 5781 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5782 | { {0x3f, 0 }, }, | ||
| 5783 | }, | ||
| 5784 | { | ||
| 5785 | "LD_IND word negative offset, out of bounds", | ||
| 5786 | .u.insns = { | ||
| 5787 | BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), | ||
| 5788 | BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f), | ||
| 5789 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5790 | }, | ||
| 5791 | CLASSIC, | ||
| 5792 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5793 | { {0x3f, 0 } }, | ||
| 5794 | }, | ||
| 5795 | { | ||
| 5824 | "LD_ABS byte", | 5796 | "LD_ABS byte", |
| 5825 | .u.insns = { | 5797 | .u.insns = { |
| 5826 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20), | 5798 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20), |
| @@ -5838,6 +5810,68 @@ static struct bpf_test tests[] = { | |||
| 5838 | { {0x40, 0xcc } }, | 5810 | { {0x40, 0xcc } }, |
| 5839 | }, | 5811 | }, |
| 5840 | { | 5812 | { |
| 5813 | "LD_ABS byte positive offset, all ff", | ||
| 5814 | .u.insns = { | ||
| 5815 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), | ||
| 5816 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5817 | }, | ||
| 5818 | CLASSIC, | ||
| 5819 | { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, | ||
| 5820 | { {0x40, 0xff } }, | ||
| 5821 | }, | ||
| 5822 | { | ||
| 5823 | "LD_ABS byte positive offset, out of bounds", | ||
| 5824 | .u.insns = { | ||
| 5825 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), | ||
| 5826 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5827 | }, | ||
| 5828 | CLASSIC, | ||
| 5829 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5830 | { {0x3f, 0 }, }, | ||
| 5831 | }, | ||
| 5832 | { | ||
| 5833 | "LD_ABS byte negative offset, out of bounds load", | ||
| 5834 | .u.insns = { | ||
| 5835 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1), | ||
| 5836 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5837 | }, | ||
| 5838 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
| 5839 | .expected_errcode = -EINVAL, | ||
| 5840 | }, | ||
| 5841 | { | ||
| 5842 | "LD_ABS byte negative offset, in bounds", | ||
| 5843 | .u.insns = { | ||
| 5844 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), | ||
| 5845 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5846 | }, | ||
| 5847 | CLASSIC, | ||
| 5848 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5849 | { {0x40, 0x82 }, }, | ||
| 5850 | }, | ||
| 5851 | { | ||
| 5852 | "LD_ABS byte negative offset, out of bounds", | ||
| 5853 | .u.insns = { | ||
| 5854 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), | ||
| 5855 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5856 | }, | ||
| 5857 | CLASSIC, | ||
| 5858 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5859 | { {0x3f, 0 }, }, | ||
| 5860 | }, | ||
| 5861 | { | ||
| 5862 | "LD_ABS byte negative offset, multiple calls", | ||
| 5863 | .u.insns = { | ||
| 5864 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c), | ||
| 5865 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d), | ||
| 5866 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e), | ||
| 5867 | BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), | ||
| 5868 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5869 | }, | ||
| 5870 | CLASSIC, | ||
| 5871 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5872 | { {0x40, 0x82 }, }, | ||
| 5873 | }, | ||
| 5874 | { | ||
| 5841 | "LD_ABS halfword", | 5875 | "LD_ABS halfword", |
| 5842 | .u.insns = { | 5876 | .u.insns = { |
| 5843 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22), | 5877 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22), |
| @@ -5872,6 +5906,55 @@ static struct bpf_test tests[] = { | |||
| 5872 | { {0x40, 0x99ff } }, | 5906 | { {0x40, 0x99ff } }, |
| 5873 | }, | 5907 | }, |
| 5874 | { | 5908 | { |
| 5909 | "LD_ABS halfword positive offset, all ff", | ||
| 5910 | .u.insns = { | ||
| 5911 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e), | ||
| 5912 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5913 | }, | ||
| 5914 | CLASSIC, | ||
| 5915 | { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, | ||
| 5916 | { {0x40, 0xffff } }, | ||
| 5917 | }, | ||
| 5918 | { | ||
| 5919 | "LD_ABS halfword positive offset, out of bounds", | ||
| 5920 | .u.insns = { | ||
| 5921 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), | ||
| 5922 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5923 | }, | ||
| 5924 | CLASSIC, | ||
| 5925 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5926 | { {0x3f, 0 }, }, | ||
| 5927 | }, | ||
| 5928 | { | ||
| 5929 | "LD_ABS halfword negative offset, out of bounds load", | ||
| 5930 | .u.insns = { | ||
| 5931 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1), | ||
| 5932 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5933 | }, | ||
| 5934 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
| 5935 | .expected_errcode = -EINVAL, | ||
| 5936 | }, | ||
| 5937 | { | ||
| 5938 | "LD_ABS halfword negative offset, in bounds", | ||
| 5939 | .u.insns = { | ||
| 5940 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), | ||
| 5941 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5942 | }, | ||
| 5943 | CLASSIC, | ||
| 5944 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5945 | { {0x40, 0x1982 }, }, | ||
| 5946 | }, | ||
| 5947 | { | ||
| 5948 | "LD_ABS halfword negative offset, out of bounds", | ||
| 5949 | .u.insns = { | ||
| 5950 | BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), | ||
| 5951 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 5952 | }, | ||
| 5953 | CLASSIC, | ||
| 5954 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 5955 | { {0x3f, 0 }, }, | ||
| 5956 | }, | ||
| 5957 | { | ||
| 5875 | "LD_ABS word", | 5958 | "LD_ABS word", |
| 5876 | .u.insns = { | 5959 | .u.insns = { |
| 5877 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c), | 5960 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c), |
| @@ -5939,6 +6022,140 @@ static struct bpf_test tests[] = { | |||
| 5939 | }, | 6022 | }, |
| 5940 | { {0x40, 0x88ee99ff } }, | 6023 | { {0x40, 0x88ee99ff } }, |
| 5941 | }, | 6024 | }, |
| 6025 | { | ||
| 6026 | "LD_ABS word positive offset, all ff", | ||
| 6027 | .u.insns = { | ||
| 6028 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c), | ||
| 6029 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6030 | }, | ||
| 6031 | CLASSIC, | ||
| 6032 | { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, | ||
| 6033 | { {0x40, 0xffffffff } }, | ||
| 6034 | }, | ||
| 6035 | { | ||
| 6036 | "LD_ABS word positive offset, out of bounds", | ||
| 6037 | .u.insns = { | ||
| 6038 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f), | ||
| 6039 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6040 | }, | ||
| 6041 | CLASSIC, | ||
| 6042 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6043 | { {0x3f, 0 }, }, | ||
| 6044 | }, | ||
| 6045 | { | ||
| 6046 | "LD_ABS word negative offset, out of bounds load", | ||
| 6047 | .u.insns = { | ||
| 6048 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1), | ||
| 6049 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6050 | }, | ||
| 6051 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
| 6052 | .expected_errcode = -EINVAL, | ||
| 6053 | }, | ||
| 6054 | { | ||
| 6055 | "LD_ABS word negative offset, in bounds", | ||
| 6056 | .u.insns = { | ||
| 6057 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), | ||
| 6058 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6059 | }, | ||
| 6060 | CLASSIC, | ||
| 6061 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6062 | { {0x40, 0x25051982 }, }, | ||
| 6063 | }, | ||
| 6064 | { | ||
| 6065 | "LD_ABS word negative offset, out of bounds", | ||
| 6066 | .u.insns = { | ||
| 6067 | BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), | ||
| 6068 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6069 | }, | ||
| 6070 | CLASSIC, | ||
| 6071 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6072 | { {0x3f, 0 }, }, | ||
| 6073 | }, | ||
| 6074 | { | ||
| 6075 | "LDX_MSH standalone, preserved A", | ||
| 6076 | .u.insns = { | ||
| 6077 | BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), | ||
| 6078 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), | ||
| 6079 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6080 | }, | ||
| 6081 | CLASSIC, | ||
| 6082 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6083 | { {0x40, 0xffeebbaa }, }, | ||
| 6084 | }, | ||
| 6085 | { | ||
| 6086 | "LDX_MSH standalone, preserved A 2", | ||
| 6087 | .u.insns = { | ||
| 6088 | BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63), | ||
| 6089 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), | ||
| 6090 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d), | ||
| 6091 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), | ||
| 6092 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f), | ||
| 6093 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6094 | }, | ||
| 6095 | CLASSIC, | ||
| 6096 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6097 | { {0x40, 0x175e9d63 }, }, | ||
| 6098 | }, | ||
| 6099 | { | ||
| 6100 | "LDX_MSH standalone, test result 1", | ||
| 6101 | .u.insns = { | ||
| 6102 | BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), | ||
| 6103 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), | ||
| 6104 | BPF_STMT(BPF_MISC | BPF_TXA, 0), | ||
| 6105 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6106 | }, | ||
| 6107 | CLASSIC, | ||
| 6108 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6109 | { {0x40, 0x14 }, }, | ||
| 6110 | }, | ||
| 6111 | { | ||
| 6112 | "LDX_MSH standalone, test result 2", | ||
| 6113 | .u.insns = { | ||
| 6114 | BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), | ||
| 6115 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), | ||
| 6116 | BPF_STMT(BPF_MISC | BPF_TXA, 0), | ||
| 6117 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6118 | }, | ||
| 6119 | CLASSIC, | ||
| 6120 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6121 | { {0x40, 0x24 }, }, | ||
| 6122 | }, | ||
| 6123 | { | ||
| 6124 | "LDX_MSH standalone, negative offset", | ||
| 6125 | .u.insns = { | ||
| 6126 | BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), | ||
| 6127 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1), | ||
| 6128 | BPF_STMT(BPF_MISC | BPF_TXA, 0), | ||
| 6129 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6130 | }, | ||
| 6131 | CLASSIC, | ||
| 6132 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6133 | { {0x40, 0 }, }, | ||
| 6134 | }, | ||
| 6135 | { | ||
| 6136 | "LDX_MSH standalone, negative offset 2", | ||
| 6137 | .u.insns = { | ||
| 6138 | BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), | ||
| 6139 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e), | ||
| 6140 | BPF_STMT(BPF_MISC | BPF_TXA, 0), | ||
| 6141 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6142 | }, | ||
| 6143 | CLASSIC, | ||
| 6144 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6145 | { {0x40, 0x24 }, }, | ||
| 6146 | }, | ||
| 6147 | { | ||
| 6148 | "LDX_MSH standalone, out of bounds", | ||
| 6149 | .u.insns = { | ||
| 6150 | BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), | ||
| 6151 | BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40), | ||
| 6152 | BPF_STMT(BPF_MISC | BPF_TXA, 0), | ||
| 6153 | BPF_STMT(BPF_RET | BPF_A, 0x0), | ||
| 6154 | }, | ||
| 6155 | CLASSIC, | ||
| 6156 | { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, | ||
| 6157 | { {0x40, 0 }, }, | ||
| 6158 | }, | ||
| 5942 | /* | 6159 | /* |
| 5943 | * verify that the interpreter or JIT correctly sets A and X | 6160 | * verify that the interpreter or JIT correctly sets A and X |
| 5944 | * to 0. | 6161 | * to 0. |
| @@ -6127,14 +6344,6 @@ static struct bpf_test tests[] = { | |||
| 6127 | {}, | 6344 | {}, |
| 6128 | { {0x1, 0x42 } }, | 6345 | { {0x1, 0x42 } }, |
| 6129 | }, | 6346 | }, |
| 6130 | { | ||
| 6131 | "LD_ABS with helper changing skb data", | ||
| 6132 | { }, | ||
| 6133 | INTERNAL, | ||
| 6134 | { 0x34 }, | ||
| 6135 | { { ETH_HLEN, 42 } }, | ||
| 6136 | .fill_helper = bpf_fill_ld_abs_vlan_push_pop2, | ||
| 6137 | }, | ||
| 6138 | /* Checking interpreter vs JIT wrt signed extended imms. */ | 6347 | /* Checking interpreter vs JIT wrt signed extended imms. */ |
| 6139 | { | 6348 | { |
| 6140 | "JNE signed compare, test 1", | 6349 | "JNE signed compare, test 1", |
diff --git a/lib/test_overflow.c b/lib/test_overflow.c new file mode 100644 index 000000000000..aecbbb217305 --- /dev/null +++ b/lib/test_overflow.c | |||
| @@ -0,0 +1,417 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 OR MIT | ||
| 2 | /* | ||
| 3 | * Test cases for arithmetic overflow checks. | ||
| 4 | */ | ||
| 5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 6 | |||
| 7 | #include <linux/device.h> | ||
| 8 | #include <linux/init.h> | ||
| 9 | #include <linux/kernel.h> | ||
| 10 | #include <linux/mm.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/overflow.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/types.h> | ||
| 15 | #include <linux/vmalloc.h> | ||
| 16 | |||
| 17 | #define DEFINE_TEST_ARRAY(t) \ | ||
| 18 | static const struct test_ ## t { \ | ||
| 19 | t a, b; \ | ||
| 20 | t sum, diff, prod; \ | ||
| 21 | bool s_of, d_of, p_of; \ | ||
| 22 | } t ## _tests[] __initconst | ||
| 23 | |||
| 24 | DEFINE_TEST_ARRAY(u8) = { | ||
| 25 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 26 | {1, 1, 2, 0, 1, false, false, false}, | ||
| 27 | {0, 1, 1, U8_MAX, 0, false, true, false}, | ||
| 28 | {1, 0, 1, 1, 0, false, false, false}, | ||
| 29 | {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, | ||
| 30 | {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, | ||
| 31 | {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, | ||
| 32 | {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, | ||
| 33 | {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, | ||
| 34 | |||
| 35 | {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, | ||
| 36 | {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, | ||
| 37 | |||
| 38 | {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, | ||
| 39 | {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, | ||
| 40 | {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, | ||
| 41 | {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, | ||
| 42 | |||
| 43 | {48, 32, 80, 16, 0, false, false, true}, | ||
| 44 | {128, 128, 0, 0, 0, true, false, true}, | ||
| 45 | {123, 234, 101, 145, 110, true, true, true}, | ||
| 46 | }; | ||
| 47 | DEFINE_TEST_ARRAY(u16) = { | ||
| 48 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 49 | {1, 1, 2, 0, 1, false, false, false}, | ||
| 50 | {0, 1, 1, U16_MAX, 0, false, true, false}, | ||
| 51 | {1, 0, 1, 1, 0, false, false, false}, | ||
| 52 | {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, | ||
| 53 | {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, | ||
| 54 | {1, U16_MAX, 0, 2, U16_MAX, true, true, false}, | ||
| 55 | {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, | ||
| 56 | {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, | ||
| 57 | |||
| 58 | {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, | ||
| 59 | {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, | ||
| 60 | |||
| 61 | {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, | ||
| 62 | {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, | ||
| 63 | {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, | ||
| 64 | {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, | ||
| 65 | |||
| 66 | {123, 234, 357, 65425, 28782, false, true, false}, | ||
| 67 | {1234, 2345, 3579, 64425, 10146, false, true, true}, | ||
| 68 | }; | ||
| 69 | DEFINE_TEST_ARRAY(u32) = { | ||
| 70 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 71 | {1, 1, 2, 0, 1, false, false, false}, | ||
| 72 | {0, 1, 1, U32_MAX, 0, false, true, false}, | ||
| 73 | {1, 0, 1, 1, 0, false, false, false}, | ||
| 74 | {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, | ||
| 75 | {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false}, | ||
| 76 | {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, | ||
| 77 | {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, | ||
| 78 | {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, | ||
| 79 | |||
| 80 | {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, | ||
| 81 | {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, | ||
| 82 | |||
| 83 | {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, | ||
| 84 | {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, | ||
| 85 | {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, | ||
| 86 | {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, | ||
| 87 | |||
| 88 | {-2U, 1U, -1U, -3U, -2U, false, false, false}, | ||
| 89 | {-4U, 5U, 1U, -9U, -20U, true, false, true}, | ||
| 90 | }; | ||
| 91 | |||
| 92 | DEFINE_TEST_ARRAY(u64) = { | ||
| 93 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 94 | {1, 1, 2, 0, 1, false, false, false}, | ||
| 95 | {0, 1, 1, U64_MAX, 0, false, true, false}, | ||
| 96 | {1, 0, 1, 1, 0, false, false, false}, | ||
| 97 | {0, U64_MAX, U64_MAX, 1, 0, false, true, false}, | ||
| 98 | {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false}, | ||
| 99 | {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, | ||
| 100 | {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, | ||
| 101 | {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, | ||
| 102 | |||
| 103 | {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, | ||
| 104 | {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, | ||
| 105 | |||
| 106 | {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, | ||
| 107 | {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, | ||
| 108 | {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, | ||
| 109 | {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, | ||
| 110 | {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, | ||
| 111 | 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, | ||
| 112 | false, true, false}, | ||
| 113 | {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, | ||
| 114 | }; | ||
| 115 | |||
| 116 | DEFINE_TEST_ARRAY(s8) = { | ||
| 117 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 118 | |||
| 119 | {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, | ||
| 120 | {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, | ||
| 121 | {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, | ||
| 122 | {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, | ||
| 123 | |||
| 124 | {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, | ||
| 125 | {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, | ||
| 126 | {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, | ||
| 127 | {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, | ||
| 128 | {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, | ||
| 129 | {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, | ||
| 130 | |||
| 131 | {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, | ||
| 132 | {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, | ||
| 133 | {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, | ||
| 134 | {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, | ||
| 135 | |||
| 136 | {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, | ||
| 137 | {S8_MAX, S8_MAX, -2, 0, 1, true, false, true}, | ||
| 138 | |||
| 139 | {-4, -32, -36, 28, -128, false, false, true}, | ||
| 140 | {-4, 32, 28, -36, -128, false, false, false}, | ||
| 141 | }; | ||
| 142 | |||
| 143 | DEFINE_TEST_ARRAY(s16) = { | ||
| 144 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 145 | |||
| 146 | {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, | ||
| 147 | {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, | ||
| 148 | {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, | ||
| 149 | {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, | ||
| 150 | |||
| 151 | {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, | ||
| 152 | {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, | ||
| 153 | {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, | ||
| 154 | {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, | ||
| 155 | {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, | ||
| 156 | {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, | ||
| 157 | |||
| 158 | {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, | ||
| 159 | {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false}, | ||
| 160 | {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, | ||
| 161 | {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, | ||
| 162 | |||
| 163 | {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, | ||
| 164 | {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, | ||
| 165 | }; | ||
| 166 | DEFINE_TEST_ARRAY(s32) = { | ||
| 167 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 168 | |||
| 169 | {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, | ||
| 170 | {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false}, | ||
| 171 | {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, | ||
| 172 | {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, | ||
| 173 | |||
| 174 | {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, | ||
| 175 | {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, | ||
| 176 | {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false}, | ||
| 177 | {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, | ||
| 178 | {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, | ||
| 179 | {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, | ||
| 180 | |||
| 181 | {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, | ||
| 182 | {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, | ||
| 183 | {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, | ||
| 184 | {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, | ||
| 185 | |||
| 186 | {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, | ||
| 187 | {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, | ||
| 188 | }; | ||
| 189 | DEFINE_TEST_ARRAY(s64) = { | ||
| 190 | {0, 0, 0, 0, 0, false, false, false}, | ||
| 191 | |||
| 192 | {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, | ||
| 193 | {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, | ||
| 194 | {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, | ||
| 195 | {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, | ||
| 196 | |||
| 197 | {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, | ||
| 198 | {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, | ||
| 199 | {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, | ||
| 200 | {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, | ||
| 201 | {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, | ||
| 202 | {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, | ||
| 203 | |||
| 204 | {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, | ||
| 205 | {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, | ||
| 206 | {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, | ||
| 207 | {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, | ||
| 208 | |||
| 209 | {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, | ||
| 210 | {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, | ||
| 211 | |||
| 212 | {-1, -1, -2, 0, 1, false, false, false}, | ||
| 213 | {-1, -128, -129, 127, 128, false, false, false}, | ||
| 214 | {-128, -1, -129, -127, 128, false, false, false}, | ||
| 215 | {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, | ||
| 216 | }; | ||
| 217 | |||
| 218 | #define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ | ||
| 219 | t _r; \ | ||
| 220 | bool _of; \ | ||
| 221 | \ | ||
| 222 | _of = check_ ## op ## _overflow(a, b, &_r); \ | ||
| 223 | if (_of != of) { \ | ||
| 224 | pr_warn("expected "fmt" "sym" "fmt \ | ||
| 225 | " to%s overflow (type %s)\n", \ | ||
| 226 | a, b, of ? "" : " not", #t); \ | ||
| 227 | err = 1; \ | ||
| 228 | } \ | ||
| 229 | if (_r != r) { \ | ||
| 230 | pr_warn("expected "fmt" "sym" "fmt" == " \ | ||
| 231 | fmt", got "fmt" (type %s)\n", \ | ||
| 232 | a, b, r, _r, #t); \ | ||
| 233 | err = 1; \ | ||
| 234 | } \ | ||
| 235 | } while (0) | ||
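Each check_one_op() line expands to one call of a check_*_overflow() helper from <linux/overflow.h>, which always stores the (possibly wrapped) result and returns true on overflow. Reduced to a single concrete case (a sketch; the real macro also compares against the table and reports mismatches with pr_warn()):

	#include <linux/overflow.h>
	#include <linux/printk.h>

	static void __init overflow_example(void)
	{
		u8 r;

		/* 200 + 100 == 300 wraps to 44 in u8; the helper says so. */
		if (check_add_overflow((u8)200, (u8)100, &r))
			pr_info("overflowed, wrapped result = %u\n", r); /* 44 */
	}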
| 236 | |||
| 237 | #define DEFINE_TEST_FUNC(t, fmt) \ | ||
| 238 | static int __init do_test_ ## t(const struct test_ ## t *p) \ | ||
| 239 | { \ | ||
| 240 | int err = 0; \ | ||
| 241 | \ | ||
| 242 | check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ | ||
| 243 | check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ | ||
| 244 | check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ | ||
| 245 | check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ | ||
| 246 | check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ | ||
| 247 | \ | ||
| 248 | return err; \ | ||
| 249 | } \ | ||
| 250 | \ | ||
| 251 | static int __init test_ ## t ## _overflow(void) { \ | ||
| 252 | int err = 0; \ | ||
| 253 | unsigned i; \ | ||
| 254 | \ | ||
| 255 | pr_info("%-3s: %zu tests\n", #t, ARRAY_SIZE(t ## _tests)); \ | ||
| 256 | for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ | ||
| 257 | err |= do_test_ ## t(&t ## _tests[i]); \ | ||
| 258 | return err; \ | ||
| 259 | } | ||
| 260 | |||
| 261 | DEFINE_TEST_FUNC(u8, "%d"); | ||
| 262 | DEFINE_TEST_FUNC(s8, "%d"); | ||
| 263 | DEFINE_TEST_FUNC(u16, "%d"); | ||
| 264 | DEFINE_TEST_FUNC(s16, "%d"); | ||
| 265 | DEFINE_TEST_FUNC(u32, "%u"); | ||
| 266 | DEFINE_TEST_FUNC(s32, "%d"); | ||
| 267 | #if BITS_PER_LONG == 64 | ||
| 268 | DEFINE_TEST_FUNC(u64, "%llu"); | ||
| 269 | DEFINE_TEST_FUNC(s64, "%lld"); | ||
| 270 | #endif | ||
| 271 | |||
| 272 | static int __init test_overflow_calculation(void) | ||
| 273 | { | ||
| 274 | int err = 0; | ||
| 275 | |||
| 276 | err |= test_u8_overflow(); | ||
| 277 | err |= test_s8_overflow(); | ||
| 278 | err |= test_u16_overflow(); | ||
| 279 | err |= test_s16_overflow(); | ||
| 280 | err |= test_u32_overflow(); | ||
| 281 | err |= test_s32_overflow(); | ||
| 282 | #if BITS_PER_LONG == 64 | ||
| 283 | err |= test_u64_overflow(); | ||
| 284 | err |= test_s64_overflow(); | ||
| 285 | #endif | ||
| 286 | |||
| 287 | return err; | ||
| 288 | } | ||
| 289 | |||
| 290 | /* | ||
| 291 | * Deal with the various forms of allocator arguments. See comments above | ||
| 292 | * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". | ||
| 293 | */ | ||
| 294 | #define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL) | ||
| 295 | #define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE) | ||
| 296 | #define alloc000(alloc, arg, sz) alloc(sz) | ||
| 297 | #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) | ||
| 298 | #define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL) | ||
| 299 | #define free0(free, arg, ptr) free(ptr) | ||
| 300 | #define free1(free, arg, ptr) free(arg, ptr) | ||
| 301 | |||
| 302 | /* Wrap around to 8K */ | ||
| 303 | #define TEST_SIZE (9 << PAGE_SHIFT) | ||
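The "Wrap around to 8K" comment checks out by hand, assuming 4 KiB pages (PAGE_SHIFT == 12) and a 64-bit size_t: TEST_SIZE = 9 << 12 = 36864, and b = SIZE_MAX / TEST_SIZE + 1 makes a * b the smallest multiple of 36864 at or above 2^64. Since 2^64 mod 36864 = 28672, that product is 2^64 + 8192, which truncates to exactly 8192 bytes — an innocuous 8K allocation unless the size computation saturates. A user-space confirmation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		size_t a = 9u << 12;		/* TEST_SIZE with 4 KiB pages */
		size_t b = SIZE_MAX / a + 1;

		printf("a * b wraps to %zu\n", a * b);	/* 8192 on 64-bit */
		return 0;
	}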
| 304 | |||
| 305 | #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ | ||
| 306 | static int __init test_ ## func (void *arg) \ | ||
| 307 | { \ | ||
| 308 | volatile size_t a = TEST_SIZE; \ | ||
| 309 | volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ | ||
| 310 | void *ptr; \ | ||
| 311 | \ | ||
| 312 | /* Tiny allocation test. */ \ | ||
| 313 | ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ | ||
| 314 | if (!ptr) { \ | ||
| 315 | pr_warn(#func " failed regular allocation?!\n"); \ | ||
| 316 | return 1; \ | ||
| 317 | } \ | ||
| 318 | free ## want_arg (free_func, arg, ptr); \ | ||
| 319 | \ | ||
| 320 | /* Wrapped allocation test. */ \ | ||
| 321 | ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ | ||
| 322 | a * b); \ | ||
| 323 | if (!ptr) { \ | ||
| 324 | pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \ | ||
| 325 | return 1; \ | ||
| 326 | } \ | ||
| 327 | free ## want_arg (free_func, arg, ptr); \ | ||
| 328 | \ | ||
| 329 | /* Saturated allocation test. */ \ | ||
| 330 | ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ | ||
| 331 | array_size(a, b)); \ | ||
| 332 | if (ptr) { \ | ||
| 333 | pr_warn(#func " missed saturation!\n"); \ | ||
| 334 | free ## want_arg (free_func, arg, ptr); \ | ||
| 335 | return 1; \ | ||
| 336 | } \ | ||
| 337 | pr_info(#func " detected saturation\n"); \ | ||
| 338 | return 0; \ | ||
| 339 | } | ||
| 340 | |||
| 341 | /* | ||
| 342 | * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node()) | ||
| 343 | * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc()) | ||
| 344 | * Allocator uses a special leading argument + | | (e.g. devm_kmalloc()) | ||
| 345 | * | | | | ||
| 346 | */ | ||
| 347 | DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); | ||
| 348 | DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); | ||
| 349 | DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); | ||
| 350 | DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); | ||
| 351 | DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0); | ||
| 352 | DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1); | ||
| 353 | DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0); | ||
| 354 | DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1); | ||
| 355 | DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); | ||
| 356 | DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); | ||
| 357 | DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); | ||
| 358 | DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); | ||
| 359 | DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); | ||
| 360 | DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); | ||
| 361 | |||
| 362 | static int __init test_overflow_allocation(void) | ||
| 363 | { | ||
| 364 | const char device_name[] = "overflow-test"; | ||
| 365 | struct device *dev; | ||
| 366 | int err = 0; | ||
| 367 | |||
| 368 | /* Create dummy device for devm_kmalloc()-family tests. */ | ||
| 369 | dev = root_device_register(device_name); | ||
| 370 | if (IS_ERR(dev)) { | ||

| 371 | pr_warn("Cannot register test device\n"); | ||
| 372 | return 1; | ||
| 373 | } | ||
| 374 | |||
| 375 | err |= test_kmalloc(NULL); | ||
| 376 | err |= test_kmalloc_node(NULL); | ||
| 377 | err |= test_kzalloc(NULL); | ||
| 378 | err |= test_kzalloc_node(NULL); | ||
| 379 | err |= test_kvmalloc(NULL); | ||
| 380 | err |= test_kvmalloc_node(NULL); | ||
| 381 | err |= test_kvzalloc(NULL); | ||
| 382 | err |= test_kvzalloc_node(NULL); | ||
| 383 | err |= test_vmalloc(NULL); | ||
| 384 | err |= test_vmalloc_node(NULL); | ||
| 385 | err |= test_vzalloc(NULL); | ||
| 386 | err |= test_vzalloc_node(NULL); | ||
| 387 | err |= test_devm_kmalloc(dev); | ||
| 388 | err |= test_devm_kzalloc(dev); | ||
| 389 | |||
| 390 | device_unregister(dev); | ||
| 391 | |||
| 392 | return err; | ||
| 393 | } | ||
| 394 | |||
| 395 | static int __init test_module_init(void) | ||
| 396 | { | ||
| 397 | int err = 0; | ||
| 398 | |||
| 399 | err |= test_overflow_calculation(); | ||
| 400 | err |= test_overflow_allocation(); | ||
| 401 | |||
| 402 | if (err) { | ||
| 403 | pr_warn("FAIL!\n"); | ||
| 404 | err = -EINVAL; | ||
| 405 | } else { | ||
| 406 | pr_info("all tests passed\n"); | ||
| 407 | } | ||
| 408 | |||
| 409 | return err; | ||
| 410 | } | ||
| 411 | |||
| 412 | static void __exit test_module_exit(void) | ||
| 413 | { } | ||
| 414 | |||
| 415 | module_init(test_module_init); | ||
| 416 | module_exit(test_module_exit); | ||
| 417 | MODULE_LICENSE("Dual MIT/GPL"); | ||
diff --git a/lib/test_printf.c b/lib/test_printf.c index 71ebfa43ad05..cea592f402ed 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c | |||
| @@ -204,7 +204,7 @@ test_string(void) | |||
| 204 | #if BITS_PER_LONG == 64 | 204 | #if BITS_PER_LONG == 64 |
| 205 | 205 | ||
| 206 | #define PTR_WIDTH 16 | 206 | #define PTR_WIDTH 16 |
| 207 | #define PTR ((void *)0xffff0123456789ab) | 207 | #define PTR ((void *)0xffff0123456789abUL) |
| 208 | #define PTR_STR "ffff0123456789ab" | 208 | #define PTR_STR "ffff0123456789ab" |
| 209 | #define ZEROS "00000000" /* hex 32 zero bits */ | 209 | #define ZEROS "00000000" /* hex 32 zero bits */ |
| 210 | 210 | ||
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c index d7e06b28de38..0a559a42359b 100644 --- a/lib/ucs2_string.c +++ b/lib/ucs2_string.c | |||
| @@ -112,3 +112,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) | |||
| 112 | return j; | 112 | return j; |
| 113 | } | 113 | } |
| 114 | EXPORT_SYMBOL(ucs2_as_utf8); | 114 | EXPORT_SYMBOL(ucs2_as_utf8); |
| 115 | |||
| 116 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 23920c5ff728..a48aaa79d352 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -703,6 +703,22 @@ char *symbol_string(char *buf, char *end, void *ptr, | |||
| 703 | #endif | 703 | #endif |
| 704 | } | 704 | } |
| 705 | 705 | ||
| 706 | static const struct printf_spec default_str_spec = { | ||
| 707 | .field_width = -1, | ||
| 708 | .precision = -1, | ||
| 709 | }; | ||
| 710 | |||
| 711 | static const struct printf_spec default_flag_spec = { | ||
| 712 | .base = 16, | ||
| 713 | .precision = -1, | ||
| 714 | .flags = SPECIAL | SMALL, | ||
| 715 | }; | ||
| 716 | |||
| 717 | static const struct printf_spec default_dec_spec = { | ||
| 718 | .base = 10, | ||
| 719 | .precision = -1, | ||
| 720 | }; | ||
| 721 | |||
| 706 | static noinline_for_stack | 722 | static noinline_for_stack |
| 707 | char *resource_string(char *buf, char *end, struct resource *res, | 723 | char *resource_string(char *buf, char *end, struct resource *res, |
| 708 | struct printf_spec spec, const char *fmt) | 724 | struct printf_spec spec, const char *fmt) |
| @@ -732,21 +748,11 @@ char *resource_string(char *buf, char *end, struct resource *res, | |||
| 732 | .precision = -1, | 748 | .precision = -1, |
| 733 | .flags = SMALL | ZEROPAD, | 749 | .flags = SMALL | ZEROPAD, |
| 734 | }; | 750 | }; |
| 735 | static const struct printf_spec dec_spec = { | ||
| 736 | .base = 10, | ||
| 737 | .precision = -1, | ||
| 738 | .flags = 0, | ||
| 739 | }; | ||
| 740 | static const struct printf_spec str_spec = { | 751 | static const struct printf_spec str_spec = { |
| 741 | .field_width = -1, | 752 | .field_width = -1, |
| 742 | .precision = 10, | 753 | .precision = 10, |
| 743 | .flags = LEFT, | 754 | .flags = LEFT, |
| 744 | }; | 755 | }; |
| 745 | static const struct printf_spec flag_spec = { | ||
| 746 | .base = 16, | ||
| 747 | .precision = -1, | ||
| 748 | .flags = SPECIAL | SMALL, | ||
| 749 | }; | ||
| 750 | 756 | ||
| 751 | /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8) | 757 | /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8) |
| 752 | * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ | 758 | * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ |
| @@ -770,10 +776,10 @@ char *resource_string(char *buf, char *end, struct resource *res, | |||
| 770 | specp = &mem_spec; | 776 | specp = &mem_spec; |
| 771 | } else if (res->flags & IORESOURCE_IRQ) { | 777 | } else if (res->flags & IORESOURCE_IRQ) { |
| 772 | p = string(p, pend, "irq ", str_spec); | 778 | p = string(p, pend, "irq ", str_spec); |
| 773 | specp = &dec_spec; | 779 | specp = &default_dec_spec; |
| 774 | } else if (res->flags & IORESOURCE_DMA) { | 780 | } else if (res->flags & IORESOURCE_DMA) { |
| 775 | p = string(p, pend, "dma ", str_spec); | 781 | p = string(p, pend, "dma ", str_spec); |
| 776 | specp = &dec_spec; | 782 | specp = &default_dec_spec; |
| 777 | } else if (res->flags & IORESOURCE_BUS) { | 783 | } else if (res->flags & IORESOURCE_BUS) { |
| 778 | p = string(p, pend, "bus ", str_spec); | 784 | p = string(p, pend, "bus ", str_spec); |
| 779 | specp = &bus_spec; | 785 | specp = &bus_spec; |
| @@ -803,7 +809,7 @@ char *resource_string(char *buf, char *end, struct resource *res, | |||
| 803 | p = string(p, pend, " disabled", str_spec); | 809 | p = string(p, pend, " disabled", str_spec); |
| 804 | } else { | 810 | } else { |
| 805 | p = string(p, pend, " flags ", str_spec); | 811 | p = string(p, pend, " flags ", str_spec); |
| 806 | p = number(p, pend, res->flags, flag_spec); | 812 | p = number(p, pend, res->flags, default_flag_spec); |
| 807 | } | 813 | } |
| 808 | *p++ = ']'; | 814 | *p++ = ']'; |
| 809 | *p = '\0'; | 815 | *p = '\0'; |
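With its locals gone, resource_string() now picks one of the shared specs per resource type: IRQ and DMA numbers print in plain decimal via default_dec_spec, memory and I/O ranges keep their zero-padded hex specs, and leftover flags go through default_flag_spec. A kernel-side usage sketch with hypothetical resource values:

    /* usage sketch: %pR decodes a struct resource with type-appropriate formatting */
    struct resource res = {
        .start = 0xfe000000,
        .end   = 0xfe00ffff,
        .flags = IORESOURCE_MEM,
    };

    pr_info("BAR %pR\n", &res);   /* e.g. "BAR [mem 0xfe000000-0xfe00ffff]" */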
| @@ -913,9 +919,6 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, | |||
| 913 | int cur, rbot, rtop; | 919 | int cur, rbot, rtop; |
| 914 | bool first = true; | 920 | bool first = true; |
| 915 | 921 | ||
| 916 | /* reused to print numbers */ | ||
| 917 | spec = (struct printf_spec){ .base = 10 }; | ||
| 918 | |||
| 919 | rbot = cur = find_first_bit(bitmap, nr_bits); | 922 | rbot = cur = find_first_bit(bitmap, nr_bits); |
| 920 | while (cur < nr_bits) { | 923 | while (cur < nr_bits) { |
| 921 | rtop = cur; | 924 | rtop = cur; |
| @@ -930,13 +933,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, | |||
| 930 | } | 933 | } |
| 931 | first = false; | 934 | first = false; |
| 932 | 935 | ||
| 933 | buf = number(buf, end, rbot, spec); | 936 | buf = number(buf, end, rbot, default_dec_spec); |
| 934 | if (rbot < rtop) { | 937 | if (rbot < rtop) { |
| 935 | if (buf < end) | 938 | if (buf < end) |
| 936 | *buf = '-'; | 939 | *buf = '-'; |
| 937 | buf++; | 940 | buf++; |
| 938 | 941 | ||
| 939 | buf = number(buf, end, rtop, spec); | 942 | buf = number(buf, end, rtop, default_dec_spec); |
| 940 | } | 943 | } |
| 941 | 944 | ||
| 942 | rbot = cur; | 945 | rbot = cur; |
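bitmap_list_string() emits %*pbl-style output: consecutive set bits collapse into "a-b" ranges, and runs are comma-separated. A self-contained userspace sketch of the same loop, with find_next_bit reimplemented for a single word:

    #include <stdio.h>
    #include <stdbool.h>

    static int find_next_bit(unsigned long map, int nr_bits, int start)
    {
        for (int i = start; i < nr_bits; i++)
            if (map >> i & 1)
                return i;
        return nr_bits;
    }

    int main(void)
    {
        unsigned long map = 0xb9;   /* bits 0, 3, 4, 5, 7 */
        int nr_bits = 8, rbot, rtop, cur;
        bool first = true;

        rbot = cur = find_next_bit(map, nr_bits, 0);
        while (cur < nr_bits) {
            rtop = cur;
            cur = find_next_bit(map, nr_bits, cur + 1);
            if (cur < nr_bits && cur <= rtop + 1)
                continue;           /* still inside a contiguous run */

            if (!first)
                putchar(',');
            first = false;

            printf("%d", rbot);
            if (rbot < rtop)
                printf("-%d", rtop);

            rbot = cur;
        }
        putchar('\n');              /* prints "0,3-5,7" */
        return 0;
    }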
| @@ -1354,11 +1357,9 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
| 1354 | return string(buf, end, uuid, spec); | 1357 | return string(buf, end, uuid, spec); |
| 1355 | } | 1358 | } |
| 1356 | 1359 | ||
| 1357 | int kptr_restrict __read_mostly; | ||
| 1358 | |||
| 1359 | static noinline_for_stack | 1360 | static noinline_for_stack |
| 1360 | char *restricted_pointer(char *buf, char *end, const void *ptr, | 1361 | char *pointer_string(char *buf, char *end, const void *ptr, |
| 1361 | struct printf_spec spec) | 1362 | struct printf_spec spec) |
| 1362 | { | 1363 | { |
| 1363 | spec.base = 16; | 1364 | spec.base = 16; |
| 1364 | spec.flags |= SMALL; | 1365 | spec.flags |= SMALL; |
| @@ -1367,6 +1368,15 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, | |||
| 1367 | spec.flags |= ZEROPAD; | 1368 | spec.flags |= ZEROPAD; |
| 1368 | } | 1369 | } |
| 1369 | 1370 | ||
| 1371 | return number(buf, end, (unsigned long int)ptr, spec); | ||
| 1372 | } | ||
| 1373 | |||
| 1374 | int kptr_restrict __read_mostly; | ||
| 1375 | |||
| 1376 | static noinline_for_stack | ||
| 1377 | char *restricted_pointer(char *buf, char *end, const void *ptr, | ||
| 1378 | struct printf_spec spec) | ||
| 1379 | { | ||
| 1370 | switch (kptr_restrict) { | 1380 | switch (kptr_restrict) { |
| 1371 | case 0: | 1381 | case 0: |
| 1372 | /* Always print %pK values */ | 1382 | /* Always print %pK values */ |
| @@ -1378,8 +1388,11 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, | |||
| 1378 | * kptr_restrict==1 cannot be used in IRQ context | 1388 | * kptr_restrict==1 cannot be used in IRQ context |
| 1379 | * because its test for CAP_SYSLOG would be meaningless. | 1389 | * because its test for CAP_SYSLOG would be meaningless. |
| 1380 | */ | 1390 | */ |
| 1381 | if (in_irq() || in_serving_softirq() || in_nmi()) | 1391 | if (in_irq() || in_serving_softirq() || in_nmi()) { |
| 1392 | if (spec.field_width == -1) | ||
| 1393 | spec.field_width = 2 * sizeof(ptr); | ||
| 1382 | return string(buf, end, "pK-error", spec); | 1394 | return string(buf, end, "pK-error", spec); |
| 1395 | } | ||
| 1383 | 1396 | ||
| 1384 | /* | 1397 | /* |
| 1385 | * Only print the real pointer value if the current | 1398 | * Only print the real pointer value if the current |
| @@ -1404,7 +1417,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, | |||
| 1404 | break; | 1417 | break; |
| 1405 | } | 1418 | } |
| 1406 | 1419 | ||
| 1407 | return number(buf, end, (unsigned long)ptr, spec); | 1420 | return pointer_string(buf, end, ptr, spec); |
| 1408 | } | 1421 | } |
| 1409 | 1422 | ||
| 1410 | static noinline_for_stack | 1423 | static noinline_for_stack |
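restricted_pointer() now ends in the shared pointer_string() helper, and the IRQ-context "pK-error" fallback pads to pointer width so log columns stay aligned. For reference, a usage sketch of the sysctl levels the switch above honors (ptr is any kernel pointer):

    /* %pK output depends on the kptr_restrict sysctl:
     *   0 - print the raw address
     *   1 - raw address only if the reader has CAP_SYSLOG, else all zeros
     *   2 - always all zeros
     */
    pr_info("handle at %pK\n", ptr);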
| @@ -1456,9 +1469,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, | |||
| 1456 | return string(buf, end, NULL, spec); | 1469 | return string(buf, end, NULL, spec); |
| 1457 | 1470 | ||
| 1458 | switch (fmt[1]) { | 1471 | switch (fmt[1]) { |
| 1459 | case 'r': | ||
| 1460 | return number(buf, end, clk_get_rate(clk), spec); | ||
| 1461 | |||
| 1462 | case 'n': | 1472 | case 'n': |
| 1463 | default: | 1473 | default: |
| 1464 | #ifdef CONFIG_COMMON_CLK | 1474 | #ifdef CONFIG_COMMON_CLK |
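The 'r' case (%pCr, clock rate) is dropped here, presumably because clk_get_rate() can sleep and is therefore unsafe in atomic printk paths. Callers that want the rate can fetch it themselves from sleepable context (usage sketch):

    /* sleepable context only: query the rate explicitly instead of %pCr */
    pr_info("clk %pC running at %lu Hz\n", clk, clk_get_rate(clk));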
| @@ -1474,23 +1484,13 @@ char *format_flags(char *buf, char *end, unsigned long flags, | |||
| 1474 | const struct trace_print_flags *names) | 1484 | const struct trace_print_flags *names) |
| 1475 | { | 1485 | { |
| 1476 | unsigned long mask; | 1486 | unsigned long mask; |
| 1477 | const struct printf_spec strspec = { | ||
| 1478 | .field_width = -1, | ||
| 1479 | .precision = -1, | ||
| 1480 | }; | ||
| 1481 | const struct printf_spec numspec = { | ||
| 1482 | .flags = SPECIAL|SMALL, | ||
| 1483 | .field_width = -1, | ||
| 1484 | .precision = -1, | ||
| 1485 | .base = 16, | ||
| 1486 | }; | ||
| 1487 | 1487 | ||
| 1488 | for ( ; flags && names->name; names++) { | 1488 | for ( ; flags && names->name; names++) { |
| 1489 | mask = names->mask; | 1489 | mask = names->mask; |
| 1490 | if ((flags & mask) != mask) | 1490 | if ((flags & mask) != mask) |
| 1491 | continue; | 1491 | continue; |
| 1492 | 1492 | ||
| 1493 | buf = string(buf, end, names->name, strspec); | 1493 | buf = string(buf, end, names->name, default_str_spec); |
| 1494 | 1494 | ||
| 1495 | flags &= ~mask; | 1495 | flags &= ~mask; |
| 1496 | if (flags) { | 1496 | if (flags) { |
| @@ -1501,7 +1501,7 @@ char *format_flags(char *buf, char *end, unsigned long flags, | |||
| 1501 | } | 1501 | } |
| 1502 | 1502 | ||
| 1503 | if (flags) | 1503 | if (flags) |
| 1504 | buf = number(buf, end, flags, numspec); | 1504 | buf = number(buf, end, flags, default_flag_spec); |
| 1505 | 1505 | ||
| 1506 | return buf; | 1506 | return buf; |
| 1507 | } | 1507 | } |
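format_flags() prints every matched flag name joined by '|', then any leftover bits in SPECIAL|SMALL hex via default_flag_spec. A runnable userspace sketch of the same iteration, with hypothetical flag names in place of the kernel's trace tables:

    #include <stdio.h>

    struct trace_print_flags {
        unsigned long mask;
        const char *name;
    };

    /* hypothetical flag table; real ones come from the tracing headers */
    static const struct trace_print_flags names[] = {
        { 0x1, "locked" },
        { 0x2, "dirty" },
        { 0, NULL },
    };

    int main(void)
    {
        unsigned long flags = 0x1 | 0x2 | 0x10;
        const struct trace_print_flags *p;

        for (p = names; flags && p->name; p++) {
            if ((flags & p->mask) != p->mask)
                continue;
            printf("%s", p->name);
            flags &= ~p->mask;
            if (flags)
                putchar('|');
        }
        if (flags)
            printf("%#lx", flags);  /* leftover bits, like default_flag_spec */
        putchar('\n');              /* prints "locked|dirty|0x10" */
        return 0;
    }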
| @@ -1548,22 +1548,18 @@ char *device_node_gen_full_name(const struct device_node *np, char *buf, char *e | |||
| 1548 | { | 1548 | { |
| 1549 | int depth; | 1549 | int depth; |
| 1550 | const struct device_node *parent = np->parent; | 1550 | const struct device_node *parent = np->parent; |
| 1551 | static const struct printf_spec strspec = { | ||
| 1552 | .field_width = -1, | ||
| 1553 | .precision = -1, | ||
| 1554 | }; | ||
| 1555 | 1551 | ||
| 1556 | /* special case for root node */ | 1552 | /* special case for root node */ |
| 1557 | if (!parent) | 1553 | if (!parent) |
| 1558 | return string(buf, end, "/", strspec); | 1554 | return string(buf, end, "/", default_str_spec); |
| 1559 | 1555 | ||
| 1560 | for (depth = 0; parent->parent; depth++) | 1556 | for (depth = 0; parent->parent; depth++) |
| 1561 | parent = parent->parent; | 1557 | parent = parent->parent; |
| 1562 | 1558 | ||
| 1563 | for ( ; depth >= 0; depth--) { | 1559 | for ( ; depth >= 0; depth--) { |
| 1564 | buf = string(buf, end, "/", strspec); | 1560 | buf = string(buf, end, "/", default_str_spec); |
| 1565 | buf = string(buf, end, device_node_name_for_depth(np, depth), | 1561 | buf = string(buf, end, device_node_name_for_depth(np, depth), |
| 1566 | strspec); | 1562 | default_str_spec); |
| 1567 | } | 1563 | } |
| 1568 | return buf; | 1564 | return buf; |
| 1569 | } | 1565 | } |
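The full-name helper first counts how deep the node sits, then prints "/" plus the ancestor name at each depth from the root down. A userspace sketch of the same two passes; the node layout and name_at() helper are stand-ins for the kernel's device_node and device_node_name_for_depth():

    #include <stdio.h>

    struct node {
        const char *name;
        const struct node *parent;
    };

    /* stand-in for device_node_name_for_depth(): climb 'up' levels, return name */
    static const char *name_at(const struct node *np, int up)
    {
        while (up--)
            np = np->parent;
        return np->name;
    }

    int main(void)
    {
        struct node root = { "", NULL };
        struct node soc  = { "soc", &root };
        struct node uart = { "serial@10000000", &soc };
        const struct node *np = &uart, *parent = np->parent;
        int depth;

        if (!parent) {              /* special case: root prints as "/" */
            puts("/");
            return 0;
        }

        for (depth = 0; parent->parent; depth++)
            parent = parent->parent;

        for (; depth >= 0; depth--)
            printf("/%s", name_at(np, depth));
        putchar('\n');              /* prints "/soc/serial@10000000" */
        return 0;
    }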
| @@ -1655,20 +1651,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, | |||
| 1655 | return widen_string(buf, buf - buf_start, end, spec); | 1651 | return widen_string(buf, buf - buf_start, end, spec); |
| 1656 | } | 1652 | } |
| 1657 | 1653 | ||
| 1658 | static noinline_for_stack | ||
| 1659 | char *pointer_string(char *buf, char *end, const void *ptr, | ||
| 1660 | struct printf_spec spec) | ||
| 1661 | { | ||
| 1662 | spec.base = 16; | ||
| 1663 | spec.flags |= SMALL; | ||
| 1664 | if (spec.field_width == -1) { | ||
| 1665 | spec.field_width = 2 * sizeof(ptr); | ||
| 1666 | spec.flags |= ZEROPAD; | ||
| 1667 | } | ||
| 1668 | |||
| 1669 | return number(buf, end, (unsigned long int)ptr, spec); | ||
| 1670 | } | ||
| 1671 | |||
| 1672 | static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); | 1654 | static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); |
| 1673 | static siphash_key_t ptr_key __read_mostly; | 1655 | static siphash_key_t ptr_key __read_mostly; |
| 1674 | 1656 | ||
| @@ -1710,13 +1692,13 @@ early_initcall(initialize_ptr_random); | |||
| 1710 | /* Maps a pointer to a 32 bit unique identifier. */ | 1692 | /* Maps a pointer to a 32 bit unique identifier. */ |
| 1711 | static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | 1693 | static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) |
| 1712 | { | 1694 | { |
| 1695 | const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)"; | ||
| 1713 | unsigned long hashval; | 1696 | unsigned long hashval; |
| 1714 | const int default_width = 2 * sizeof(ptr); | ||
| 1715 | 1697 | ||
| 1716 | if (static_branch_unlikely(¬_filled_random_ptr_key)) { | 1698 | if (static_branch_unlikely(¬_filled_random_ptr_key)) { |
| 1717 | spec.field_width = default_width; | 1699 | spec.field_width = 2 * sizeof(ptr); |
| 1718 | /* string length must be less than default_width */ | 1700 | /* string length must be less than default_width */ |
| 1719 | return string(buf, end, "(ptrval)", spec); | 1701 | return string(buf, end, str, spec); |
| 1720 | } | 1702 | } |
| 1721 | 1703 | ||
| 1722 | #ifdef CONFIG_64BIT | 1704 | #ifdef CONFIG_64BIT |
| @@ -1729,15 +1711,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | |||
| 1729 | #else | 1711 | #else |
| 1730 | hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key); | 1712 | hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key); |
| 1731 | #endif | 1713 | #endif |
| 1732 | 1714 | return pointer_string(buf, end, (const void *)hashval, spec); | |
| 1733 | spec.flags |= SMALL; | ||
| 1734 | if (spec.field_width == -1) { | ||
| 1735 | spec.field_width = default_width; | ||
| 1736 | spec.flags |= ZEROPAD; | ||
| 1737 | } | ||
| 1738 | spec.base = 16; | ||
| 1739 | |||
| 1740 | return number(buf, end, hashval, spec); | ||
| 1741 | } | 1715 | } |
| 1742 | 1716 | ||
| 1743 | /* | 1717 | /* |
| @@ -1750,10 +1724,10 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | |||
| 1750 | * | 1724 | * |
| 1751 | * Right now we handle: | 1725 | * Right now we handle: |
| 1752 | * | 1726 | * |
| 1753 | * - 'F' For symbolic function descriptor pointers with offset | 1727 | * - 'S' For symbolic direct pointers (or function descriptors) with offset |
| 1754 | * - 'f' For simple symbolic function names without offset | 1728 | * - 's' For symbolic direct pointers (or function descriptors) without offset |
| 1755 | * - 'S' For symbolic direct pointers with offset | 1729 | * - 'F' Same as 'S' |
| 1756 | * - 's' For symbolic direct pointers without offset | 1730 | * - 'f' Same as 's' |
| 1757 | * - '[FfSs]R' as above with __builtin_extract_return_addr() translation | 1731 | * - '[FfSs]R' as above with __builtin_extract_return_addr() translation |
| 1758 | * - 'B' For backtraced symbolic direct pointers with offset | 1732 | * - 'B' For backtraced symbolic direct pointers with offset |
| 1759 | * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] | 1733 | * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] |
| @@ -1850,10 +1824,6 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | |||
| 1850 | * ** When making changes please also update: | 1824 | * ** When making changes please also update: |
| 1851 | * Documentation/core-api/printk-formats.rst | 1825 | * Documentation/core-api/printk-formats.rst |
| 1852 | * | 1826 | * |
| 1853 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | ||
| 1854 | * function pointers are really function descriptors, which contain a | ||
| 1855 | * pointer to the real address. | ||
| 1856 | * | ||
| 1857 | * Note: The default behaviour (unadorned %p) is to hash the address, | 1827 | * Note: The default behaviour (unadorned %p) is to hash the address, |
| 1858 | * rendering it useful as a unique identifier. | 1828 | * rendering it useful as a unique identifier. |
| 1859 | */ | 1829 | */ |
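So after this refactor, unadorned %p hashes the address with siphash keyed by boot-time randomness, folds it to 32 bits on 64-bit kernels, and hands the result to the shared pointer_string(); before the key is filled it prints a width-matched "(____ptrval____)" placeholder instead. A userspace sketch of that flow, where the mixer is only a stand-in for siphash_1u64():

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in mixer; the kernel uses siphash keyed by get_random_bytes() */
    static uint64_t mix(uint64_t x, uint64_t key)
    {
        x ^= key;
        x *= 0x9e3779b97f4a7c15ULL;
        return x ^ (x >> 32);
    }

    int main(void)
    {
        int obj;
        uint64_t key = 0xdeadbeefcafef00dULL;   /* kernel: filled once at early init */
        unsigned long hashval = (unsigned long)mix((uintptr_t)&obj, key);

        if (sizeof(void *) == 8)
            hashval &= 0xffffffff;  /* fold to 32 bits: enough for a unique id */

        /* zero-padded to pointer width, like pointer_string() with no field width */
        printf("%p -> %0*lx\n", (void *)&obj,
               (int)(2 * sizeof(void *)), hashval);
        return 0;
    }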
| @@ -2129,6 +2099,7 @@ qualifier: | |||
| 2129 | 2099 | ||
| 2130 | case 'x': | 2100 | case 'x': |
| 2131 | spec->flags |= SMALL; | 2101 | spec->flags |= SMALL; |
| 2102 | /* fall through */ | ||
| 2132 | 2103 | ||
| 2133 | case 'X': | 2104 | case 'X': |
| 2134 | spec->base = 16; | 2105 | spec->base = 16; |
| @@ -3087,8 +3058,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
| 3087 | break; | 3058 | break; |
| 3088 | case 'i': | 3059 | case 'i': |
| 3089 | base = 0; | 3060 | base = 0; |
| 3061 | /* fall through */ | ||
| 3090 | case 'd': | 3062 | case 'd': |
| 3091 | is_sign = true; | 3063 | is_sign = true; |
| 3064 | /* fall through */ | ||
| 3092 | case 'u': | 3065 | case 'u': |
| 3093 | break; | 3066 | break; |
| 3094 | case '%': | 3067 | case '%': |
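Both of these hunks annotate deliberate switch fall-throughs, in format_decode() and vsscanf() respectively. GCC's -Wimplicit-fallthrough recognizes a /* fall through */ comment immediately before the next case label and suppresses the warning there. A compilable sketch of the same pattern:

    #include <stdio.h>

    /* build with: gcc -Wimplicit-fallthrough fallthrough.c */
    static unsigned int base_for(char qualifier)
    {
        unsigned int flags = 0, base = 10;

        switch (qualifier) {
        case 'x':
            flags |= 1;     /* lowercase, like the SMALL flag above */
            /* fall through */
        case 'X':
            base = 16;
            break;
        case 'd':
            break;
        }
        return base | (flags << 8);
    }

    int main(void)
    {
        printf("%x %x\n", base_for('x'), base_for('X'));  /* "110 10" */
        return 0;
    }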
