Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |  14
-rw-r--r--  lib/Kconfig.debug       |  20
-rw-r--r--  lib/Makefile            |   6
-rw-r--r--  lib/crc32.c             | 456
-rw-r--r--  lib/debugobjects.c      |   2
-rw-r--r--  lib/digsig.c            |   2
-rw-r--r--  lib/genalloc.c          |  28
-rw-r--r--  lib/kfifo.c             |   4
-rw-r--r--  lib/kobject.c           |  90
-rw-r--r--  lib/llist.c             |  22
-rw-r--r--  lib/locking-selftest.c  |   2
-rw-r--r--  lib/lockref.c           |   3
-rw-r--r--  lib/percpu-rwsem.c      | 165
-rw-r--r--  lib/percpu_counter.c    |  15
-rw-r--r--  lib/percpu_ida.c        |  89
-rw-r--r--  lib/percpu_test.c       | 138
-rw-r--r--  lib/random32.c          | 311
-rw-r--r--  lib/rwsem-spinlock.c    | 296
-rw-r--r--  lib/rwsem.c             | 293
-rw-r--r--  lib/scatterlist.c       |   3
-rw-r--r--  lib/show_mem.c          |  39
-rw-r--r--  lib/smp_processor_id.c  |   3
-rw-r--r--  lib/spinlock_debug.c    | 302
-rw-r--r--  lib/swiotlb.c           |   6
-rw-r--r--  lib/vsprintf.c          |  55
25 files changed, 984 insertions, 1380 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index b3c8be0da17f..06dc74200a51 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
| @@ -51,13 +51,6 @@ config PERCPU_RWSEM | |||
| 51 | config ARCH_USE_CMPXCHG_LOCKREF | 51 | config ARCH_USE_CMPXCHG_LOCKREF |
| 52 | bool | 52 | bool |
| 53 | 53 | ||
| 54 | config CMPXCHG_LOCKREF | ||
| 55 | def_bool y if ARCH_USE_CMPXCHG_LOCKREF | ||
| 56 | depends on SMP | ||
| 57 | depends on !GENERIC_LOCKBREAK | ||
| 58 | depends on !DEBUG_SPINLOCK | ||
| 59 | depends on !DEBUG_LOCK_ALLOC | ||
| 60 | |||
| 61 | config CRC_CCITT | 54 | config CRC_CCITT |
| 62 | tristate "CRC-CCITT functions" | 55 | tristate "CRC-CCITT functions" |
| 63 | help | 56 | help |
| @@ -189,6 +182,13 @@ config AUDIT_GENERIC | |||
| 189 | depends on AUDIT && !AUDIT_ARCH | 182 | depends on AUDIT && !AUDIT_ARCH |
| 190 | default y | 183 | default y |
| 191 | 184 | ||
| 185 | config RANDOM32_SELFTEST | ||
| 186 | bool "PRNG perform self test on init" | ||
| 187 | default n | ||
| 188 | help | ||
| 189 | This option enables the 32 bit PRNG library functions to perform a | ||
| 190 | self test on initialization. | ||
| 191 | |||
| 192 | # | 192 | # |
| 193 | # compression support is select'ed if needed | 193 | # compression support is select'ed if needed |
| 194 | # | 194 | # |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 06344d986eb9..db25707aa41b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -312,6 +312,15 @@ config MAGIC_SYSRQ | |||
| 312 | keys are documented in <file:Documentation/sysrq.txt>. Don't say Y | 312 | keys are documented in <file:Documentation/sysrq.txt>. Don't say Y |
| 313 | unless you really know what this hack does. | 313 | unless you really know what this hack does. |
| 314 | 314 | ||
| 315 | config MAGIC_SYSRQ_DEFAULT_ENABLE | ||
| 316 | hex "Enable magic SysRq key functions by default" | ||
| 317 | depends on MAGIC_SYSRQ | ||
| 318 | default 0x1 | ||
| 319 | help | ||
| 320 | Specifies which SysRq key functions are enabled by default. | ||
| 321 | This may be set to 1 or 0 to enable or disable them all, or | ||
| 322 | to a bitmask as described in Documentation/sysrq.txt. | ||
| 323 | |||
| 315 | config DEBUG_KERNEL | 324 | config DEBUG_KERNEL |
| 316 | bool "Kernel debugging" | 325 | bool "Kernel debugging" |
| 317 | help | 326 | help |
| @@ -983,7 +992,7 @@ config DEBUG_KOBJECT | |||
| 983 | 992 | ||
| 984 | config DEBUG_KOBJECT_RELEASE | 993 | config DEBUG_KOBJECT_RELEASE |
| 985 | bool "kobject release debugging" | 994 | bool "kobject release debugging" |
| 986 | depends on DEBUG_KERNEL | 995 | depends on DEBUG_OBJECTS_TIMERS |
| 987 | help | 996 | help |
| 988 | kobjects are reference counted objects. This means that their | 997 | kobjects are reference counted objects. This means that their |
| 989 | last reference count put is not predictable, and the kobject can | 998 | last reference count put is not predictable, and the kobject can |
| @@ -1472,6 +1481,15 @@ config INTERVAL_TREE_TEST | |||
| 1472 | help | 1481 | help |
| 1473 | A benchmark measuring the performance of the interval tree library | 1482 | A benchmark measuring the performance of the interval tree library |
| 1474 | 1483 | ||
| 1484 | config PERCPU_TEST | ||
| 1485 | tristate "Per cpu operations test" | ||
| 1486 | depends on m && DEBUG_KERNEL | ||
| 1487 | help | ||
| 1488 | Enable this option to build test module which validates per-cpu | ||
| 1489 | operations. | ||
| 1490 | |||
| 1491 | If unsure, say N. | ||
| 1492 | |||
| 1475 | config ATOMIC64_SELFTEST | 1493 | config ATOMIC64_SELFTEST |
| 1476 | bool "Perform an atomic64_t self-test at boot" | 1494 | bool "Perform an atomic64_t self-test at boot" |
| 1477 | help | 1495 | help |
diff --git a/lib/Makefile b/lib/Makefile
index f3bb2cb98adf..d480a8c92385 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -42,10 +42,6 @@ obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o | |||
| 42 | obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o | 42 | obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o |
| 43 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o | 43 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o |
| 44 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | 44 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o |
| 45 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | ||
| 46 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | ||
| 47 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | ||
| 48 | lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o | ||
| 49 | 45 | ||
| 50 | CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) | 46 | CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) |
| 51 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 47 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
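Note that the spinlock-debug, rwsem, rwsem-spinlock and percpu-rwsem objects dropped from this Makefile are not being removed from the kernel: this merge relocates that code under kernel/locking/, which is why the corresponding lib/ source files appear later in this diff as pure deletions.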
| @@ -157,6 +153,8 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o | |||
| 157 | 153 | ||
| 158 | interval_tree_test-objs := interval_tree_test_main.o interval_tree.o | 154 | interval_tree_test-objs := interval_tree_test_main.o interval_tree.o |
| 159 | 155 | ||
| 156 | obj-$(CONFIG_PERCPU_TEST) += percpu_test.o | ||
| 157 | |||
| 160 | obj-$(CONFIG_ASN1) += asn1_decoder.o | 158 | obj-$(CONFIG_ASN1) += asn1_decoder.o |
| 161 | 159 | ||
| 162 | obj-$(CONFIG_FONT_SUPPORT) += fonts/ | 160 | obj-$(CONFIG_FONT_SUPPORT) += fonts/ |
diff --git a/lib/crc32.c b/lib/crc32.c
index 410093dbe51c..70f00ca5ef1e 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/crc32.h> | 29 | #include <linux/crc32.h> |
| 30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
| 32 | #include <linux/sched.h> | ||
| 32 | #include "crc32defs.h" | 33 | #include "crc32defs.h" |
| 33 | 34 | ||
| 34 | #if CRC_LE_BITS > 8 | 35 | #if CRC_LE_BITS > 8 |
| @@ -49,6 +50,30 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); | |||
| 49 | MODULE_DESCRIPTION("Various CRC32 calculations"); | 50 | MODULE_DESCRIPTION("Various CRC32 calculations"); |
| 50 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
| 51 | 52 | ||
| 53 | #define GF2_DIM 32 | ||
| 54 | |||
| 55 | static u32 gf2_matrix_times(u32 *mat, u32 vec) | ||
| 56 | { | ||
| 57 | u32 sum = 0; | ||
| 58 | |||
| 59 | while (vec) { | ||
| 60 | if (vec & 1) | ||
| 61 | sum ^= *mat; | ||
| 62 | vec >>= 1; | ||
| 63 | mat++; | ||
| 64 | } | ||
| 65 | |||
| 66 | return sum; | ||
| 67 | } | ||
| 68 | |||
| 69 | static void gf2_matrix_square(u32 *square, u32 *mat) | ||
| 70 | { | ||
| 71 | int i; | ||
| 72 | |||
| 73 | for (i = 0; i < GF2_DIM; i++) | ||
| 74 | square[i] = gf2_matrix_times(mat, mat[i]); | ||
| 75 | } | ||
| 76 | |||
| 52 | #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 | 77 | #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 |
| 53 | 78 | ||
| 54 | /* implements slicing-by-4 or slicing-by-8 algorithm */ | 79 | /* implements slicing-by-4 or slicing-by-8 algorithm */ |
| @@ -130,6 +155,52 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) | |||
| 130 | } | 155 | } |
| 131 | #endif | 156 | #endif |
| 132 | 157 | ||
| 158 | /* For conditions of distribution and use, see copyright notice in zlib.h */ | ||
| 159 | static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2, | ||
| 160 | u32 polynomial) | ||
| 161 | { | ||
| 162 | u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */ | ||
| 163 | u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */ | ||
| 164 | u32 row; | ||
| 165 | int i; | ||
| 166 | |||
| 167 | if (len2 <= 0) | ||
| 168 | return crc1; | ||
| 169 | |||
| 170 | /* Put operator for one zero bit in odd */ | ||
| 171 | odd[0] = polynomial; | ||
| 172 | row = 1; | ||
| 173 | for (i = 1; i < GF2_DIM; i++) { | ||
| 174 | odd[i] = row; | ||
| 175 | row <<= 1; | ||
| 176 | } | ||
| 177 | |||
| 178 | gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */ | ||
| 179 | gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */ | ||
| 180 | |||
| 181 | /* Apply len2 zeros to crc1 (first square will put the operator for one | ||
| 182 | * zero byte, eight zero bits, in even). | ||
| 183 | */ | ||
| 184 | do { | ||
| 185 | /* Apply zeros operator for this bit of len2 */ | ||
| 186 | gf2_matrix_square(even, odd); | ||
| 187 | if (len2 & 1) | ||
| 188 | crc1 = gf2_matrix_times(even, crc1); | ||
| 189 | len2 >>= 1; | ||
| 190 | /* If no more bits set, then done */ | ||
| 191 | if (len2 == 0) | ||
| 192 | break; | ||
| 193 | /* Another iteration of the loop with odd and even swapped */ | ||
| 194 | gf2_matrix_square(odd, even); | ||
| 195 | if (len2 & 1) | ||
| 196 | crc1 = gf2_matrix_times(odd, crc1); | ||
| 197 | len2 >>= 1; | ||
| 198 | } while (len2 != 0); | ||
| 199 | |||
| 200 | crc1 ^= crc2; | ||
| 201 | return crc1; | ||
| 202 | } | ||
| 203 | |||
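The combine trick works because CRC32 is linear over GF(2): appending len2 zero bytes to a message transforms its CRC by a fixed 32x32 bit-matrix, and the loop above applies that operator with O(log len2) matrix squarings instead of feeding len2 zero bytes through the table-driven code. A minimal usage sketch, with seed, buf, len_a and len_b as illustrative names (the second fragment is seeded with 0, matching the self-tests added further down):

	u32 crc_a  = crc32_le(seed, buf, len_a);
	u32 crc_b  = crc32_le(0, buf + len_a, len_b);
	u32 crc_ab = crc32_le_combine(crc_a, crc_b, len_b);
	/* crc_ab == crc32_le(seed, buf, len_a + len_b) */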
| 133 | /** | 204 | /** |
| 134 | * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II | 205 | * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II |
| 135 | * CRC32/CRC32C | 206 | * CRC32/CRC32C |
| @@ -200,8 +271,19 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) | |||
| 200 | (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); | 271 | (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); |
| 201 | } | 272 | } |
| 202 | #endif | 273 | #endif |
| 274 | u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2) | ||
| 275 | { | ||
| 276 | return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE); | ||
| 277 | } | ||
| 278 | |||
| 279 | u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2) | ||
| 280 | { | ||
| 281 | return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE); | ||
| 282 | } | ||
| 203 | EXPORT_SYMBOL(crc32_le); | 283 | EXPORT_SYMBOL(crc32_le); |
| 284 | EXPORT_SYMBOL(crc32_le_combine); | ||
| 204 | EXPORT_SYMBOL(__crc32c_le); | 285 | EXPORT_SYMBOL(__crc32c_le); |
| 286 | EXPORT_SYMBOL(__crc32c_le_combine); | ||
| 205 | 287 | ||
| 206 | /** | 288 | /** |
| 207 | * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 | 289 | * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 |
| @@ -795,206 +877,106 @@ static struct crc_test { | |||
| 795 | u32 crc32c_le; /* expected crc32c_le result */ | 877 | u32 crc32c_le; /* expected crc32c_le result */ |
| 796 | } test[] = | 878 | } test[] = |
| 797 | { | 879 | { |
| 798 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, | 880 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c}, |
| 799 | 0xf6e93d6c}, | 881 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca}, |
| 800 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, | 882 | {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8}, |
| 801 | 0x0fe92aca}, | 883 | {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a}, |
| 802 | {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, | 884 | {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152}, |
| 803 | 0x52e1ebb8}, | 885 | {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7}, |
| 804 | {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, | 886 | {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc}, |
| 805 | 0x0798af9a}, | 887 | {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2}, |
| 806 | {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, | 888 | {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d}, |
| 807 | 0x18eb3152}, | 889 | {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5}, |
| 808 | {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, | 890 | {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f}, |
| 809 | 0xd00d08c7}, | 891 | {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a}, |
| 810 | {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, | 892 | {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8}, |
| 811 | 0x8ba966bc}, | 893 | {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa}, |
| 812 | {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, | 894 | {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801}, |
| 813 | 0x11d694a2}, | 895 | {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597}, |
| 814 | {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, | 896 | {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b}, |
| 815 | 0x6ab3208d}, | 897 | {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a}, |
| 816 | {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, | 898 | {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d}, |
| 817 | 0xba4603c5}, | 899 | {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982}, |
| 818 | {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, | 900 | {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18}, |
| 819 | 0xe6071c6f}, | 901 | {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7}, |
| 820 | {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, | 902 | {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3}, |
| 821 | 0x179ec30a}, | 903 | {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5}, |
| 822 | {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, | 904 | {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59}, |
| 823 | 0x0903beb8}, | 905 | {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e}, |
| 824 | {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, | 906 | {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603}, |
| 825 | 0x6a7cb4fa}, | 907 | {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060}, |
| 826 | {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, | 908 | {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072}, |
| 827 | 0xdb535801}, | 909 | {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59}, |
| 828 | {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, | 910 | {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213}, |
| 829 | 0x92bed597}, | 911 | {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41}, |
| 830 | {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, | 912 | {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5}, |
| 831 | 0x192a3f1b}, | 913 | {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2}, |
| 832 | {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, | 914 | {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a}, |
| 833 | 0xccbaec1a}, | 915 | {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2}, |
| 834 | {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, | 916 | {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b}, |
| 835 | 0x7eabae4d}, | 917 | {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1}, |
| 836 | {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, | 918 | {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba}, |
| 837 | 0x28c72982}, | 919 | {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62}, |
| 838 | {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, | 920 | {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe}, |
| 839 | 0xc3cd4d18}, | 921 | {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988}, |
| 840 | {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, | 922 | {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be}, |
| 841 | 0xbca8f0e7}, | 923 | {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546}, |
| 842 | {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, | 924 | {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc}, |
| 843 | 0x713f60b3}, | 925 | {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69}, |
| 844 | {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, | 926 | {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a}, |
| 845 | 0xebd08fd5}, | 927 | {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2}, |
| 846 | {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, | 928 | {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd}, |
| 847 | 0x64406c59}, | 929 | {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb}, |
| 848 | {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, | 930 | {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b}, |
| 849 | 0x7421890e}, | 931 | {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76}, |
| 850 | {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, | 932 | {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339}, |
| 851 | 0xe9347603}, | 933 | {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9}, |
| 852 | {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, | 934 | {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548}, |
| 853 | 0x1bef9060}, | 935 | {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de}, |
| 854 | {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, | 936 | {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59}, |
| 855 | 0x34720072}, | 937 | {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b}, |
| 856 | {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, | 938 | {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73}, |
| 857 | 0x48310f59}, | 939 | {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11}, |
| 858 | {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, | 940 | {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c}, |
| 859 | 0x783a4213}, | 941 | {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b}, |
| 860 | {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, | 942 | {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb}, |
| 861 | 0x9e8efd41}, | 943 | {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc}, |
| 862 | {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, | 944 | {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196}, |
| 863 | 0xfc3d34a5}, | 945 | {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a}, |
| 864 | {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, | 946 | {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de}, |
| 865 | 0x17a52ae2}, | 947 | {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9}, |
| 866 | {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, | 948 | {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0}, |
| 867 | 0x886d935a}, | 949 | {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60}, |
| 868 | {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, | 950 | {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6}, |
| 869 | 0xeaaeaeb2}, | 951 | {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c}, |
| 870 | {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, | 952 | {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73}, |
| 871 | 0x8e900a4b}, | 953 | {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7}, |
| 872 | {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, | 954 | {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf}, |
| 873 | 0xd74662b1}, | 955 | {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83}, |
| 874 | {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, | 956 | {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867}, |
| 875 | 0xd26752ba}, | 957 | {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211}, |
| 876 | {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, | 958 | {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2}, |
| 877 | 0x8b1fcd62}, | 959 | {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874}, |
| 878 | {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, | 960 | {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f}, |
| 879 | 0xf54342fe}, | 961 | {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff}, |
| 880 | {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, | 962 | {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95}, |
| 881 | 0x5b95b988}, | 963 | {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd}, |
| 882 | {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, | 964 | {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06}, |
| 883 | 0x2e1176be}, | 965 | {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784}, |
| 884 | {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, | 966 | {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616}, |
| 885 | 0x66120546}, | 967 | {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c}, |
| 886 | {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, | 968 | {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c}, |
| 887 | 0xf256a5cc}, | 969 | {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d}, |
| 888 | {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, | 970 | {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d}, |
| 889 | 0x4af1dd69}, | 971 | {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272}, |
| 890 | {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, | 972 | {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb}, |
| 891 | 0x56f0a04a}, | 973 | {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b}, |
| 892 | {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, | 974 | {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e}, |
| 893 | 0x74f6b6b2}, | 975 | {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23}, |
| 894 | {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, | 976 | {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672}, |
| 895 | 0x085951fd}, | 977 | {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86}, |
| 896 | {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, | 978 | {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd}, |
| 897 | 0xc65387eb}, | 979 | {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48}, |
| 898 | {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, | ||
| 899 | 0x1ca9257b}, | ||
| 900 | {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, | ||
| 901 | 0xfd196d76}, | ||
| 902 | {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, | ||
| 903 | 0x5ef88339}, | ||
| 904 | {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, | ||
| 905 | 0x2c3714d9}, | ||
| 906 | {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, | ||
| 907 | 0x58576548}, | ||
| 908 | {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, | ||
| 909 | 0xfd7c57de}, | ||
| 910 | {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, | ||
| 911 | 0xd5fedd59}, | ||
| 912 | {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, | ||
| 913 | 0x1cc3b17b}, | ||
| 914 | {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, | ||
| 915 | 0x270eed73}, | ||
| 916 | {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, | ||
| 917 | 0x91ecbb11}, | ||
| 918 | {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, | ||
| 919 | 0x05ed8d0c}, | ||
| 920 | {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, | ||
| 921 | 0x0b09ad5b}, | ||
| 922 | {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, | ||
| 923 | 0xf8d511fb}, | ||
| 924 | {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, | ||
| 925 | 0x5ad832cc}, | ||
| 926 | {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, | ||
| 927 | 0x1214d196}, | ||
| 928 | {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, | ||
| 929 | 0x5747218a}, | ||
| 930 | {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, | ||
| 931 | 0xde8f14de}, | ||
| 932 | {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, | ||
| 933 | 0x3563b7b9}, | ||
| 934 | {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, | ||
| 935 | 0x071475d0}, | ||
| 936 | {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, | ||
| 937 | 0x54c79d60}, | ||
| 938 | {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, | ||
| 939 | 0x4c53eee6}, | ||
| 940 | {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, | ||
| 941 | 0x10137a3c}, | ||
| 942 | {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, | ||
| 943 | 0xaa9d6c73}, | ||
| 944 | {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, | ||
| 945 | 0xb63d23e7}, | ||
| 946 | {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, | ||
| 947 | 0x7f53e9cf}, | ||
| 948 | {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, | ||
| 949 | 0x13c1cd83}, | ||
| 950 | {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, | ||
| 951 | 0x49ff5867}, | ||
| 952 | {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, | ||
| 953 | 0x8467f211}, | ||
| 954 | {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, | ||
| 955 | 0x3f9683b2}, | ||
| 956 | {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, | ||
| 957 | 0x76a3f874}, | ||
| 958 | {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, | ||
| 959 | 0x863b702f}, | ||
| 960 | {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, | ||
| 961 | 0xdc6c58ff}, | ||
| 962 | {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, | ||
| 963 | 0x0622cc95}, | ||
| 964 | {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, | ||
| 965 | 0xe85605cd}, | ||
| 966 | {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, | ||
| 967 | 0x31da5f06}, | ||
| 968 | {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, | ||
| 969 | 0xa1f2e784}, | ||
| 970 | {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, | ||
| 971 | 0xb07cc616}, | ||
| 972 | {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, | ||
| 973 | 0xbf943b6c}, | ||
| 974 | {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, | ||
| 975 | 0x2c01af1c}, | ||
| 976 | {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, | ||
| 977 | 0x0fe5f56d}, | ||
| 978 | {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, | ||
| 979 | 0xf8943b2d}, | ||
| 980 | {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, | ||
| 981 | 0xe4d89272}, | ||
| 982 | {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, | ||
| 983 | 0x7c2f6bbb}, | ||
| 984 | {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, | ||
| 985 | 0xabbf388b}, | ||
| 986 | {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, | ||
| 987 | 0x1dca1f4e}, | ||
| 988 | {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, | ||
| 989 | 0x5c170e23}, | ||
| 990 | {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, | ||
| 991 | 0xc0e9d672}, | ||
| 992 | {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, | ||
| 993 | 0xc18bdc86}, | ||
| 994 | {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, | ||
| 995 | 0xa874fcdd}, | ||
| 996 | {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, | ||
| 997 | 0x9dc0bb48}, | ||
| 998 | }; | 980 | }; |
| 999 | 981 | ||
| 1000 | #include <linux/time.h> | 982 | #include <linux/time.h> |
| @@ -1050,6 +1032,41 @@ static int __init crc32c_test(void) | |||
| 1050 | return 0; | 1032 | return 0; |
| 1051 | } | 1033 | } |
| 1052 | 1034 | ||
| 1035 | static int __init crc32c_combine_test(void) | ||
| 1036 | { | ||
| 1037 | int i, j; | ||
| 1038 | int errors = 0, runs = 0; | ||
| 1039 | |||
| 1040 | for (i = 0; i < 10; i++) { | ||
| 1041 | u32 crc_full; | ||
| 1042 | |||
| 1043 | crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start, | ||
| 1044 | test[i].length); | ||
| 1045 | for (j = 0; j <= test[i].length; ++j) { | ||
| 1046 | u32 crc1, crc2; | ||
| 1047 | u32 len1 = j, len2 = test[i].length - j; | ||
| 1048 | |||
| 1049 | crc1 = __crc32c_le(test[i].crc, test_buf + | ||
| 1050 | test[i].start, len1); | ||
| 1051 | crc2 = __crc32c_le(0, test_buf + test[i].start + | ||
| 1052 | len1, len2); | ||
| 1053 | |||
| 1054 | if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) && | ||
| 1055 | crc_full == test[i].crc32c_le)) | ||
| 1056 | errors++; | ||
| 1057 | runs++; | ||
| 1058 | cond_resched(); | ||
| 1059 | } | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | if (errors) | ||
| 1063 | pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs); | ||
| 1064 | else | ||
| 1065 | pr_info("crc32c_combine: %d self tests passed\n", runs); | ||
| 1066 | |||
| 1067 | return 0; | ||
| 1068 | } | ||
| 1069 | |||
| 1053 | static int __init crc32_test(void) | 1070 | static int __init crc32_test(void) |
| 1054 | { | 1071 | { |
| 1055 | int i; | 1072 | int i; |
| @@ -1109,10 +1126,49 @@ static int __init crc32_test(void) | |||
| 1109 | return 0; | 1126 | return 0; |
| 1110 | } | 1127 | } |
| 1111 | 1128 | ||
| 1129 | static int __init crc32_combine_test(void) | ||
| 1130 | { | ||
| 1131 | int i, j; | ||
| 1132 | int errors = 0, runs = 0; | ||
| 1133 | |||
| 1134 | for (i = 0; i < 10; i++) { | ||
| 1135 | u32 crc_full; | ||
| 1136 | |||
| 1137 | crc_full = crc32_le(test[i].crc, test_buf + test[i].start, | ||
| 1138 | test[i].length); | ||
| 1139 | for (j = 0; j <= test[i].length; ++j) { | ||
| 1140 | u32 crc1, crc2; | ||
| 1141 | u32 len1 = j, len2 = test[i].length - j; | ||
| 1142 | |||
| 1143 | crc1 = crc32_le(test[i].crc, test_buf + | ||
| 1144 | test[i].start, len1); | ||
| 1145 | crc2 = crc32_le(0, test_buf + test[i].start + | ||
| 1146 | len1, len2); | ||
| 1147 | |||
| 1148 | if (!(crc_full == crc32_le_combine(crc1, crc2, len2) && | ||
| 1149 | crc_full == test[i].crc_le)) | ||
| 1150 | errors++; | ||
| 1151 | runs++; | ||
| 1152 | cond_resched(); | ||
| 1153 | } | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | if (errors) | ||
| 1157 | pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs); | ||
| 1158 | else | ||
| 1159 | pr_info("crc32_combine: %d self tests passed\n", runs); | ||
| 1160 | |||
| 1161 | return 0; | ||
| 1162 | } | ||
| 1163 | |||
| 1112 | static int __init crc32test_init(void) | 1164 | static int __init crc32test_init(void) |
| 1113 | { | 1165 | { |
| 1114 | crc32_test(); | 1166 | crc32_test(); |
| 1115 | crc32c_test(); | 1167 | crc32c_test(); |
| 1168 | |||
| 1169 | crc32_combine_test(); | ||
| 1170 | crc32c_combine_test(); | ||
| 1171 | |||
| 1116 | return 0; | 1172 | return 0; |
| 1117 | } | 1173 | } |
| 1118 | 1174 | ||
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index bf2c8b1043d8..e0731c3db706 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
| @@ -196,7 +196,7 @@ static void free_object(struct debug_obj *obj) | |||
| 196 | * initialized: | 196 | * initialized: |
| 197 | */ | 197 | */ |
| 198 | if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) | 198 | if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) |
| 199 | sched = keventd_up() && !work_pending(&debug_obj_work); | 199 | sched = keventd_up(); |
| 200 | hlist_add_head(&obj->node, &obj_pool); | 200 | hlist_add_head(&obj->node, &obj_pool); |
| 201 | obj_pool_free++; | 201 | obj_pool_free++; |
| 202 | obj_pool_used--; | 202 | obj_pool_used--; |
diff --git a/lib/digsig.c b/lib/digsig.c
index 2f31e6a45f0a..8793aeda30ca 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
| @@ -209,7 +209,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen, | |||
| 209 | kref = keyring_search(make_key_ref(keyring, 1UL), | 209 | kref = keyring_search(make_key_ref(keyring, 1UL), |
| 210 | &key_type_user, name); | 210 | &key_type_user, name); |
| 211 | if (IS_ERR(kref)) | 211 | if (IS_ERR(kref)) |
| 212 | key = ERR_PTR(PTR_ERR(kref)); | 212 | key = ERR_CAST(kref); |
| 213 | else | 213 | else |
| 214 | key = key_ref_to_ptr(kref); | 214 | key = key_ref_to_ptr(kref); |
| 215 | } else { | 215 | } else { |
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 26cf20be72b7..dda31168844f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
| @@ -313,6 +313,34 @@ retry: | |||
| 313 | EXPORT_SYMBOL(gen_pool_alloc); | 313 | EXPORT_SYMBOL(gen_pool_alloc); |
| 314 | 314 | ||
| 315 | /** | 315 | /** |
| 316 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage | ||
| 317 | * @pool: pool to allocate from | ||
| 318 | * @size: number of bytes to allocate from the pool | ||
| 319 | * @dma: dma-view physical address | ||
| 320 | * | ||
| 321 | * Allocate the requested number of bytes from the specified pool. | ||
| 322 | * Uses the pool allocation function (with first-fit algorithm by default). | ||
| 323 | * Can not be used in NMI handler on architectures without | ||
| 324 | * NMI-safe cmpxchg implementation. | ||
| 325 | */ | ||
| 326 | void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) | ||
| 327 | { | ||
| 328 | unsigned long vaddr; | ||
| 329 | |||
| 330 | if (!pool) | ||
| 331 | return NULL; | ||
| 332 | |||
| 333 | vaddr = gen_pool_alloc(pool, size); | ||
| 334 | if (!vaddr) | ||
| 335 | return NULL; | ||
| 336 | |||
| 337 | *dma = gen_pool_virt_to_phys(pool, vaddr); | ||
| 338 | |||
| 339 | return (void *)vaddr; | ||
| 340 | } | ||
| 341 | EXPORT_SYMBOL(gen_pool_dma_alloc); | ||
| 342 | |||
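A hedged usage sketch for the new helper; the pool pointer, size and cleanup here are illustrative, not taken from any caller in this patch:

	dma_addr_t dma;
	void *vaddr = gen_pool_dma_alloc(pool, 256, &dma);	/* "pool" created elsewhere */
	if (!vaddr)
		return -ENOMEM;
	/* hand "dma" to the device; the CPU side uses "vaddr" */
	gen_pool_free(pool, (unsigned long)vaddr, 256);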
| 343 | /** | ||
| 316 | * gen_pool_free - free allocated special memory back to the pool | 344 | * gen_pool_free - free allocated special memory back to the pool |
| 317 | * @pool: pool to free to | 345 | * @pool: pool to free to |
| 318 | * @addr: starting address of memory to free back to pool | 346 | * @addr: starting address of memory to free back to pool |
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 7b7f83027b7b..d79b9d222065 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
| @@ -215,7 +215,7 @@ static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, | |||
| 215 | * incrementing the fifo->in index counter | 215 | * incrementing the fifo->in index counter |
| 216 | */ | 216 | */ |
| 217 | smp_wmb(); | 217 | smp_wmb(); |
| 218 | *copied = len - ret; | 218 | *copied = len - ret * esize; |
| 219 | /* return the number of elements which are not copied */ | 219 | /* return the number of elements which are not copied */ |
| 220 | return ret; | 220 | return ret; |
| 221 | } | 221 | } |
| @@ -275,7 +275,7 @@ static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, | |||
| 275 | * incrementing the fifo->out index counter | 275 | * incrementing the fifo->out index counter |
| 276 | */ | 276 | */ |
| 277 | smp_wmb(); | 277 | smp_wmb(); |
| 278 | *copied = len - ret; | 278 | *copied = len - ret * esize; |
| 279 | /* return the number of elements which are not copied */ | 279 | /* return the number of elements which are not copied */ |
| 280 | return ret; | 280 | return ret; |
| 281 | } | 281 | } |
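The two one-line fixes above correct a unit mismatch: in these helpers len has already been scaled to a byte count, while ret — the copy_{to,from}_user() remainder — is rounded up to whole elements (DIV_ROUND_UP by esize earlier in each function), so for esize > 1 the old *copied = len - ret over-reported the bytes actually copied. Scaling ret by esize puts both terms back in bytes.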
diff --git a/lib/kobject.c b/lib/kobject.c
index 084f7b18d0c0..5b4b8886435e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
| @@ -13,11 +13,30 @@ | |||
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/kobject.h> | 15 | #include <linux/kobject.h> |
| 16 | #include <linux/kobj_completion.h> | ||
| 16 | #include <linux/string.h> | 17 | #include <linux/string.h> |
| 17 | #include <linux/export.h> | 18 | #include <linux/export.h> |
| 18 | #include <linux/stat.h> | 19 | #include <linux/stat.h> |
| 19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 20 | 21 | ||
| 22 | /** | ||
| 23 | * kobject_namespace - return @kobj's namespace tag | ||
| 24 | * @kobj: kobject in question | ||
| 25 | * | ||
| 26 | * Returns namespace tag of @kobj if its parent has namespace ops enabled | ||
| 27 | * and thus @kobj should have a namespace tag associated with it. Returns | ||
| 28 | * %NULL otherwise. | ||
| 29 | */ | ||
| 30 | const void *kobject_namespace(struct kobject *kobj) | ||
| 31 | { | ||
| 32 | const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj); | ||
| 33 | |||
| 34 | if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE) | ||
| 35 | return NULL; | ||
| 36 | |||
| 37 | return kobj->ktype->namespace(kobj); | ||
| 38 | } | ||
| 39 | |||
| 21 | /* | 40 | /* |
| 22 | * populate_dir - populate directory with attributes. | 41 | * populate_dir - populate directory with attributes. |
| 23 | * @kobj: object we're working on. | 42 | * @kobj: object we're working on. |
| @@ -46,13 +65,21 @@ static int populate_dir(struct kobject *kobj) | |||
| 46 | 65 | ||
| 47 | static int create_dir(struct kobject *kobj) | 66 | static int create_dir(struct kobject *kobj) |
| 48 | { | 67 | { |
| 49 | int error = 0; | 68 | int error; |
| 50 | error = sysfs_create_dir(kobj); | 69 | |
| 70 | error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj)); | ||
| 51 | if (!error) { | 71 | if (!error) { |
| 52 | error = populate_dir(kobj); | 72 | error = populate_dir(kobj); |
| 53 | if (error) | 73 | if (error) |
| 54 | sysfs_remove_dir(kobj); | 74 | sysfs_remove_dir(kobj); |
| 55 | } | 75 | } |
| 76 | |||
| 77 | /* | ||
| 78 | * @kobj->sd may be deleted by an ancestor going away. Hold an | ||
| 79 | * extra reference so that it stays until @kobj is gone. | ||
| 80 | */ | ||
| 81 | sysfs_get(kobj->sd); | ||
| 82 | |||
| 56 | return error; | 83 | return error; |
| 57 | } | 84 | } |
| 58 | 85 | ||
| @@ -428,7 +455,7 @@ int kobject_rename(struct kobject *kobj, const char *new_name) | |||
| 428 | goto out; | 455 | goto out; |
| 429 | } | 456 | } |
| 430 | 457 | ||
| 431 | error = sysfs_rename_dir(kobj, new_name); | 458 | error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj)); |
| 432 | if (error) | 459 | if (error) |
| 433 | goto out; | 460 | goto out; |
| 434 | 461 | ||
| @@ -472,6 +499,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent) | |||
| 472 | if (kobj->kset) | 499 | if (kobj->kset) |
| 473 | new_parent = kobject_get(&kobj->kset->kobj); | 500 | new_parent = kobject_get(&kobj->kset->kobj); |
| 474 | } | 501 | } |
| 502 | |||
| 475 | /* old object path */ | 503 | /* old object path */ |
| 476 | devpath = kobject_get_path(kobj, GFP_KERNEL); | 504 | devpath = kobject_get_path(kobj, GFP_KERNEL); |
| 477 | if (!devpath) { | 505 | if (!devpath) { |
| @@ -486,7 +514,7 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent) | |||
| 486 | sprintf(devpath_string, "DEVPATH_OLD=%s", devpath); | 514 | sprintf(devpath_string, "DEVPATH_OLD=%s", devpath); |
| 487 | envp[0] = devpath_string; | 515 | envp[0] = devpath_string; |
| 488 | envp[1] = NULL; | 516 | envp[1] = NULL; |
| 489 | error = sysfs_move_dir(kobj, new_parent); | 517 | error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj)); |
| 490 | if (error) | 518 | if (error) |
| 491 | goto out; | 519 | goto out; |
| 492 | old_parent = kobj->parent; | 520 | old_parent = kobj->parent; |
| @@ -508,10 +536,15 @@ out: | |||
| 508 | */ | 536 | */ |
| 509 | void kobject_del(struct kobject *kobj) | 537 | void kobject_del(struct kobject *kobj) |
| 510 | { | 538 | { |
| 539 | struct sysfs_dirent *sd; | ||
| 540 | |||
| 511 | if (!kobj) | 541 | if (!kobj) |
| 512 | return; | 542 | return; |
| 513 | 543 | ||
| 544 | sd = kobj->sd; | ||
| 514 | sysfs_remove_dir(kobj); | 545 | sysfs_remove_dir(kobj); |
| 546 | sysfs_put(sd); | ||
| 547 | |||
| 515 | kobj->state_in_sysfs = 0; | 548 | kobj->state_in_sysfs = 0; |
| 516 | kobj_kset_leave(kobj); | 549 | kobj_kset_leave(kobj); |
| 517 | kobject_put(kobj->parent); | 550 | kobject_put(kobj->parent); |
| @@ -727,6 +760,55 @@ const struct sysfs_ops kobj_sysfs_ops = { | |||
| 727 | }; | 760 | }; |
| 728 | 761 | ||
| 729 | /** | 762 | /** |
| 763 | * kobj_completion_init - initialize a kobj_completion object. | ||
| 764 | * @kc: kobj_completion | ||
| 765 | * @ktype: type of kobject to initialize | ||
| 766 | * | ||
| 767 | * kobj_completion structures can be embedded within structures with different | ||
| 768 | * lifetime rules. During the release of the enclosing object, we can | ||
| 769 | * wait on the release of the kobject so that we don't free it while it's | ||
| 770 | * still busy. | ||
| 771 | */ | ||
| 772 | void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype) | ||
| 773 | { | ||
| 774 | init_completion(&kc->kc_unregister); | ||
| 775 | kobject_init(&kc->kc_kobj, ktype); | ||
| 776 | } | ||
| 777 | EXPORT_SYMBOL_GPL(kobj_completion_init); | ||
| 778 | |||
| 779 | /** | ||
| 780 | * kobj_completion_release - release a kobj_completion object | ||
| 781 | * @kobj: kobject embedded in kobj_completion | ||
| 782 | * | ||
| 783 | * Used with kobject_release to notify waiters that the kobject has been | ||
| 784 | * released. | ||
| 785 | */ | ||
| 786 | void kobj_completion_release(struct kobject *kobj) | ||
| 787 | { | ||
| 788 | struct kobj_completion *kc = kobj_to_kobj_completion(kobj); | ||
| 789 | complete(&kc->kc_unregister); | ||
| 790 | } | ||
| 791 | EXPORT_SYMBOL_GPL(kobj_completion_release); | ||
| 792 | |||
| 793 | /** | ||
| 794 | * kobj_completion_del_and_wait - release the kobject and wait for it | ||
| 795 | * @kc: kobj_completion object to release | ||
| 796 | * | ||
| 797 | * Delete the kobject from sysfs and drop the reference count. Then wait | ||
| 798 | * until any other outstanding references are also dropped. This routine | ||
| 799 | * is only necessary once other references may have been taken on the | ||
| 800 | * kobject. Typically this happens when the kobject has been published | ||
| 801 | * to sysfs via kobject_add. | ||
| 802 | */ | ||
| 803 | void kobj_completion_del_and_wait(struct kobj_completion *kc) | ||
| 804 | { | ||
| 805 | kobject_del(&kc->kc_kobj); | ||
| 806 | kobject_put(&kc->kc_kobj); | ||
| 807 | wait_for_completion(&kc->kc_unregister); | ||
| 808 | } | ||
| 809 | EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait); | ||
| 810 | |||
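A sketch of the embedding pattern these helpers enable; the containing struct, ktype and names are hypothetical, the one real requirement (per the comments above) being that the ktype's release hook is kobj_completion_release so the completion waited on by kobj_completion_del_and_wait() fires:

	struct demo {
		struct kobj_completion kc;
		/* ... fields with the same lifetime ... */
	};

	static struct kobj_type demo_ktype = {
		.release = kobj_completion_release,
	};

	kobj_completion_init(&d->kc, &demo_ktype);
	kobject_add(&d->kc.kc_kobj, parent, "demo");
	/* ... object visible in sysfs, other references may be taken ... */
	kobj_completion_del_and_wait(&d->kc);	/* blocks until the last ref drops */
	kfree(d);				/* now safe */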
| 811 | /** | ||
| 730 | * kset_register - initialize and add a kset. | 812 | * kset_register - initialize and add a kset. |
| 731 | * @k: kset. | 813 | * @k: kset. |
| 732 | */ | 814 | */ |
diff --git a/lib/llist.c b/lib/llist.c
index 4a70d120138c..f76196d07409 100644
--- a/lib/llist.c
+++ b/lib/llist.c
| @@ -81,3 +81,25 @@ struct llist_node *llist_del_first(struct llist_head *head) | |||
| 81 | return entry; | 81 | return entry; |
| 82 | } | 82 | } |
| 83 | EXPORT_SYMBOL_GPL(llist_del_first); | 83 | EXPORT_SYMBOL_GPL(llist_del_first); |
| 84 | |||
| 85 | /** | ||
| 86 | * llist_reverse_order - reverse order of a llist chain | ||
| 87 | * @head: first item of the list to be reversed | ||
| 88 | * | ||
| 89 | * Reverse the order of a chain of llist entries and return the | ||
| 90 | * new first entry. | ||
| 91 | */ | ||
| 92 | struct llist_node *llist_reverse_order(struct llist_node *head) | ||
| 93 | { | ||
| 94 | struct llist_node *new_head = NULL; | ||
| 95 | |||
| 96 | while (head) { | ||
| 97 | struct llist_node *tmp = head; | ||
| 98 | head = head->next; | ||
| 99 | tmp->next = new_head; | ||
| 100 | new_head = tmp; | ||
| 101 | } | ||
| 102 | |||
| 103 | return new_head; | ||
| 104 | } | ||
| 105 | EXPORT_SYMBOL_GPL(llist_reverse_order); | ||
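This is useful because llist_add() pushes at the head, so llist_del_all() hands back entries in LIFO order; a consumer that wants FIFO processing reverses once per drain. Sketch, with illustrative struct and field names:

	struct llist_node *n = llist_del_all(&pending);

	n = llist_reverse_order(n);	/* oldest entry first */
	while (n) {
		struct item *it = llist_entry(n, struct item, node);

		n = n->next;		/* read before "it" may be freed */
		handle(it);
	}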
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6dc09d8f4c24..872a15a2a637 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
| @@ -1002,7 +1002,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) | |||
| 1002 | * Some tests (e.g. double-unlock) might corrupt the preemption | 1002 | * Some tests (e.g. double-unlock) might corrupt the preemption |
| 1003 | * count, so restore it: | 1003 | * count, so restore it: |
| 1004 | */ | 1004 | */ |
| 1005 | preempt_count() = saved_preempt_count; | 1005 | preempt_count_set(saved_preempt_count); |
| 1006 | #ifdef CONFIG_TRACE_IRQFLAGS | 1006 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 1007 | if (softirq_count()) | 1007 | if (softirq_count()) |
| 1008 | current->softirqs_enabled = 0; | 1008 | current->softirqs_enabled = 0; |
diff --git a/lib/lockref.c b/lib/lockref.c
index 6f9d434c1521..d2b123f8456b 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
| @@ -1,7 +1,7 @@ | |||
| 1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
| 2 | #include <linux/lockref.h> | 2 | #include <linux/lockref.h> |
| 3 | 3 | ||
| 4 | #ifdef CONFIG_CMPXCHG_LOCKREF | 4 | #if USE_CMPXCHG_LOCKREF |
| 5 | 5 | ||
| 6 | /* | 6 | /* |
| 7 | * Allow weakly-ordered memory architectures to provide barrier-less | 7 | * Allow weakly-ordered memory architectures to provide barrier-less |
| @@ -153,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref) | |||
| 153 | assert_spin_locked(&lockref->lock); | 153 | assert_spin_locked(&lockref->lock); |
| 154 | lockref->count = -128; | 154 | lockref->count = -128; |
| 155 | } | 155 | } |
| 156 | EXPORT_SYMBOL(lockref_mark_dead); | ||
| 156 | 157 | ||
| 157 | /** | 158 | /** |
| 158 | * lockref_get_not_dead - Increments count unless the ref is dead | 159 | * lockref_get_not_dead - Increments count unless the ref is dead |
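Exporting lockref_mark_dead() lets modules use the full teardown protocol. A sketch (the surrounding object code is illustrative): the owner marks the ref dead under its spinlock, after which the lockless fast path refuses new references:

	spin_lock(&ref->lock);
	lockref_mark_dead(ref);		/* sets count to -128 */
	spin_unlock(&ref->lock);

	/* lookup side, cmpxchg-based when USE_CMPXCHG_LOCKREF: */
	if (!lockref_get_not_dead(ref))
		goto stale;		/* object is being torn down */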
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
deleted file mode 100644
index 652a8ee8efe9..000000000000
--- a/lib/percpu-rwsem.c
+++ /dev/null
| @@ -1,165 +0,0 @@ | |||
| 1 | #include <linux/atomic.h> | ||
| 2 | #include <linux/rwsem.h> | ||
| 3 | #include <linux/percpu.h> | ||
| 4 | #include <linux/wait.h> | ||
| 5 | #include <linux/lockdep.h> | ||
| 6 | #include <linux/percpu-rwsem.h> | ||
| 7 | #include <linux/rcupdate.h> | ||
| 8 | #include <linux/sched.h> | ||
| 9 | #include <linux/errno.h> | ||
| 10 | |||
| 11 | int __percpu_init_rwsem(struct percpu_rw_semaphore *brw, | ||
| 12 | const char *name, struct lock_class_key *rwsem_key) | ||
| 13 | { | ||
| 14 | brw->fast_read_ctr = alloc_percpu(int); | ||
| 15 | if (unlikely(!brw->fast_read_ctr)) | ||
| 16 | return -ENOMEM; | ||
| 17 | |||
| 18 | /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */ | ||
| 19 | __init_rwsem(&brw->rw_sem, name, rwsem_key); | ||
| 20 | atomic_set(&brw->write_ctr, 0); | ||
| 21 | atomic_set(&brw->slow_read_ctr, 0); | ||
| 22 | init_waitqueue_head(&brw->write_waitq); | ||
| 23 | return 0; | ||
| 24 | } | ||
| 25 | |||
| 26 | void percpu_free_rwsem(struct percpu_rw_semaphore *brw) | ||
| 27 | { | ||
| 28 | free_percpu(brw->fast_read_ctr); | ||
| 29 | brw->fast_read_ctr = NULL; /* catch use after free bugs */ | ||
| 30 | } | ||
| 31 | |||
| 32 | /* | ||
| 33 | * This is the fast-path for down_read/up_read, it only needs to ensure | ||
| 34 | * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the | ||
| 35 | * fast per-cpu counter. The writer uses synchronize_sched_expedited() to | ||
| 36 | * serialize with the preempt-disabled section below. | ||
| 37 | * | ||
| 38 | * The nontrivial part is that we should guarantee acquire/release semantics | ||
| 39 | * in case when | ||
| 40 | * | ||
| 41 | * R_W: down_write() comes after up_read(), the writer should see all | ||
| 42 | * changes done by the reader | ||
| 43 | * or | ||
| 44 | * W_R: down_read() comes after up_write(), the reader should see all | ||
| 45 | * changes done by the writer | ||
| 46 | * | ||
| 47 | * If this helper fails the callers rely on the normal rw_semaphore and | ||
| 48 | * atomic_dec_and_test(), so in this case we have the necessary barriers. | ||
| 49 | * | ||
| 50 | * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or | ||
| 51 | * __this_cpu_add() below can be reordered with any LOAD/STORE done by the | ||
| 52 | * reader inside the critical section. See the comments in down_write and | ||
| 53 | * up_write below. | ||
| 54 | */ | ||
| 55 | static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val) | ||
| 56 | { | ||
| 57 | bool success = false; | ||
| 58 | |||
| 59 | preempt_disable(); | ||
| 60 | if (likely(!atomic_read(&brw->write_ctr))) { | ||
| 61 | __this_cpu_add(*brw->fast_read_ctr, val); | ||
| 62 | success = true; | ||
| 63 | } | ||
| 64 | preempt_enable(); | ||
| 65 | |||
| 66 | return success; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Like the normal down_read() this is not recursive, the writer can | ||
| 71 | * come after the first percpu_down_read() and create the deadlock. | ||
| 72 | * | ||
| 73 | * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep, | ||
| 74 | * percpu_up_read() does rwsem_release(). This pairs with the usage | ||
| 75 | * of ->rw_sem in percpu_down/up_write(). | ||
| 76 | */ | ||
| 77 | void percpu_down_read(struct percpu_rw_semaphore *brw) | ||
| 78 | { | ||
| 79 | might_sleep(); | ||
| 80 | if (likely(update_fast_ctr(brw, +1))) { | ||
| 81 | rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_); | ||
| 82 | return; | ||
| 83 | } | ||
| 84 | |||
| 85 | down_read(&brw->rw_sem); | ||
| 86 | atomic_inc(&brw->slow_read_ctr); | ||
| 87 | /* avoid up_read()->rwsem_release() */ | ||
| 88 | __up_read(&brw->rw_sem); | ||
| 89 | } | ||
| 90 | |||
| 91 | void percpu_up_read(struct percpu_rw_semaphore *brw) | ||
| 92 | { | ||
| 93 | rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_); | ||
| 94 | |||
| 95 | if (likely(update_fast_ctr(brw, -1))) | ||
| 96 | return; | ||
| 97 | |||
| 98 | /* false-positive is possible but harmless */ | ||
| 99 | if (atomic_dec_and_test(&brw->slow_read_ctr)) | ||
| 100 | wake_up_all(&brw->write_waitq); | ||
| 101 | } | ||
| 102 | |||
| 103 | static int clear_fast_ctr(struct percpu_rw_semaphore *brw) | ||
| 104 | { | ||
| 105 | unsigned int sum = 0; | ||
| 106 | int cpu; | ||
| 107 | |||
| 108 | for_each_possible_cpu(cpu) { | ||
| 109 | sum += per_cpu(*brw->fast_read_ctr, cpu); | ||
| 110 | per_cpu(*brw->fast_read_ctr, cpu) = 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | return sum; | ||
| 114 | } | ||
| 115 | |||
| 116 | /* | ||
| 117 | * A writer increments ->write_ctr to force the readers to switch to the | ||
| 118 | * slow mode, note the atomic_read() check in update_fast_ctr(). | ||
| 119 | * | ||
| 120 | * After that the readers can only inc/dec the slow ->slow_read_ctr counter, | ||
| 121 | * ->fast_read_ctr is stable. Once the writer moves its sum into the slow | ||
| 122 | * counter it represents the number of active readers. | ||
| 123 | * | ||
| 124 | * Finally the writer takes ->rw_sem for writing and blocks the new readers, | ||
| 125 | * then waits until the slow counter becomes zero. | ||
| 126 | */ | ||
| 127 | void percpu_down_write(struct percpu_rw_semaphore *brw) | ||
| 128 | { | ||
| 129 | /* tell update_fast_ctr() there is a pending writer */ | ||
| 130 | atomic_inc(&brw->write_ctr); | ||
| 131 | /* | ||
| 132 | * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read | ||
| 133 | * so that update_fast_ctr() can't succeed. | ||
| 134 | * | ||
| 135 | * 2. Ensures we see the result of every previous this_cpu_add() in | ||
| 136 | * update_fast_ctr(). | ||
| 137 | * | ||
| 138 | * 3. Ensures that if any reader has exited its critical section via | ||
| 139 | * fast-path, it executes a full memory barrier before we return. | ||
| 140 | * See R_W case in the comment above update_fast_ctr(). | ||
| 141 | */ | ||
| 142 | synchronize_sched_expedited(); | ||
| 143 | |||
| 144 | /* exclude other writers, and block the new readers completely */ | ||
| 145 | down_write(&brw->rw_sem); | ||
| 146 | |||
| 147 | /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */ | ||
| 148 | atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr); | ||
| 149 | |||
| 150 | /* wait for all readers to complete their percpu_up_read() */ | ||
| 151 | wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr)); | ||
| 152 | } | ||
| 153 | |||
| 154 | void percpu_up_write(struct percpu_rw_semaphore *brw) | ||
| 155 | { | ||
| 156 | /* release the lock, but the readers can't use the fast-path */ | ||
| 157 | up_write(&brw->rw_sem); | ||
| 158 | /* | ||
| 159 | * Insert the barrier before the next fast-path in down_read, | ||
| 160 | * see W_R case in the comment above update_fast_ctr(). | ||
| 161 | */ | ||
| 162 | synchronize_sched_expedited(); | ||
| 163 | /* the last writer unblocks update_fast_ctr() */ | ||
| 164 | atomic_dec(&brw->write_ctr); | ||
| 165 | } | ||
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 93c5d5ecff4e..7473ee3b4ee7 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
| @@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) | |||
| 60 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | 60 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount) |
| 61 | { | 61 | { |
| 62 | int cpu; | 62 | int cpu; |
| 63 | unsigned long flags; | ||
| 63 | 64 | ||
| 64 | raw_spin_lock(&fbc->lock); | 65 | raw_spin_lock_irqsave(&fbc->lock, flags); |
| 65 | for_each_possible_cpu(cpu) { | 66 | for_each_possible_cpu(cpu) { |
| 66 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 67 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
| 67 | *pcount = 0; | 68 | *pcount = 0; |
| 68 | } | 69 | } |
| 69 | fbc->count = amount; | 70 | fbc->count = amount; |
| 70 | raw_spin_unlock(&fbc->lock); | 71 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
| 71 | } | 72 | } |
| 72 | EXPORT_SYMBOL(percpu_counter_set); | 73 | EXPORT_SYMBOL(percpu_counter_set); |
| 73 | 74 | ||
| @@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
| 78 | preempt_disable(); | 79 | preempt_disable(); |
| 79 | count = __this_cpu_read(*fbc->counters) + amount; | 80 | count = __this_cpu_read(*fbc->counters) + amount; |
| 80 | if (count >= batch || count <= -batch) { | 81 | if (count >= batch || count <= -batch) { |
| 81 | raw_spin_lock(&fbc->lock); | 82 | unsigned long flags; |
| 83 | raw_spin_lock_irqsave(&fbc->lock, flags); | ||
| 82 | fbc->count += count; | 84 | fbc->count += count; |
| 83 | raw_spin_unlock(&fbc->lock); | 85 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
| 84 | __this_cpu_write(*fbc->counters, 0); | 86 | __this_cpu_write(*fbc->counters, 0); |
| 85 | } else { | 87 | } else { |
| 86 | __this_cpu_write(*fbc->counters, count); | 88 | __this_cpu_write(*fbc->counters, count); |
| @@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) | |||
| 97 | { | 99 | { |
| 98 | s64 ret; | 100 | s64 ret; |
| 99 | int cpu; | 101 | int cpu; |
| 102 | unsigned long flags; | ||
| 100 | 103 | ||
| 101 | raw_spin_lock(&fbc->lock); | 104 | raw_spin_lock_irqsave(&fbc->lock, flags); |
| 102 | ret = fbc->count; | 105 | ret = fbc->count; |
| 103 | for_each_online_cpu(cpu) { | 106 | for_each_online_cpu(cpu) { |
| 104 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 107 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
| 105 | ret += *pcount; | 108 | ret += *pcount; |
| 106 | } | 109 | } |
| 107 | raw_spin_unlock(&fbc->lock); | 110 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
| 108 | return ret; | 111 | return ret; |
| 109 | } | 112 | } |
| 110 | EXPORT_SYMBOL(__percpu_counter_sum); | 113 | EXPORT_SYMBOL(__percpu_counter_sum); |
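The three hunks above all make the same change: fbc->lock is now taken with the irqsave variants, so percpu_counter users may update the counter from interrupt context without risking a self-deadlock. A sketch of the scenario this closes, with a hypothetical counter and irq handler (the percpu_counter calls themselves are real API):

    #include <linux/percpu_counter.h>
    #include <linux/interrupt.h>

    static struct percpu_counter nr_events;  /* percpu_counter_init() it first */

    static irqreturn_t demo_irq(int irq, void *dev)
    {
            /* may take fbc->lock once the percpu delta exceeds the batch */
            percpu_counter_add(&nr_events, 1);
            return IRQ_HANDLED;
    }

    static void demo_reset(void)
    {
            /*
             * Also takes fbc->lock, in process context.  With a plain
             * raw_spin_lock(), demo_irq() firing on this CPU while the
             * lock is held would spin on it forever; the irqsave variant
             * keeps local interrupts off for the critical section.
             */
            percpu_counter_set(&nr_events, 0);
    }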
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index bab1ba2a4c71..b0698ea972c6 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c | |||
| @@ -30,15 +30,6 @@ | |||
| 30 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
| 31 | #include <linux/percpu_ida.h> | 31 | #include <linux/percpu_ida.h> |
| 32 | 32 | ||
| 33 | /* | ||
| 34 | * Number of tags we move between the percpu freelist and the global freelist at | ||
| 35 | * a time | ||
| 36 | */ | ||
| 37 | #define IDA_PCPU_BATCH_MOVE 32U | ||
| 38 | |||
| 39 | /* Max size of percpu freelist, */ | ||
| 40 | #define IDA_PCPU_SIZE ((IDA_PCPU_BATCH_MOVE * 3) / 2) | ||
| 41 | |||
| 42 | struct percpu_ida_cpu { | 33 | struct percpu_ida_cpu { |
| 43 | /* | 34 | /* |
| 44 | * Even though this is percpu, we need a lock for tag stealing by remote | 35 | * Even though this is percpu, we need a lock for tag stealing by remote |
| @@ -78,7 +69,7 @@ static inline void steal_tags(struct percpu_ida *pool, | |||
| 78 | struct percpu_ida_cpu *remote; | 69 | struct percpu_ida_cpu *remote; |
| 79 | 70 | ||
| 80 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); | 71 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); |
| 81 | cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2; | 72 | cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; |
| 82 | cpus_have_tags--) { | 73 | cpus_have_tags--) { |
| 83 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); | 74 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); |
| 84 | 75 | ||
| @@ -123,7 +114,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool, | |||
| 123 | { | 114 | { |
| 124 | move_tags(tags->freelist, &tags->nr_free, | 115 | move_tags(tags->freelist, &tags->nr_free, |
| 125 | pool->freelist, &pool->nr_free, | 116 | pool->freelist, &pool->nr_free, |
| 126 | min(pool->nr_free, IDA_PCPU_BATCH_MOVE)); | 117 | min(pool->nr_free, pool->percpu_batch_size)); |
| 127 | } | 118 | } |
| 128 | 119 | ||
| 129 | static inline unsigned alloc_local_tag(struct percpu_ida *pool, | 120 | static inline unsigned alloc_local_tag(struct percpu_ida *pool, |
| @@ -245,17 +236,17 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag) | |||
| 245 | wake_up(&pool->wait); | 236 | wake_up(&pool->wait); |
| 246 | } | 237 | } |
| 247 | 238 | ||
| 248 | if (nr_free == IDA_PCPU_SIZE) { | 239 | if (nr_free == pool->percpu_max_size) { |
| 249 | spin_lock(&pool->lock); | 240 | spin_lock(&pool->lock); |
| 250 | 241 | ||
| 251 | /* | 242 | /* |
| 252 | * Global lock held and irqs disabled, don't need percpu | 243 | * Global lock held and irqs disabled, don't need percpu |
| 253 | * lock | 244 | * lock |
| 254 | */ | 245 | */ |
| 255 | if (tags->nr_free == IDA_PCPU_SIZE) { | 246 | if (tags->nr_free == pool->percpu_max_size) { |
| 256 | move_tags(pool->freelist, &pool->nr_free, | 247 | move_tags(pool->freelist, &pool->nr_free, |
| 257 | tags->freelist, &tags->nr_free, | 248 | tags->freelist, &tags->nr_free, |
| 258 | IDA_PCPU_BATCH_MOVE); | 249 | pool->percpu_batch_size); |
| 259 | 250 | ||
| 260 | wake_up(&pool->wait); | 251 | wake_up(&pool->wait); |
| 261 | } | 252 | } |
| @@ -292,7 +283,8 @@ EXPORT_SYMBOL_GPL(percpu_ida_destroy); | |||
| 292 | * Allocation is percpu, but sharding is limited by nr_tags - for best | 283 | * Allocation is percpu, but sharding is limited by nr_tags - for best |
| 293 | * performance, the workload should not span more cpus than nr_tags / 128. | 284 | * performance, the workload should not span more cpus than nr_tags / 128. |
| 294 | */ | 285 | */ |
| 295 | int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) | 286 | int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags, |
| 287 | unsigned long max_size, unsigned long batch_size) | ||
| 296 | { | 288 | { |
| 297 | unsigned i, cpu, order; | 289 | unsigned i, cpu, order; |
| 298 | 290 | ||
| @@ -301,6 +293,8 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) | |||
| 301 | init_waitqueue_head(&pool->wait); | 293 | init_waitqueue_head(&pool->wait); |
| 302 | spin_lock_init(&pool->lock); | 294 | spin_lock_init(&pool->lock); |
| 303 | pool->nr_tags = nr_tags; | 295 | pool->nr_tags = nr_tags; |
| 296 | pool->percpu_max_size = max_size; | ||
| 297 | pool->percpu_batch_size = batch_size; | ||
| 304 | 298 | ||
| 305 | /* Guard against overflow */ | 299 | /* Guard against overflow */ |
| 306 | if (nr_tags > (unsigned) INT_MAX + 1) { | 300 | if (nr_tags > (unsigned) INT_MAX + 1) { |
| @@ -319,7 +313,7 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) | |||
| 319 | pool->nr_free = nr_tags; | 313 | pool->nr_free = nr_tags; |
| 320 | 314 | ||
| 321 | pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + | 315 | pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + |
| 322 | IDA_PCPU_SIZE * sizeof(unsigned), | 316 | pool->percpu_max_size * sizeof(unsigned), |
| 323 | sizeof(unsigned)); | 317 | sizeof(unsigned)); |
| 324 | if (!pool->tag_cpu) | 318 | if (!pool->tag_cpu) |
| 325 | goto err; | 319 | goto err; |
| @@ -332,4 +326,65 @@ err: | |||
| 332 | percpu_ida_destroy(pool); | 326 | percpu_ida_destroy(pool); |
| 333 | return -ENOMEM; | 327 | return -ENOMEM; |
| 334 | } | 328 | } |
| 335 | EXPORT_SYMBOL_GPL(percpu_ida_init); | 329 | EXPORT_SYMBOL_GPL(__percpu_ida_init); |
| 330 | |||
| 331 | /** | ||
| 332 | * percpu_ida_for_each_free - iterate free ids of a pool | ||
| 333 | * @pool: pool to iterate | ||
| 334 | * @fn: iteration callback function | ||
| 335 | * @data: parameter for @fn | ||
| 336 | * | ||
| 337 | * Note: this does not guarantee that every free id is iterated exactly | ||
| 338 | * once. Some free ids might be missed, some might be iterated more than | ||
| 339 | * once, and some might already be allocated again by the time they are seen. | ||
| 340 | */ | ||
| 341 | int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, | ||
| 342 | void *data) | ||
| 343 | { | ||
| 344 | unsigned long flags; | ||
| 345 | struct percpu_ida_cpu *remote; | ||
| 346 | unsigned cpu, i, err = 0; | ||
| 347 | |||
| 348 | local_irq_save(flags); | ||
| 349 | for_each_possible_cpu(cpu) { | ||
| 350 | remote = per_cpu_ptr(pool->tag_cpu, cpu); | ||
| 351 | spin_lock(&remote->lock); | ||
| 352 | for (i = 0; i < remote->nr_free; i++) { | ||
| 353 | err = fn(remote->freelist[i], data); | ||
| 354 | if (err) | ||
| 355 | break; | ||
| 356 | } | ||
| 357 | spin_unlock(&remote->lock); | ||
| 358 | if (err) | ||
| 359 | goto out; | ||
| 360 | } | ||
| 361 | |||
| 362 | spin_lock(&pool->lock); | ||
| 363 | for (i = 0; i < pool->nr_free; i++) { | ||
| 364 | err = fn(pool->freelist[i], data); | ||
| 365 | if (err) | ||
| 366 | break; | ||
| 367 | } | ||
| 368 | spin_unlock(&pool->lock); | ||
| 369 | out: | ||
| 370 | local_irq_restore(flags); | ||
| 371 | return err; | ||
| 372 | } | ||
| 373 | EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); | ||
| 374 | |||
| 375 | /** | ||
| 376 | * percpu_ida_free_tags - return the number of free tags on a specific cpu or in the global pool | ||
| 377 | * @pool: pool related | ||
| 378 | * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids | ||
| 379 | * | ||
| 380 | * Note: this just returns a snapshot of the number of free tags. | ||
| 381 | */ | ||
| 382 | unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu) | ||
| 383 | { | ||
| 384 | struct percpu_ida_cpu *remote; | ||
| 385 | if (cpu == nr_cpu_ids) | ||
| 386 | return pool->nr_free; | ||
| 387 | remote = per_cpu_ptr(pool->tag_cpu, cpu); | ||
| 388 | return remote->nr_free; | ||
| 389 | } | ||
| 390 | EXPORT_SYMBOL_GPL(percpu_ida_free_tags); | ||
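With the compile-time constants gone, each pool now picks its per-cpu cache geometry at init time; a percpu_ida_init() wrapper in the header presumably keeps the old 48/32 defaults. A sketch of the new entry points, with the pool, the sizes and the callback chosen purely for illustration:

    #include <linux/percpu_ida.h>

    static struct percpu_ida demo_pool;     /* hypothetical pool */

    static int demo_count_free(unsigned id, void *data)
    {
            (*(unsigned *)data)++;
            return 0;       /* a non-zero return stops the iteration */
    }

    static int demo_setup(void)
    {
            unsigned nfree = 0;
            int err;

            /* 1024 tags; cache at most 48 per cpu, moved 32 at a time */
            err = __percpu_ida_init(&demo_pool, 1024, 48, 32);
            if (err)
                    return err;

            /* best-effort walk: ids may be missed, repeated or re-allocated */
            percpu_ida_for_each_free(&demo_pool, demo_count_free, &nfree);
            return 0;
    }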
diff --git a/lib/percpu_test.c b/lib/percpu_test.c new file mode 100644 index 000000000000..0b5d14dadd1a --- /dev/null +++ b/lib/percpu_test.c | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | #include <linux/module.h> | ||
| 2 | |||
| 3 | /* validate @native and @pcp counter values match @expected */ | ||
| 4 | #define CHECK(native, pcp, expected) \ | ||
| 5 | do { \ | ||
| 6 | WARN((native) != (expected), \ | ||
| 7 | "raw %ld (0x%lx) != expected %lld (0x%llx)", \ | ||
| 8 | (native), (native), \ | ||
| 9 | (long long)(expected), (long long)(expected)); \ | ||
| 10 | WARN(__this_cpu_read(pcp) != (expected), \ | ||
| 11 | "pcp %ld (0x%lx) != expected %lld (0x%llx)", \ | ||
| 12 | __this_cpu_read(pcp), __this_cpu_read(pcp), \ | ||
| 13 | (long long)(expected), (long long)(expected)); \ | ||
| 14 | } while (0) | ||
| 15 | |||
| 16 | static DEFINE_PER_CPU(long, long_counter); | ||
| 17 | static DEFINE_PER_CPU(unsigned long, ulong_counter); | ||
| 18 | |||
| 19 | static int __init percpu_test_init(void) | ||
| 20 | { | ||
| 21 | /* | ||
| 22 | * volatile prevents the compiler from optimizing its uses; otherwise the | ||
| 23 | * +ui_one/-ui_one operations below would be replaced with inc/dec instructions. | ||
| 24 | */ | ||
| 25 | volatile unsigned int ui_one = 1; | ||
| 26 | long l = 0; | ||
| 27 | unsigned long ul = 0; | ||
| 28 | |||
| 29 | pr_info("percpu test start\n"); | ||
| 30 | |||
| 31 | preempt_disable(); | ||
| 32 | |||
| 33 | l += -1; | ||
| 34 | __this_cpu_add(long_counter, -1); | ||
| 35 | CHECK(l, long_counter, -1); | ||
| 36 | |||
| 37 | l += 1; | ||
| 38 | __this_cpu_add(long_counter, 1); | ||
| 39 | CHECK(l, long_counter, 0); | ||
| 40 | |||
| 41 | ul = 0; | ||
| 42 | __this_cpu_write(ulong_counter, 0); | ||
| 43 | |||
| 44 | ul += 1UL; | ||
| 45 | __this_cpu_add(ulong_counter, 1UL); | ||
| 46 | CHECK(ul, ulong_counter, 1); | ||
| 47 | |||
| 48 | ul += -1UL; | ||
| 49 | __this_cpu_add(ulong_counter, -1UL); | ||
| 50 | CHECK(ul, ulong_counter, 0); | ||
| 51 | |||
| 52 | ul += -(unsigned long)1; | ||
| 53 | __this_cpu_add(ulong_counter, -(unsigned long)1); | ||
| 54 | CHECK(ul, ulong_counter, -1); | ||
| 55 | |||
| 56 | ul = 0; | ||
| 57 | __this_cpu_write(ulong_counter, 0); | ||
| 58 | |||
| 59 | ul -= 1; | ||
| 60 | __this_cpu_dec(ulong_counter); | ||
| 61 | CHECK(ul, ulong_counter, -1); | ||
| 62 | CHECK(ul, ulong_counter, ULONG_MAX); | ||
| 63 | |||
| 64 | l += -ui_one; | ||
| 65 | __this_cpu_add(long_counter, -ui_one); | ||
| 66 | CHECK(l, long_counter, 0xffffffff); | ||
| 67 | |||
| 68 | l += ui_one; | ||
| 69 | __this_cpu_add(long_counter, ui_one); | ||
| 70 | CHECK(l, long_counter, (long)0x100000000LL); | ||
| 71 | |||
| 72 | |||
| 73 | l = 0; | ||
| 74 | __this_cpu_write(long_counter, 0); | ||
| 75 | |||
| 76 | l -= ui_one; | ||
| 77 | __this_cpu_sub(long_counter, ui_one); | ||
| 78 | CHECK(l, long_counter, -1); | ||
| 79 | |||
| 80 | l = 0; | ||
| 81 | __this_cpu_write(long_counter, 0); | ||
| 82 | |||
| 83 | l += ui_one; | ||
| 84 | __this_cpu_add(long_counter, ui_one); | ||
| 85 | CHECK(l, long_counter, 1); | ||
| 86 | |||
| 87 | l += -ui_one; | ||
| 88 | __this_cpu_add(long_counter, -ui_one); | ||
| 89 | CHECK(l, long_counter, (long)0x100000000LL); | ||
| 90 | |||
| 91 | l = 0; | ||
| 92 | __this_cpu_write(long_counter, 0); | ||
| 93 | |||
| 94 | l -= ui_one; | ||
| 95 | this_cpu_sub(long_counter, ui_one); | ||
| 96 | CHECK(l, long_counter, -1); | ||
| 97 | CHECK(l, long_counter, ULONG_MAX); | ||
| 98 | |||
| 99 | ul = 0; | ||
| 100 | __this_cpu_write(ulong_counter, 0); | ||
| 101 | |||
| 102 | ul += ui_one; | ||
| 103 | __this_cpu_add(ulong_counter, ui_one); | ||
| 104 | CHECK(ul, ulong_counter, 1); | ||
| 105 | |||
| 106 | ul = 0; | ||
| 107 | __this_cpu_write(ulong_counter, 0); | ||
| 108 | |||
| 109 | ul -= ui_one; | ||
| 110 | __this_cpu_sub(ulong_counter, ui_one); | ||
| 111 | CHECK(ul, ulong_counter, -1); | ||
| 112 | CHECK(ul, ulong_counter, ULONG_MAX); | ||
| 113 | |||
| 114 | ul = 3; | ||
| 115 | __this_cpu_write(ulong_counter, 3); | ||
| 116 | |||
| 117 | ul = this_cpu_sub_return(ulong_counter, ui_one); | ||
| 118 | CHECK(ul, ulong_counter, 2); | ||
| 119 | |||
| 120 | ul = __this_cpu_sub_return(ulong_counter, ui_one); | ||
| 121 | CHECK(ul, ulong_counter, 1); | ||
| 122 | |||
| 123 | preempt_enable(); | ||
| 124 | |||
| 125 | pr_info("percpu test done\n"); | ||
| 126 | return -EAGAIN; /* failing init unloads the module immediately */ | ||
| 127 | } | ||
| 128 | |||
| 129 | static void __exit percpu_test_exit(void) | ||
| 130 | { | ||
| 131 | } | ||
| 132 | |||
| 133 | module_init(percpu_test_init) | ||
| 134 | module_exit(percpu_test_exit) | ||
| 135 | |||
| 136 | MODULE_LICENSE("GPL"); | ||
| 137 | MODULE_AUTHOR("Greg Thelen"); | ||
| 138 | MODULE_DESCRIPTION("percpu operations test"); | ||
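Most of the module above probes one C pitfall: mixing an unsigned int operand into long per-cpu arithmetic. Negating an unsigned int yields a huge unsigned value that zero-extends when widened, while a true subtraction sign-extends, which is why this_cpu_sub(long_counter, ui_one) must land on -1 rather than 0xffffffff. The same arithmetic in a standalone sketch (assumes a 64-bit long):

    #include <stdio.h>

    int main(void)
    {
            long l = 0;
            unsigned int one = 1;

            l += -one;              /* -one is 0xffffffffU, zero-extended */
            printf("%ld\n", l);     /* prints 4294967295, not -1 */

            l = 0;
            l -= one;               /* a real subtraction sign-extends */
            printf("%ld\n", l);     /* prints -1 */
            return 0;
    }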
diff --git a/lib/random32.c b/lib/random32.c index 52280d5526be..1e5b2df44291 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
| @@ -2,19 +2,19 @@ | |||
| 2 | This is a maximally equidistributed combined Tausworthe generator | 2 | This is a maximally equidistributed combined Tausworthe generator |
| 3 | based on code from GNU Scientific Library 1.5 (30 Jun 2004) | 3 | based on code from GNU Scientific Library 1.5 (30 Jun 2004) |
| 4 | 4 | ||
| 5 | x_n = (s1_n ^ s2_n ^ s3_n) | 5 | lfsr113 version: |
| 6 | 6 | ||
| 7 | s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19)) | 7 | x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n) |
| 8 | s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25)) | ||
| 9 | s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11)) | ||
| 10 | 8 | ||
| 11 | The period of this generator is about 2^88. | 9 | s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13)) |
| 10 | s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27)) | ||
| 11 | s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21)) | ||
| 12 | s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12)) | ||
| 12 | 13 | ||
| 13 | From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe | 14 | The period of this generator is about 2^113 (see erratum paper). |
| 14 | Generators", Mathematics of Computation, 65, 213 (1996), 203--213. | ||
| 15 | |||
| 16 | This is available on the net from L'Ecuyer's home page, | ||
| 17 | 15 | ||
| 16 | From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe | ||
| 17 | Generators", Mathematics of Computation, 65, 213 (1996), 203--213: | ||
| 18 | http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps | 18 | http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps |
| 19 | ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps | 19 | ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps |
| 20 | 20 | ||
| @@ -29,7 +29,7 @@ | |||
| 29 | that paper.) | 29 | that paper.) |
| 30 | 30 | ||
| 31 | This affects the seeding procedure by imposing the requirement | 31 | This affects the seeding procedure by imposing the requirement |
| 32 | s1 > 1, s2 > 7, s3 > 15. | 32 | s1 > 1, s2 > 7, s3 > 15, s4 > 127. |
| 33 | 33 | ||
| 34 | */ | 34 | */ |
| 35 | 35 | ||
| @@ -38,6 +38,11 @@ | |||
| 38 | #include <linux/export.h> | 38 | #include <linux/export.h> |
| 39 | #include <linux/jiffies.h> | 39 | #include <linux/jiffies.h> |
| 40 | #include <linux/random.h> | 40 | #include <linux/random.h> |
| 41 | #include <linux/sched.h> | ||
| 42 | |||
| 43 | #ifdef CONFIG_RANDOM32_SELFTEST | ||
| 44 | static void __init prandom_state_selftest(void); | ||
| 45 | #endif | ||
| 41 | 46 | ||
| 42 | static DEFINE_PER_CPU(struct rnd_state, net_rand_state); | 47 | static DEFINE_PER_CPU(struct rnd_state, net_rand_state); |
| 43 | 48 | ||
| @@ -52,11 +57,12 @@ u32 prandom_u32_state(struct rnd_state *state) | |||
| 52 | { | 57 | { |
| 53 | #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) | 58 | #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) |
| 54 | 59 | ||
| 55 | state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12); | 60 | state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U); |
| 56 | state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4); | 61 | state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U); |
| 57 | state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17); | 62 | state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U); |
| 63 | state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U); | ||
| 58 | 64 | ||
| 59 | return (state->s1 ^ state->s2 ^ state->s3); | 65 | return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4); |
| 60 | } | 66 | } |
| 61 | EXPORT_SYMBOL(prandom_u32_state); | 67 | EXPORT_SYMBOL(prandom_u32_state); |
| 62 | 68 | ||
| @@ -126,6 +132,38 @@ void prandom_bytes(void *buf, int bytes) | |||
| 126 | } | 132 | } |
| 127 | EXPORT_SYMBOL(prandom_bytes); | 133 | EXPORT_SYMBOL(prandom_bytes); |
| 128 | 134 | ||
| 135 | static void prandom_warmup(struct rnd_state *state) | ||
| 136 | { | ||
| 137 | /* Call the RNG ten times to satisfy the recurrence condition */ | ||
| 138 | prandom_u32_state(state); | ||
| 139 | prandom_u32_state(state); | ||
| 140 | prandom_u32_state(state); | ||
| 141 | prandom_u32_state(state); | ||
| 142 | prandom_u32_state(state); | ||
| 143 | prandom_u32_state(state); | ||
| 144 | prandom_u32_state(state); | ||
| 145 | prandom_u32_state(state); | ||
| 146 | prandom_u32_state(state); | ||
| 147 | prandom_u32_state(state); | ||
| 148 | } | ||
| 149 | |||
| 150 | static void prandom_seed_very_weak(struct rnd_state *state, u32 seed) | ||
| 151 | { | ||
| 152 | /* Note: this sort of seeding is ONLY used in test cases and | ||
| 153 | * during boot, from core_initcall until late_initcall, while | ||
| 154 | * no stronger entropy source is available yet. | ||
| 155 | * After late_initcall we must (!) reseed the entire state, | ||
| 156 | * otherwise an attacker who knows a couple of prandom32 | ||
| 157 | * outputs only needs to search a 32 bit space to recover | ||
| 158 | * our internal 128 bit state! | ||
| 159 | */ | ||
| 160 | #define LCG(x) ((x) * 69069U) /* super-duper LCG */ | ||
| 161 | state->s1 = __seed(LCG(seed), 2U); | ||
| 162 | state->s2 = __seed(LCG(state->s1), 8U); | ||
| 163 | state->s3 = __seed(LCG(state->s2), 16U); | ||
| 164 | state->s4 = __seed(LCG(state->s3), 128U); | ||
| 165 | } | ||
| 166 | |||
| 129 | /** | 167 | /** |
| 130 | * prandom_seed - add entropy to pseudo random number generator | 168 | * prandom_seed - add entropy to pseudo random number generator |
| 131 | * @seed: seed value | 169 | * @seed: seed value |
| @@ -141,7 +179,9 @@ void prandom_seed(u32 entropy) | |||
| 141 | */ | 179 | */ |
| 142 | for_each_possible_cpu (i) { | 180 | for_each_possible_cpu (i) { |
| 143 | struct rnd_state *state = &per_cpu(net_rand_state, i); | 181 | struct rnd_state *state = &per_cpu(net_rand_state, i); |
| 144 | state->s1 = __seed(state->s1 ^ entropy, 1); | 182 | |
| 183 | state->s1 = __seed(state->s1 ^ entropy, 2U); | ||
| 184 | prandom_warmup(state); | ||
| 145 | } | 185 | } |
| 146 | } | 186 | } |
| 147 | EXPORT_SYMBOL(prandom_seed); | 187 | EXPORT_SYMBOL(prandom_seed); |
| @@ -154,46 +194,249 @@ static int __init prandom_init(void) | |||
| 154 | { | 194 | { |
| 155 | int i; | 195 | int i; |
| 156 | 196 | ||
| 197 | #ifdef CONFIG_RANDOM32_SELFTEST | ||
| 198 | prandom_state_selftest(); | ||
| 199 | #endif | ||
| 200 | |||
| 157 | for_each_possible_cpu(i) { | 201 | for_each_possible_cpu(i) { |
| 158 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 202 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
| 159 | 203 | ||
| 160 | #define LCG(x) ((x) * 69069) /* super-duper LCG */ | 204 | prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); |
| 161 | state->s1 = __seed(LCG(i + jiffies), 1); | 205 | prandom_warmup(state); |
| 162 | state->s2 = __seed(LCG(state->s1), 7); | ||
| 163 | state->s3 = __seed(LCG(state->s2), 15); | ||
| 164 | |||
| 165 | /* "warm it up" */ | ||
| 166 | prandom_u32_state(state); | ||
| 167 | prandom_u32_state(state); | ||
| 168 | prandom_u32_state(state); | ||
| 169 | prandom_u32_state(state); | ||
| 170 | prandom_u32_state(state); | ||
| 171 | prandom_u32_state(state); | ||
| 172 | } | 206 | } |
| 173 | return 0; | 207 | return 0; |
| 174 | } | 208 | } |
| 175 | core_initcall(prandom_init); | 209 | core_initcall(prandom_init); |
| 176 | 210 | ||
| 211 | static void __prandom_timer(unsigned long dontcare); | ||
| 212 | static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); | ||
| 213 | |||
| 214 | static void __prandom_timer(unsigned long dontcare) | ||
| 215 | { | ||
| 216 | u32 entropy; | ||
| 217 | unsigned long expires; | ||
| 218 | |||
| 219 | get_random_bytes(&entropy, sizeof(entropy)); | ||
| 220 | prandom_seed(entropy); | ||
| 221 | |||
| 222 | /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ | ||
| 223 | expires = 40 + (prandom_u32() % 40); | ||
| 224 | seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); | ||
| 225 | |||
| 226 | add_timer(&seed_timer); | ||
| 227 | } | ||
| 228 | |||
| 229 | static void __init __prandom_start_seed_timer(void) | ||
| 230 | { | ||
| 231 | set_timer_slack(&seed_timer, HZ); | ||
| 232 | seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); | ||
| 233 | add_timer(&seed_timer); | ||
| 234 | } | ||
| 235 | |||
| 177 | /* | 236 | /* |
| 178 | * Generate better values after random number generator | 237 | * Generate better values after random number generator |
| 179 | * is fully initialized. | 238 | * is fully initialized. |
| 180 | */ | 239 | */ |
| 181 | static int __init prandom_reseed(void) | 240 | static void __prandom_reseed(bool late) |
| 182 | { | 241 | { |
| 183 | int i; | 242 | int i; |
| 243 | unsigned long flags; | ||
| 244 | static bool latch = false; | ||
| 245 | static DEFINE_SPINLOCK(lock); | ||
| 246 | |||
| 247 | /* only allow initial seeding (late == false) once */ | ||
| 248 | spin_lock_irqsave(&lock, flags); | ||
| 249 | if (latch && !late) | ||
| 250 | goto out; | ||
| 251 | latch = true; | ||
| 184 | 252 | ||
| 185 | for_each_possible_cpu(i) { | 253 | for_each_possible_cpu(i) { |
| 186 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 254 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
| 187 | u32 seeds[3]; | 255 | u32 seeds[4]; |
| 188 | 256 | ||
| 189 | get_random_bytes(&seeds, sizeof(seeds)); | 257 | get_random_bytes(&seeds, sizeof(seeds)); |
| 190 | state->s1 = __seed(seeds[0], 1); | 258 | state->s1 = __seed(seeds[0], 2U); |
| 191 | state->s2 = __seed(seeds[1], 7); | 259 | state->s2 = __seed(seeds[1], 8U); |
| 192 | state->s3 = __seed(seeds[2], 15); | 260 | state->s3 = __seed(seeds[2], 16U); |
| 261 | state->s4 = __seed(seeds[3], 128U); | ||
| 193 | 262 | ||
| 194 | /* mix it in */ | 263 | prandom_warmup(state); |
| 195 | prandom_u32_state(state); | ||
| 196 | } | 264 | } |
| 265 | out: | ||
| 266 | spin_unlock_irqrestore(&lock, flags); | ||
| 267 | } | ||
| 268 | |||
| 269 | void prandom_reseed_late(void) | ||
| 270 | { | ||
| 271 | __prandom_reseed(true); | ||
| 272 | } | ||
| 273 | |||
| 274 | static int __init prandom_reseed(void) | ||
| 275 | { | ||
| 276 | __prandom_reseed(false); | ||
| 277 | __prandom_start_seed_timer(); | ||
| 197 | return 0; | 278 | return 0; |
| 198 | } | 279 | } |
| 199 | late_initcall(prandom_reseed); | 280 | late_initcall(prandom_reseed); |
| 281 | |||
| 282 | #ifdef CONFIG_RANDOM32_SELFTEST | ||
| 283 | static struct prandom_test1 { | ||
| 284 | u32 seed; | ||
| 285 | u32 result; | ||
| 286 | } test1[] = { | ||
| 287 | { 1U, 3484351685U }, | ||
| 288 | { 2U, 2623130059U }, | ||
| 289 | { 3U, 3125133893U }, | ||
| 290 | { 4U, 984847254U }, | ||
| 291 | }; | ||
| 292 | |||
| 293 | static struct prandom_test2 { | ||
| 294 | u32 seed; | ||
| 295 | u32 iteration; | ||
| 296 | u32 result; | ||
| 297 | } test2[] = { | ||
| 298 | /* Test cases against taus113 from GSL library. */ | ||
| 299 | { 931557656U, 959U, 2975593782U }, | ||
| 300 | { 1339693295U, 876U, 3887776532U }, | ||
| 301 | { 1545556285U, 961U, 1615538833U }, | ||
| 302 | { 601730776U, 723U, 1776162651U }, | ||
| 303 | { 1027516047U, 687U, 511983079U }, | ||
| 304 | { 416526298U, 700U, 916156552U }, | ||
| 305 | { 1395522032U, 652U, 2222063676U }, | ||
| 306 | { 366221443U, 617U, 2992857763U }, | ||
| 307 | { 1539836965U, 714U, 3783265725U }, | ||
| 308 | { 556206671U, 994U, 799626459U }, | ||
| 309 | { 684907218U, 799U, 367789491U }, | ||
| 310 | { 2121230701U, 931U, 2115467001U }, | ||
| 311 | { 1668516451U, 644U, 3620590685U }, | ||
| 312 | { 768046066U, 883U, 2034077390U }, | ||
| 313 | { 1989159136U, 833U, 1195767305U }, | ||
| 314 | { 536585145U, 996U, 3577259204U }, | ||
| 315 | { 1008129373U, 642U, 1478080776U }, | ||
| 316 | { 1740775604U, 939U, 1264980372U }, | ||
| 317 | { 1967883163U, 508U, 10734624U }, | ||
| 318 | { 1923019697U, 730U, 3821419629U }, | ||
| 319 | { 442079932U, 560U, 3440032343U }, | ||
| 320 | { 1961302714U, 845U, 841962572U }, | ||
| 321 | { 2030205964U, 962U, 1325144227U }, | ||
| 322 | { 1160407529U, 507U, 240940858U }, | ||
| 323 | { 635482502U, 779U, 4200489746U }, | ||
| 324 | { 1252788931U, 699U, 867195434U }, | ||
| 325 | { 1961817131U, 719U, 668237657U }, | ||
| 326 | { 1071468216U, 983U, 917876630U }, | ||
| 327 | { 1281848367U, 932U, 1003100039U }, | ||
| 328 | { 582537119U, 780U, 1127273778U }, | ||
| 329 | { 1973672777U, 853U, 1071368872U }, | ||
| 330 | { 1896756996U, 762U, 1127851055U }, | ||
| 331 | { 847917054U, 500U, 1717499075U }, | ||
| 332 | { 1240520510U, 951U, 2849576657U }, | ||
| 333 | { 1685071682U, 567U, 1961810396U }, | ||
| 334 | { 1516232129U, 557U, 3173877U }, | ||
| 335 | { 1208118903U, 612U, 1613145022U }, | ||
| 336 | { 1817269927U, 693U, 4279122573U }, | ||
| 337 | { 1510091701U, 717U, 638191229U }, | ||
| 338 | { 365916850U, 807U, 600424314U }, | ||
| 339 | { 399324359U, 702U, 1803598116U }, | ||
| 340 | { 1318480274U, 779U, 2074237022U }, | ||
| 341 | { 697758115U, 840U, 1483639402U }, | ||
| 342 | { 1696507773U, 840U, 577415447U }, | ||
| 343 | { 2081979121U, 981U, 3041486449U }, | ||
| 344 | { 955646687U, 742U, 3846494357U }, | ||
| 345 | { 1250683506U, 749U, 836419859U }, | ||
| 346 | { 595003102U, 534U, 366794109U }, | ||
| 347 | { 47485338U, 558U, 3521120834U }, | ||
| 348 | { 619433479U, 610U, 3991783875U }, | ||
| 349 | { 704096520U, 518U, 4139493852U }, | ||
| 350 | { 1712224984U, 606U, 2393312003U }, | ||
| 351 | { 1318233152U, 922U, 3880361134U }, | ||
| 352 | { 855572992U, 761U, 1472974787U }, | ||
| 353 | { 64721421U, 703U, 683860550U }, | ||
| 354 | { 678931758U, 840U, 380616043U }, | ||
| 355 | { 692711973U, 778U, 1382361947U }, | ||
| 356 | { 677703619U, 530U, 2826914161U }, | ||
| 357 | { 92393223U, 586U, 1522128471U }, | ||
| 358 | { 1222592920U, 743U, 3466726667U }, | ||
| 359 | { 358288986U, 695U, 1091956998U }, | ||
| 360 | { 1935056945U, 958U, 514864477U }, | ||
| 361 | { 735675993U, 990U, 1294239989U }, | ||
| 362 | { 1560089402U, 897U, 2238551287U }, | ||
| 363 | { 70616361U, 829U, 22483098U }, | ||
| 364 | { 368234700U, 731U, 2913875084U }, | ||
| 365 | { 20221190U, 879U, 1564152970U }, | ||
| 366 | { 539444654U, 682U, 1835141259U }, | ||
| 367 | { 1314987297U, 840U, 1801114136U }, | ||
| 368 | { 2019295544U, 645U, 3286438930U }, | ||
| 369 | { 469023838U, 716U, 1637918202U }, | ||
| 370 | { 1843754496U, 653U, 2562092152U }, | ||
| 371 | { 400672036U, 809U, 4264212785U }, | ||
| 372 | { 404722249U, 965U, 2704116999U }, | ||
| 373 | { 600702209U, 758U, 584979986U }, | ||
| 374 | { 519953954U, 667U, 2574436237U }, | ||
| 375 | { 1658071126U, 694U, 2214569490U }, | ||
| 376 | { 420480037U, 749U, 3430010866U }, | ||
| 377 | { 690103647U, 969U, 3700758083U }, | ||
| 378 | { 1029424799U, 937U, 3787746841U }, | ||
| 379 | { 2012608669U, 506U, 3362628973U }, | ||
| 380 | { 1535432887U, 998U, 42610943U }, | ||
| 381 | { 1330635533U, 857U, 3040806504U }, | ||
| 382 | { 1223800550U, 539U, 3954229517U }, | ||
| 383 | { 1322411537U, 680U, 3223250324U }, | ||
| 384 | { 1877847898U, 945U, 2915147143U }, | ||
| 385 | { 1646356099U, 874U, 965988280U }, | ||
| 386 | { 805687536U, 744U, 4032277920U }, | ||
| 387 | { 1948093210U, 633U, 1346597684U }, | ||
| 388 | { 392609744U, 783U, 1636083295U }, | ||
| 389 | { 690241304U, 770U, 1201031298U }, | ||
| 390 | { 1360302965U, 696U, 1665394461U }, | ||
| 391 | { 1220090946U, 780U, 1316922812U }, | ||
| 392 | { 447092251U, 500U, 3438743375U }, | ||
| 393 | { 1613868791U, 592U, 828546883U }, | ||
| 394 | { 523430951U, 548U, 2552392304U }, | ||
| 395 | { 726692899U, 810U, 1656872867U }, | ||
| 396 | { 1364340021U, 836U, 3710513486U }, | ||
| 397 | { 1986257729U, 931U, 935013962U }, | ||
| 398 | { 407983964U, 921U, 728767059U }, | ||
| 399 | }; | ||
| 400 | |||
| 401 | static void __init prandom_state_selftest(void) | ||
| 402 | { | ||
| 403 | int i, j, errors = 0, runs = 0; | ||
| 404 | bool error = false; | ||
| 405 | |||
| 406 | for (i = 0; i < ARRAY_SIZE(test1); i++) { | ||
| 407 | struct rnd_state state; | ||
| 408 | |||
| 409 | prandom_seed_very_weak(&state, test1[i].seed); | ||
| 410 | prandom_warmup(&state); | ||
| 411 | |||
| 412 | if (test1[i].result != prandom_u32_state(&state)) | ||
| 413 | error = true; | ||
| 414 | } | ||
| 415 | |||
| 416 | if (error) | ||
| 417 | pr_warn("prandom: seed boundary self test failed\n"); | ||
| 418 | else | ||
| 419 | pr_info("prandom: seed boundary self test passed\n"); | ||
| 420 | |||
| 421 | for (i = 0; i < ARRAY_SIZE(test2); i++) { | ||
| 422 | struct rnd_state state; | ||
| 423 | |||
| 424 | prandom_seed_very_weak(&state, test2[i].seed); | ||
| 425 | prandom_warmup(&state); | ||
| 426 | |||
| 427 | for (j = 0; j < test2[i].iteration - 1; j++) | ||
| 428 | prandom_u32_state(&state); | ||
| 429 | |||
| 430 | if (test2[i].result != prandom_u32_state(&state)) | ||
| 431 | errors++; | ||
| 432 | |||
| 433 | runs++; | ||
| 434 | cond_resched(); | ||
| 435 | } | ||
| 436 | |||
| 437 | if (errors) | ||
| 438 | pr_warn("prandom: %d/%d self tests failed\n", errors, runs); | ||
| 439 | else | ||
| 440 | pr_info("prandom: %d self tests passed\n", runs); | ||
| 441 | } | ||
| 442 | #endif | ||
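The new recurrence is easy to check outside the kernel. This sketch mirrors the taus113 step, the LCG seeding and the ten-round warmup exactly as in the diff (seed_min() plays the role of the kernel's __seed(), which bumps a too-small word above its threshold); run with seed 1 it should print 3484351685, the first test1 vector:

    #include <stdint.h>
    #include <stdio.h>

    struct taus113 { uint32_t s1, s2, s3, s4; };

    #define TAUS(s, a, b, c, d) \
            ((((s) & (c)) << (d)) ^ ((((s) << (a)) ^ (s)) >> (b)))

    static uint32_t taus113_next(struct taus113 *p)
    {
            p->s1 = TAUS(p->s1,  6, 13, 4294967294U, 18);
            p->s2 = TAUS(p->s2,  2, 27, 4294967288U,  2);
            p->s3 = TAUS(p->s3, 13, 21, 4294967280U,  7);
            p->s4 = TAUS(p->s4,  3, 12, 4294967168U, 13);
            return p->s1 ^ p->s2 ^ p->s3 ^ p->s4;
    }

    static uint32_t seed_min(uint32_t x, uint32_t m)
    {
            return (x < m) ? x + m : x;     /* the kernel's __seed() */
    }

    int main(void)
    {
            struct taus113 s;
            uint32_t seed = 1;
            int i;

            /* prandom_seed_very_weak(), with LCG(x) = x * 69069 */
            s.s1 = seed_min(seed * 69069U, 2U);
            s.s2 = seed_min(s.s1 * 69069U, 8U);
            s.s3 = seed_min(s.s2 * 69069U, 16U);
            s.s4 = seed_min(s.s3 * 69069U, 128U);

            for (i = 0; i < 10; i++)        /* prandom_warmup() */
                    taus113_next(&s);

            printf("%u\n", taus113_next(&s));  /* expect 3484351685 */
            return 0;
    }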
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c deleted file mode 100644 index 9be8a9144978..000000000000 --- a/lib/rwsem-spinlock.c +++ /dev/null | |||
| @@ -1,296 +0,0 @@ | |||
| 1 | /* rwsem-spinlock.c: R/W semaphores: contention handling functions for | ||
| 2 | * generic spinlock implementation | ||
| 3 | * | ||
| 4 | * Copyright (c) 2001 David Howells (dhowells@redhat.com). | ||
| 5 | * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> | ||
| 6 | * - Derived also from comments by Linus | ||
| 7 | */ | ||
| 8 | #include <linux/rwsem.h> | ||
| 9 | #include <linux/sched.h> | ||
| 10 | #include <linux/export.h> | ||
| 11 | |||
| 12 | enum rwsem_waiter_type { | ||
| 13 | RWSEM_WAITING_FOR_WRITE, | ||
| 14 | RWSEM_WAITING_FOR_READ | ||
| 15 | }; | ||
| 16 | |||
| 17 | struct rwsem_waiter { | ||
| 18 | struct list_head list; | ||
| 19 | struct task_struct *task; | ||
| 20 | enum rwsem_waiter_type type; | ||
| 21 | }; | ||
| 22 | |||
| 23 | int rwsem_is_locked(struct rw_semaphore *sem) | ||
| 24 | { | ||
| 25 | int ret = 1; | ||
| 26 | unsigned long flags; | ||
| 27 | |||
| 28 | if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { | ||
| 29 | ret = (sem->activity != 0); | ||
| 30 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 31 | } | ||
| 32 | return ret; | ||
| 33 | } | ||
| 34 | EXPORT_SYMBOL(rwsem_is_locked); | ||
| 35 | |||
| 36 | /* | ||
| 37 | * initialise the semaphore | ||
| 38 | */ | ||
| 39 | void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
| 40 | struct lock_class_key *key) | ||
| 41 | { | ||
| 42 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 43 | /* | ||
| 44 | * Make sure we are not reinitializing a held semaphore: | ||
| 45 | */ | ||
| 46 | debug_check_no_locks_freed((void *)sem, sizeof(*sem)); | ||
| 47 | lockdep_init_map(&sem->dep_map, name, key, 0); | ||
| 48 | #endif | ||
| 49 | sem->activity = 0; | ||
| 50 | raw_spin_lock_init(&sem->wait_lock); | ||
| 51 | INIT_LIST_HEAD(&sem->wait_list); | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL(__init_rwsem); | ||
| 54 | |||
| 55 | /* | ||
| 56 | * handle the lock release when there are processes blocked on it that can now run | ||
| 57 | * - if we come here, then: | ||
| 58 | * - the 'active count' _reached_ zero | ||
| 59 | * - the 'waiting count' is non-zero | ||
| 60 | * - the spinlock must be held by the caller | ||
| 61 | * - woken process blocks are discarded from the list after having task zeroed | ||
| 62 | * - writers are only woken if wakewrite is non-zero | ||
| 63 | */ | ||
| 64 | static inline struct rw_semaphore * | ||
| 65 | __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) | ||
| 66 | { | ||
| 67 | struct rwsem_waiter *waiter; | ||
| 68 | struct task_struct *tsk; | ||
| 69 | int woken; | ||
| 70 | |||
| 71 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
| 72 | |||
| 73 | if (waiter->type == RWSEM_WAITING_FOR_WRITE) { | ||
| 74 | if (wakewrite) | ||
| 75 | /* Wake up a writer. Note that we do not grant it the | ||
| 76 | * lock - it will have to acquire it when it runs. */ | ||
| 77 | wake_up_process(waiter->task); | ||
| 78 | goto out; | ||
| 79 | } | ||
| 80 | |||
| 81 | /* grant an infinite number of read locks to the front of the queue */ | ||
| 82 | woken = 0; | ||
| 83 | do { | ||
| 84 | struct list_head *next = waiter->list.next; | ||
| 85 | |||
| 86 | list_del(&waiter->list); | ||
| 87 | tsk = waiter->task; | ||
| 88 | smp_mb(); | ||
| 89 | waiter->task = NULL; | ||
| 90 | wake_up_process(tsk); | ||
| 91 | put_task_struct(tsk); | ||
| 92 | woken++; | ||
| 93 | if (next == &sem->wait_list) | ||
| 94 | break; | ||
| 95 | waiter = list_entry(next, struct rwsem_waiter, list); | ||
| 96 | } while (waiter->type != RWSEM_WAITING_FOR_WRITE); | ||
| 97 | |||
| 98 | sem->activity += woken; | ||
| 99 | |||
| 100 | out: | ||
| 101 | return sem; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * wake a single writer | ||
| 106 | */ | ||
| 107 | static inline struct rw_semaphore * | ||
| 108 | __rwsem_wake_one_writer(struct rw_semaphore *sem) | ||
| 109 | { | ||
| 110 | struct rwsem_waiter *waiter; | ||
| 111 | |||
| 112 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
| 113 | wake_up_process(waiter->task); | ||
| 114 | |||
| 115 | return sem; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * get a read lock on the semaphore | ||
| 120 | */ | ||
| 121 | void __sched __down_read(struct rw_semaphore *sem) | ||
| 122 | { | ||
| 123 | struct rwsem_waiter waiter; | ||
| 124 | struct task_struct *tsk; | ||
| 125 | unsigned long flags; | ||
| 126 | |||
| 127 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 128 | |||
| 129 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | ||
| 130 | /* granted */ | ||
| 131 | sem->activity++; | ||
| 132 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 133 | goto out; | ||
| 134 | } | ||
| 135 | |||
| 136 | tsk = current; | ||
| 137 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
| 138 | |||
| 139 | /* set up my own style of waitqueue */ | ||
| 140 | waiter.task = tsk; | ||
| 141 | waiter.type = RWSEM_WAITING_FOR_READ; | ||
| 142 | get_task_struct(tsk); | ||
| 143 | |||
| 144 | list_add_tail(&waiter.list, &sem->wait_list); | ||
| 145 | |||
| 146 | /* we don't need to touch the semaphore struct anymore */ | ||
| 147 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 148 | |||
| 149 | /* wait to be given the lock */ | ||
| 150 | for (;;) { | ||
| 151 | if (!waiter.task) | ||
| 152 | break; | ||
| 153 | schedule(); | ||
| 154 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
| 155 | } | ||
| 156 | |||
| 157 | tsk->state = TASK_RUNNING; | ||
| 158 | out: | ||
| 159 | ; | ||
| 160 | } | ||
| 161 | |||
| 162 | /* | ||
| 163 | * trylock for reading -- returns 1 if successful, 0 if contention | ||
| 164 | */ | ||
| 165 | int __down_read_trylock(struct rw_semaphore *sem) | ||
| 166 | { | ||
| 167 | unsigned long flags; | ||
| 168 | int ret = 0; | ||
| 169 | |||
| 170 | |||
| 171 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 172 | |||
| 173 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | ||
| 174 | /* granted */ | ||
| 175 | sem->activity++; | ||
| 176 | ret = 1; | ||
| 177 | } | ||
| 178 | |||
| 179 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 180 | |||
| 181 | return ret; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* | ||
| 185 | * get a write lock on the semaphore | ||
| 186 | */ | ||
| 187 | void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | ||
| 188 | { | ||
| 189 | struct rwsem_waiter waiter; | ||
| 190 | struct task_struct *tsk; | ||
| 191 | unsigned long flags; | ||
| 192 | |||
| 193 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 194 | |||
| 195 | /* set up my own style of waitqueue */ | ||
| 196 | tsk = current; | ||
| 197 | waiter.task = tsk; | ||
| 198 | waiter.type = RWSEM_WAITING_FOR_WRITE; | ||
| 199 | list_add_tail(&waiter.list, &sem->wait_list); | ||
| 200 | |||
| 201 | /* wait for someone to release the lock */ | ||
| 202 | for (;;) { | ||
| 203 | /* | ||
| 204 | * This is the key to write lock stealing: the task already | ||
| 205 | * on the CPU may take the lock right away rather than go to | ||
| 206 | * sleep and wait for the system to wake it (or whoever is at | ||
| 207 | * the head of the wait list) up. | ||
| 208 | */ | ||
| 209 | if (sem->activity == 0) | ||
| 210 | break; | ||
| 211 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
| 212 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 213 | schedule(); | ||
| 214 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 215 | } | ||
| 216 | /* got the lock */ | ||
| 217 | sem->activity = -1; | ||
| 218 | list_del(&waiter.list); | ||
| 219 | |||
| 220 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 221 | } | ||
| 222 | |||
| 223 | void __sched __down_write(struct rw_semaphore *sem) | ||
| 224 | { | ||
| 225 | __down_write_nested(sem, 0); | ||
| 226 | } | ||
| 227 | |||
| 228 | /* | ||
| 229 | * trylock for writing -- returns 1 if successful, 0 if contention | ||
| 230 | */ | ||
| 231 | int __down_write_trylock(struct rw_semaphore *sem) | ||
| 232 | { | ||
| 233 | unsigned long flags; | ||
| 234 | int ret = 0; | ||
| 235 | |||
| 236 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 237 | |||
| 238 | if (sem->activity == 0) { | ||
| 239 | /* got the lock */ | ||
| 240 | sem->activity = -1; | ||
| 241 | ret = 1; | ||
| 242 | } | ||
| 243 | |||
| 244 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 245 | |||
| 246 | return ret; | ||
| 247 | } | ||
| 248 | |||
| 249 | /* | ||
| 250 | * release a read lock on the semaphore | ||
| 251 | */ | ||
| 252 | void __up_read(struct rw_semaphore *sem) | ||
| 253 | { | ||
| 254 | unsigned long flags; | ||
| 255 | |||
| 256 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 257 | |||
| 258 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) | ||
| 259 | sem = __rwsem_wake_one_writer(sem); | ||
| 260 | |||
| 261 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 262 | } | ||
| 263 | |||
| 264 | /* | ||
| 265 | * release a write lock on the semaphore | ||
| 266 | */ | ||
| 267 | void __up_write(struct rw_semaphore *sem) | ||
| 268 | { | ||
| 269 | unsigned long flags; | ||
| 270 | |||
| 271 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 272 | |||
| 273 | sem->activity = 0; | ||
| 274 | if (!list_empty(&sem->wait_list)) | ||
| 275 | sem = __rwsem_do_wake(sem, 1); | ||
| 276 | |||
| 277 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 278 | } | ||
| 279 | |||
| 280 | /* | ||
| 281 | * downgrade a write lock into a read lock | ||
| 282 | * - just wake up any readers at the front of the queue | ||
| 283 | */ | ||
| 284 | void __downgrade_write(struct rw_semaphore *sem) | ||
| 285 | { | ||
| 286 | unsigned long flags; | ||
| 287 | |||
| 288 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 289 | |||
| 290 | sem->activity = 1; | ||
| 291 | if (!list_empty(&sem->wait_list)) | ||
| 292 | sem = __rwsem_do_wake(sem, 0); | ||
| 293 | |||
| 294 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 295 | } | ||
| 296 | |||
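For the record, the deleted file hinges on a sem->activity encoding it never states in one place:

    /*
     * sem->activity >  0 : that many readers hold the lock
     * sem->activity == -1: one writer holds the lock
     * sem->activity ==  0: the lock is free; this is also the window in
     *                      which __down_write() above can steal it
     * All transitions happen under sem->wait_lock.
     */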
diff --git a/lib/rwsem.c b/lib/rwsem.c deleted file mode 100644 index 19c5fa95e0b4..000000000000 --- a/lib/rwsem.c +++ /dev/null | |||
| @@ -1,293 +0,0 @@ | |||
| 1 | /* rwsem.c: R/W semaphores: contention handling functions | ||
| 2 | * | ||
| 3 | * Written by David Howells (dhowells@redhat.com). | ||
| 4 | * Derived from arch/i386/kernel/semaphore.c | ||
| 5 | * | ||
| 6 | * Writer lock-stealing by Alex Shi <alex.shi@intel.com> | ||
| 7 | * and Michel Lespinasse <walken@google.com> | ||
| 8 | */ | ||
| 9 | #include <linux/rwsem.h> | ||
| 10 | #include <linux/sched.h> | ||
| 11 | #include <linux/init.h> | ||
| 12 | #include <linux/export.h> | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Initialize an rwsem: | ||
| 16 | */ | ||
| 17 | void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
| 18 | struct lock_class_key *key) | ||
| 19 | { | ||
| 20 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 21 | /* | ||
| 22 | * Make sure we are not reinitializing a held semaphore: | ||
| 23 | */ | ||
| 24 | debug_check_no_locks_freed((void *)sem, sizeof(*sem)); | ||
| 25 | lockdep_init_map(&sem->dep_map, name, key, 0); | ||
| 26 | #endif | ||
| 27 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
| 28 | raw_spin_lock_init(&sem->wait_lock); | ||
| 29 | INIT_LIST_HEAD(&sem->wait_list); | ||
| 30 | } | ||
| 31 | |||
| 32 | EXPORT_SYMBOL(__init_rwsem); | ||
| 33 | |||
| 34 | enum rwsem_waiter_type { | ||
| 35 | RWSEM_WAITING_FOR_WRITE, | ||
| 36 | RWSEM_WAITING_FOR_READ | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct rwsem_waiter { | ||
| 40 | struct list_head list; | ||
| 41 | struct task_struct *task; | ||
| 42 | enum rwsem_waiter_type type; | ||
| 43 | }; | ||
| 44 | |||
| 45 | enum rwsem_wake_type { | ||
| 46 | RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */ | ||
| 47 | RWSEM_WAKE_READERS, /* Wake readers only */ | ||
| 48 | RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | /* | ||
| 52 | * handle the lock release when processes blocked on it that can now run | ||
| 53 | * - if we come here from up_xxxx(), then: | ||
| 54 | * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) | ||
| 55 | * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) | ||
| 56 | * - there must be someone on the queue | ||
| 57 | * - the spinlock must be held by the caller | ||
| 58 | * - woken process blocks are discarded from the list after having task zeroed | ||
| 59 | * - writers are only woken if downgrading is false | ||
| 60 | */ | ||
| 61 | static struct rw_semaphore * | ||
| 62 | __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type) | ||
| 63 | { | ||
| 64 | struct rwsem_waiter *waiter; | ||
| 65 | struct task_struct *tsk; | ||
| 66 | struct list_head *next; | ||
| 67 | long oldcount, woken, loop, adjustment; | ||
| 68 | |||
| 69 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
| 70 | if (waiter->type == RWSEM_WAITING_FOR_WRITE) { | ||
| 71 | if (wake_type == RWSEM_WAKE_ANY) | ||
| 72 | /* Wake writer at the front of the queue, but do not | ||
| 73 | * grant it the lock yet as we want other writers | ||
| 74 | * to be able to steal it. Readers, on the other hand, | ||
| 75 | * will block as they will notice the queued writer. | ||
| 76 | */ | ||
| 77 | wake_up_process(waiter->task); | ||
| 78 | goto out; | ||
| 79 | } | ||
| 80 | |||
| 81 | /* Writers might steal the lock before we grant it to the next reader. | ||
| 82 | * We prefer to do the first reader grant before counting readers | ||
| 83 | * so we can bail out early if a writer stole the lock. | ||
| 84 | */ | ||
| 85 | adjustment = 0; | ||
| 86 | if (wake_type != RWSEM_WAKE_READ_OWNED) { | ||
| 87 | adjustment = RWSEM_ACTIVE_READ_BIAS; | ||
| 88 | try_reader_grant: | ||
| 89 | oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; | ||
| 90 | if (unlikely(oldcount < RWSEM_WAITING_BIAS)) { | ||
| 91 | /* A writer stole the lock. Undo our reader grant. */ | ||
| 92 | if (rwsem_atomic_update(-adjustment, sem) & | ||
| 93 | RWSEM_ACTIVE_MASK) | ||
| 94 | goto out; | ||
| 95 | /* Last active locker left. Retry waking readers. */ | ||
| 96 | goto try_reader_grant; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | /* Grant an infinite number of read locks to the readers at the front | ||
| 101 | * of the queue. Note we increment the 'active part' of the count by | ||
| 102 | * the number of readers before waking any processes up. | ||
| 103 | */ | ||
| 104 | woken = 0; | ||
| 105 | do { | ||
| 106 | woken++; | ||
| 107 | |||
| 108 | if (waiter->list.next == &sem->wait_list) | ||
| 109 | break; | ||
| 110 | |||
| 111 | waiter = list_entry(waiter->list.next, | ||
| 112 | struct rwsem_waiter, list); | ||
| 113 | |||
| 114 | } while (waiter->type != RWSEM_WAITING_FOR_WRITE); | ||
| 115 | |||
| 116 | adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; | ||
| 117 | if (waiter->type != RWSEM_WAITING_FOR_WRITE) | ||
| 118 | /* hit end of list above */ | ||
| 119 | adjustment -= RWSEM_WAITING_BIAS; | ||
| 120 | |||
| 121 | if (adjustment) | ||
| 122 | rwsem_atomic_add(adjustment, sem); | ||
| 123 | |||
| 124 | next = sem->wait_list.next; | ||
| 125 | loop = woken; | ||
| 126 | do { | ||
| 127 | waiter = list_entry(next, struct rwsem_waiter, list); | ||
| 128 | next = waiter->list.next; | ||
| 129 | tsk = waiter->task; | ||
| 130 | smp_mb(); | ||
| 131 | waiter->task = NULL; | ||
| 132 | wake_up_process(tsk); | ||
| 133 | put_task_struct(tsk); | ||
| 134 | } while (--loop); | ||
| 135 | |||
| 136 | sem->wait_list.next = next; | ||
| 137 | next->prev = &sem->wait_list; | ||
| 138 | |||
| 139 | out: | ||
| 140 | return sem; | ||
| 141 | } | ||
| 142 | |||
| 143 | /* | ||
| 144 | * wait for the read lock to be granted | ||
| 145 | */ | ||
| 146 | struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) | ||
| 147 | { | ||
| 148 | long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; | ||
| 149 | struct rwsem_waiter waiter; | ||
| 150 | struct task_struct *tsk = current; | ||
| 151 | |||
| 152 | /* set up my own style of waitqueue */ | ||
| 153 | waiter.task = tsk; | ||
| 154 | waiter.type = RWSEM_WAITING_FOR_READ; | ||
| 155 | get_task_struct(tsk); | ||
| 156 | |||
| 157 | raw_spin_lock_irq(&sem->wait_lock); | ||
| 158 | if (list_empty(&sem->wait_list)) | ||
| 159 | adjustment += RWSEM_WAITING_BIAS; | ||
| 160 | list_add_tail(&waiter.list, &sem->wait_list); | ||
| 161 | |||
| 162 | /* we're now waiting on the lock, but no longer actively locking */ | ||
| 163 | count = rwsem_atomic_update(adjustment, sem); | ||
| 164 | |||
| 165 | /* If there are no active locks, wake the front queued process(es). | ||
| 166 | * | ||
| 167 | * If there are no writers and we are first in the queue, | ||
| 168 | * wake our own waiter to join the existing active readers ! | ||
| 169 | */ | ||
| 170 | if (count == RWSEM_WAITING_BIAS || | ||
| 171 | (count > RWSEM_WAITING_BIAS && | ||
| 172 | adjustment != -RWSEM_ACTIVE_READ_BIAS)) | ||
| 173 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); | ||
| 174 | |||
| 175 | raw_spin_unlock_irq(&sem->wait_lock); | ||
| 176 | |||
| 177 | /* wait to be given the lock */ | ||
| 178 | while (true) { | ||
| 179 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
| 180 | if (!waiter.task) | ||
| 181 | break; | ||
| 182 | schedule(); | ||
| 183 | } | ||
| 184 | |||
| 185 | tsk->state = TASK_RUNNING; | ||
| 186 | |||
| 187 | return sem; | ||
| 188 | } | ||
| 189 | |||
| 190 | /* | ||
| 191 | * wait until we successfully acquire the write lock | ||
| 192 | */ | ||
| 193 | struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) | ||
| 194 | { | ||
| 195 | long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS; | ||
| 196 | struct rwsem_waiter waiter; | ||
| 197 | struct task_struct *tsk = current; | ||
| 198 | |||
| 199 | /* set up my own style of waitqueue */ | ||
| 200 | waiter.task = tsk; | ||
| 201 | waiter.type = RWSEM_WAITING_FOR_WRITE; | ||
| 202 | |||
| 203 | raw_spin_lock_irq(&sem->wait_lock); | ||
| 204 | if (list_empty(&sem->wait_list)) | ||
| 205 | adjustment += RWSEM_WAITING_BIAS; | ||
| 206 | list_add_tail(&waiter.list, &sem->wait_list); | ||
| 207 | |||
| 208 | /* we're now waiting on the lock, but no longer actively locking */ | ||
| 209 | count = rwsem_atomic_update(adjustment, sem); | ||
| 210 | |||
| 211 | /* If there were already threads queued before us and there are no | ||
| 212 | * active writers, the lock must be read owned; so we try to wake | ||
| 213 | * any read locks that were queued ahead of us. */ | ||
| 214 | if (count > RWSEM_WAITING_BIAS && | ||
| 215 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) | ||
| 216 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS); | ||
| 217 | |||
| 218 | /* wait until we successfully acquire the lock */ | ||
| 219 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
| 220 | while (true) { | ||
| 221 | if (!(count & RWSEM_ACTIVE_MASK)) { | ||
| 222 | /* Try acquiring the write lock. */ | ||
| 223 | count = RWSEM_ACTIVE_WRITE_BIAS; | ||
| 224 | if (!list_is_singular(&sem->wait_list)) | ||
| 225 | count += RWSEM_WAITING_BIAS; | ||
| 226 | |||
| 227 | if (sem->count == RWSEM_WAITING_BIAS && | ||
| 228 | cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) == | ||
| 229 | RWSEM_WAITING_BIAS) | ||
| 230 | break; | ||
| 231 | } | ||
| 232 | |||
| 233 | raw_spin_unlock_irq(&sem->wait_lock); | ||
| 234 | |||
| 235 | /* Block until there are no active lockers. */ | ||
| 236 | do { | ||
| 237 | schedule(); | ||
| 238 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
| 239 | } while ((count = sem->count) & RWSEM_ACTIVE_MASK); | ||
| 240 | |||
| 241 | raw_spin_lock_irq(&sem->wait_lock); | ||
| 242 | } | ||
| 243 | |||
| 244 | list_del(&waiter.list); | ||
| 245 | raw_spin_unlock_irq(&sem->wait_lock); | ||
| 246 | tsk->state = TASK_RUNNING; | ||
| 247 | |||
| 248 | return sem; | ||
| 249 | } | ||
| 250 | |||
| 251 | /* | ||
| 252 | * handle waking up a waiter on the semaphore | ||
| 253 | * - up_read/up_write has decremented the active part of count if we come here | ||
| 254 | */ | ||
| 255 | struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | ||
| 256 | { | ||
| 257 | unsigned long flags; | ||
| 258 | |||
| 259 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 260 | |||
| 261 | /* do nothing if list empty */ | ||
| 262 | if (!list_empty(&sem->wait_list)) | ||
| 263 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); | ||
| 264 | |||
| 265 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 266 | |||
| 267 | return sem; | ||
| 268 | } | ||
| 269 | |||
| 270 | /* | ||
| 271 | * downgrade a write lock into a read lock | ||
| 272 | * - caller incremented waiting part of count and discovered it still negative | ||
| 273 | * - just wake up any readers at the front of the queue | ||
| 274 | */ | ||
| 275 | struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | ||
| 276 | { | ||
| 277 | unsigned long flags; | ||
| 278 | |||
| 279 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
| 280 | |||
| 281 | /* do nothing if list empty */ | ||
| 282 | if (!list_empty(&sem->wait_list)) | ||
| 283 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | ||
| 284 | |||
| 285 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 286 | |||
| 287 | return sem; | ||
| 288 | } | ||
| 289 | |||
| 290 | EXPORT_SYMBOL(rwsem_down_read_failed); | ||
| 291 | EXPORT_SYMBOL(rwsem_down_write_failed); | ||
| 292 | EXPORT_SYMBOL(rwsem_wake); | ||
| 293 | EXPORT_SYMBOL(rwsem_downgrade_wake); | ||
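The deleted slow paths lean on the packed count layout called the 'active part' and 'waiting part' in the comments above. A worked sketch of the 32-bit encoding, constants as in the generic rwsem header of this era:

    /*
     * RWSEM_UNLOCKED_VALUE     0x00000000
     * RWSEM_ACTIVE_BIAS        0x00000001
     * RWSEM_ACTIVE_MASK        0x0000ffff
     * RWSEM_WAITING_BIAS       (-0x00010000)
     * RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
     * RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
     *
     * Example: two readers hold the lock while one writer waits:
     *   count = 2 * RWSEM_ACTIVE_READ_BIAS + RWSEM_WAITING_BIAS
     *         = 2 - 65536 = 0xffff0002 (negative), which is how the
     *   up_read()/up_write() fast paths detect contention and end up
     *   in rwsem_wake() above.
     */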
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a685c8a79578..d16fa295ae1d 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter) | |||
| 577 | miter->__offset += miter->consumed; | 577 | miter->__offset += miter->consumed; |
| 578 | miter->__remaining -= miter->consumed; | 578 | miter->__remaining -= miter->consumed; |
| 579 | 579 | ||
| 580 | if (miter->__flags & SG_MITER_TO_SG) | 580 | if ((miter->__flags & SG_MITER_TO_SG) && |
| 581 | !PageSlab(miter->page)) | ||
| 581 | flush_kernel_dcache_page(miter->page); | 582 | flush_kernel_dcache_page(miter->page); |
| 582 | 583 | ||
| 583 | if (miter->__flags & SG_MITER_ATOMIC) { | 584 | if (miter->__flags & SG_MITER_ATOMIC) { |
diff --git a/lib/show_mem.c b/lib/show_mem.c index b7c72311ad0c..5847a4921b8e 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -12,8 +12,7 @@ | |||
| 12 | void show_mem(unsigned int filter) | 12 | void show_mem(unsigned int filter) |
| 13 | { | 13 | { |
| 14 | pg_data_t *pgdat; | 14 | pg_data_t *pgdat; |
| 15 | unsigned long total = 0, reserved = 0, shared = 0, | 15 | unsigned long total = 0, reserved = 0, highmem = 0; |
| 16 | nonshared = 0, highmem = 0; | ||
| 17 | 16 | ||
| 18 | printk("Mem-Info:\n"); | 17 | printk("Mem-Info:\n"); |
| 19 | show_free_areas(filter); | 18 | show_free_areas(filter); |
| @@ -22,43 +21,27 @@ void show_mem(unsigned int filter) | |||
| 22 | return; | 21 | return; |
| 23 | 22 | ||
| 24 | for_each_online_pgdat(pgdat) { | 23 | for_each_online_pgdat(pgdat) { |
| 25 | unsigned long i, flags; | 24 | unsigned long flags; |
| 25 | int zoneid; | ||
| 26 | 26 | ||
| 27 | pgdat_resize_lock(pgdat, &flags); | 27 | pgdat_resize_lock(pgdat, &flags); |
| 28 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | 28 | for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
| 29 | struct page *page; | 29 | struct zone *zone = &pgdat->node_zones[zoneid]; |
| 30 | unsigned long pfn = pgdat->node_start_pfn + i; | 30 | if (!populated_zone(zone)) |
| 31 | |||
| 32 | if (unlikely(!(i % MAX_ORDER_NR_PAGES))) | ||
| 33 | touch_nmi_watchdog(); | ||
| 34 | |||
| 35 | if (!pfn_valid(pfn)) | ||
| 36 | continue; | 31 | continue; |
| 37 | 32 | ||
| 38 | page = pfn_to_page(pfn); | 33 | total += zone->present_pages; |
| 39 | 34 | reserved += zone->present_pages - zone->managed_pages; | |
| 40 | if (PageHighMem(page)) | ||
| 41 | highmem++; | ||
| 42 | 35 | ||
| 43 | if (PageReserved(page)) | 36 | if (is_highmem_idx(zoneid)) |
| 44 | reserved++; | 37 | highmem += zone->present_pages; |
| 45 | else if (page_count(page) == 1) | ||
| 46 | nonshared++; | ||
| 47 | else if (page_count(page) > 1) | ||
| 48 | shared += page_count(page) - 1; | ||
| 49 | |||
| 50 | total++; | ||
| 51 | } | 38 | } |
| 52 | pgdat_resize_unlock(pgdat, &flags); | 39 | pgdat_resize_unlock(pgdat, &flags); |
| 53 | } | 40 | } |
| 54 | 41 | ||
| 55 | printk("%lu pages RAM\n", total); | 42 | printk("%lu pages RAM\n", total); |
| 56 | #ifdef CONFIG_HIGHMEM | 43 | printk("%lu pages HighMem/MovableOnly\n", highmem); |
| 57 | printk("%lu pages HighMem\n", highmem); | ||
| 58 | #endif | ||
| 59 | printk("%lu pages reserved\n", reserved); | 44 | printk("%lu pages reserved\n", reserved); |
| 60 | printk("%lu pages shared\n", shared); | ||
| 61 | printk("%lu pages non-shared\n", nonshared); | ||
| 62 | #ifdef CONFIG_QUICKLIST | 45 | #ifdef CONFIG_QUICKLIST |
| 63 | printk("%lu pages in pagetable cache\n", | 46 | printk("%lu pages in pagetable cache\n", |
| 64 | quicklist_total_size()); | 47 | quicklist_total_size()); |
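The rewrite swaps the pfn walk for per-zone bookkeeping. The arithmetic it relies on, per the zone fields of this era (and the reason the reserved delta must be accumulated with += above):

    /*
     * spanned_pages = pages in the zone's pfn range, holes included
     * present_pages = spanned_pages - holes
     * managed_pages = present_pages - pages reserved by the boot allocator
     *
     * so each populated zone contributes (present_pages - managed_pages)
     * reserved pages, summed over all zones of all online nodes.
     */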
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 4c0d0e51d49e..04abe53f12a1 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
| @@ -9,10 +9,9 @@ | |||
| 9 | 9 | ||
| 10 | notrace unsigned int debug_smp_processor_id(void) | 10 | notrace unsigned int debug_smp_processor_id(void) |
| 11 | { | 11 | { |
| 12 | unsigned long preempt_count = preempt_count(); | ||
| 13 | int this_cpu = raw_smp_processor_id(); | 12 | int this_cpu = raw_smp_processor_id(); |
| 14 | 13 | ||
| 15 | if (likely(preempt_count)) | 14 | if (likely(preempt_count())) |
| 16 | goto out; | 15 | goto out; |
| 17 | 16 | ||
| 18 | if (irqs_disabled()) | 17 | if (irqs_disabled()) |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c deleted file mode 100644 index 0374a596cffa..000000000000 --- a/lib/spinlock_debug.c +++ /dev/null | |||
| @@ -1,302 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 3 | * Released under the General Public License (GPL). | ||
| 4 | * | ||
| 5 | * This file contains the spinlock/rwlock implementations for | ||
| 6 | * DEBUG_SPINLOCK. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/spinlock.h> | ||
| 10 | #include <linux/nmi.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/debug_locks.h> | ||
| 13 | #include <linux/delay.h> | ||
| 14 | #include <linux/export.h> | ||
| 15 | |||
| 16 | void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, | ||
| 17 | struct lock_class_key *key) | ||
| 18 | { | ||
| 19 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 20 | /* | ||
| 21 | * Make sure we are not reinitializing a held lock: | ||
| 22 | */ | ||
| 23 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | ||
| 24 | lockdep_init_map(&lock->dep_map, name, key, 0); | ||
| 25 | #endif | ||
| 26 | lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | ||
| 27 | lock->magic = SPINLOCK_MAGIC; | ||
| 28 | lock->owner = SPINLOCK_OWNER_INIT; | ||
| 29 | lock->owner_cpu = -1; | ||
| 30 | } | ||
| 31 | |||
| 32 | EXPORT_SYMBOL(__raw_spin_lock_init); | ||
| 33 | |||
| 34 | void __rwlock_init(rwlock_t *lock, const char *name, | ||
| 35 | struct lock_class_key *key) | ||
| 36 | { | ||
| 37 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 38 | /* | ||
| 39 | * Make sure we are not reinitializing a held lock: | ||
| 40 | */ | ||
| 41 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | ||
| 42 | lockdep_init_map(&lock->dep_map, name, key, 0); | ||
| 43 | #endif | ||
| 44 | lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; | ||
| 45 | lock->magic = RWLOCK_MAGIC; | ||
| 46 | lock->owner = SPINLOCK_OWNER_INIT; | ||
| 47 | lock->owner_cpu = -1; | ||
| 48 | } | ||
| 49 | |||
| 50 | EXPORT_SYMBOL(__rwlock_init); | ||
| 51 | |||
| 52 | static void spin_dump(raw_spinlock_t *lock, const char *msg) | ||
| 53 | { | ||
| 54 | struct task_struct *owner = NULL; | ||
| 55 | |||
| 56 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | ||
| 57 | owner = lock->owner; | ||
| 58 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", | ||
| 59 | msg, raw_smp_processor_id(), | ||
| 60 | current->comm, task_pid_nr(current)); | ||
| 61 | printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, " | ||
| 62 | ".owner_cpu: %d\n", | ||
| 63 | lock, lock->magic, | ||
| 64 | owner ? owner->comm : "<none>", | ||
| 65 | owner ? task_pid_nr(owner) : -1, | ||
| 66 | lock->owner_cpu); | ||
| 67 | dump_stack(); | ||
| 68 | } | ||
| 69 | |||
| 70 | static void spin_bug(raw_spinlock_t *lock, const char *msg) | ||
| 71 | { | ||
| 72 | if (!debug_locks_off()) | ||
| 73 | return; | ||
| 74 | |||
| 75 | spin_dump(lock, msg); | ||
| 76 | } | ||
| 77 | |||
| 78 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | ||
| 79 | |||
| 80 | static inline void | ||
| 81 | debug_spin_lock_before(raw_spinlock_t *lock) | ||
| 82 | { | ||
| 83 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | ||
| 84 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | ||
| 85 | SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), | ||
| 86 | lock, "cpu recursion"); | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline void debug_spin_lock_after(raw_spinlock_t *lock) | ||
| 90 | { | ||
| 91 | lock->owner_cpu = raw_smp_processor_id(); | ||
| 92 | lock->owner = current; | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline void debug_spin_unlock(raw_spinlock_t *lock) | ||
| 96 | { | ||
| 97 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | ||
| 98 | SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); | ||
| 99 | SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); | ||
| 100 | SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), | ||
| 101 | lock, "wrong CPU"); | ||
| 102 | lock->owner = SPINLOCK_OWNER_INIT; | ||
| 103 | lock->owner_cpu = -1; | ||
| 104 | } | ||
| 105 | |||
| 106 | static void __spin_lock_debug(raw_spinlock_t *lock) | ||
| 107 | { | ||
| 108 | u64 i; | ||
| 109 | u64 loops = loops_per_jiffy * HZ; | ||
| 110 | |||
| 111 | for (i = 0; i < loops; i++) { | ||
| 112 | if (arch_spin_trylock(&lock->raw_lock)) | ||
| 113 | return; | ||
| 114 | __delay(1); | ||
| 115 | } | ||
| 116 | /* lockup suspected: */ | ||
| 117 | spin_dump(lock, "lockup suspected"); | ||
| 118 | #ifdef CONFIG_SMP | ||
| 119 | trigger_all_cpu_backtrace(); | ||
| 120 | #endif | ||
| 121 | |||
| 122 | /* | ||
| 123 | * The trylock above was causing a livelock. Give the lower level arch | ||
| 124 | * specific lock code a chance to acquire the lock. We have already | ||
| 125 | * printed a warning/backtrace at this point. The non-debug arch | ||
| 126 | * specific code might actually succeed in acquiring the lock. If it is | ||
| 127 | * not successful, the end-result is the same - there is no forward | ||
| 128 | * progress. | ||
| 129 | */ | ||
| 130 | arch_spin_lock(&lock->raw_lock); | ||
| 131 | } | ||
| 132 | |||
| 133 | void do_raw_spin_lock(raw_spinlock_t *lock) | ||
| 134 | { | ||
| 135 | debug_spin_lock_before(lock); | ||
| 136 | if (unlikely(!arch_spin_trylock(&lock->raw_lock))) | ||
| 137 | __spin_lock_debug(lock); | ||
| 138 | debug_spin_lock_after(lock); | ||
| 139 | } | ||
| 140 | |||
| 141 | int do_raw_spin_trylock(raw_spinlock_t *lock) | ||
| 142 | { | ||
| 143 | int ret = arch_spin_trylock(&lock->raw_lock); | ||
| 144 | |||
| 145 | if (ret) | ||
| 146 | debug_spin_lock_after(lock); | ||
| 147 | #ifndef CONFIG_SMP | ||
| 148 | /* | ||
| 149 | * Must not happen on UP: | ||
| 150 | */ | ||
| 151 | SPIN_BUG_ON(!ret, lock, "trylock failure on UP"); | ||
| 152 | #endif | ||
| 153 | return ret; | ||
| 154 | } | ||
| 155 | |||
| 156 | void do_raw_spin_unlock(raw_spinlock_t *lock) | ||
| 157 | { | ||
| 158 | debug_spin_unlock(lock); | ||
| 159 | arch_spin_unlock(&lock->raw_lock); | ||
| 160 | } | ||
| 161 | |||
| 162 | static void rwlock_bug(rwlock_t *lock, const char *msg) | ||
| 163 | { | ||
| 164 | if (!debug_locks_off()) | ||
| 165 | return; | ||
| 166 | |||
| 167 | printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", | ||
| 168 | msg, raw_smp_processor_id(), current->comm, | ||
| 169 | task_pid_nr(current), lock); | ||
| 170 | dump_stack(); | ||
| 171 | } | ||
| 172 | |||
| 173 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) | ||
| 174 | |||
| 175 | #if 0 /* __write_lock_debug() can lock up - maybe this can too? */ | ||
| 176 | static void __read_lock_debug(rwlock_t *lock) | ||
| 177 | { | ||
| 178 | u64 i; | ||
| 179 | u64 loops = loops_per_jiffy * HZ; | ||
| 180 | int print_once = 1; | ||
| 181 | |||
| 182 | for (;;) { | ||
| 183 | for (i = 0; i < loops; i++) { | ||
| 184 | if (arch_read_trylock(&lock->raw_lock)) | ||
| 185 | return; | ||
| 186 | __delay(1); | ||
| 187 | } | ||
| 188 | /* lockup suspected: */ | ||
| 189 | if (print_once) { | ||
| 190 | print_once = 0; | ||
| 191 | printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, " | ||
| 192 | "%s/%d, %p\n", | ||
| 193 | raw_smp_processor_id(), current->comm, | ||
| 194 | current->pid, lock); | ||
| 195 | dump_stack(); | ||
| 196 | } | ||
| 197 | } | ||
| 198 | } | ||
| 199 | #endif | ||
| 200 | |||
| 201 | void do_raw_read_lock(rwlock_t *lock) | ||
| 202 | { | ||
| 203 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 204 | arch_read_lock(&lock->raw_lock); | ||
| 205 | } | ||
| 206 | |||
| 207 | int do_raw_read_trylock(rwlock_t *lock) | ||
| 208 | { | ||
| 209 | int ret = arch_read_trylock(&lock->raw_lock); | ||
| 210 | |||
| 211 | #ifndef CONFIG_SMP | ||
| 212 | /* | ||
| 213 | * Must not happen on UP: | ||
| 214 | */ | ||
| 215 | RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP"); | ||
| 216 | #endif | ||
| 217 | return ret; | ||
| 218 | } | ||
| 219 | |||
| 220 | void do_raw_read_unlock(rwlock_t *lock) | ||
| 221 | { | ||
| 222 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 223 | arch_read_unlock(&lock->raw_lock); | ||
| 224 | } | ||
| 225 | |||
| 226 | static inline void debug_write_lock_before(rwlock_t *lock) | ||
| 227 | { | ||
| 228 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 229 | RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); | ||
| 230 | RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), | ||
| 231 | lock, "cpu recursion"); | ||
| 232 | } | ||
| 233 | |||
| 234 | static inline void debug_write_lock_after(rwlock_t *lock) | ||
| 235 | { | ||
| 236 | lock->owner_cpu = raw_smp_processor_id(); | ||
| 237 | lock->owner = current; | ||
| 238 | } | ||
| 239 | |||
| 240 | static inline void debug_write_unlock(rwlock_t *lock) | ||
| 241 | { | ||
| 242 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 243 | RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); | ||
| 244 | RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), | ||
| 245 | lock, "wrong CPU"); | ||
| 246 | lock->owner = SPINLOCK_OWNER_INIT; | ||
| 247 | lock->owner_cpu = -1; | ||
| 248 | } | ||
| 249 | |||
| 250 | #if 0 /* This can cause lockups */ | ||
| 251 | static void __write_lock_debug(rwlock_t *lock) | ||
| 252 | { | ||
| 253 | u64 i; | ||
| 254 | u64 loops = loops_per_jiffy * HZ; | ||
| 255 | int print_once = 1; | ||
| 256 | |||
| 257 | for (;;) { | ||
| 258 | for (i = 0; i < loops; i++) { | ||
| 259 | if (arch_write_trylock(&lock->raw_lock)) | ||
| 260 | return; | ||
| 261 | __delay(1); | ||
| 262 | } | ||
| 263 | /* lockup suspected: */ | ||
| 264 | if (print_once) { | ||
| 265 | print_once = 0; | ||
| 266 | printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, " | ||
| 267 | "%s/%d, %p\n", | ||
| 268 | raw_smp_processor_id(), current->comm, | ||
| 269 | current->pid, lock); | ||
| 270 | dump_stack(); | ||
| 271 | } | ||
| 272 | } | ||
| 273 | } | ||
| 274 | #endif | ||
| 275 | |||
| 276 | void do_raw_write_lock(rwlock_t *lock) | ||
| 277 | { | ||
| 278 | debug_write_lock_before(lock); | ||
| 279 | arch_write_lock(&lock->raw_lock); | ||
| 280 | debug_write_lock_after(lock); | ||
| 281 | } | ||
| 282 | |||
| 283 | int do_raw_write_trylock(rwlock_t *lock) | ||
| 284 | { | ||
| 285 | int ret = arch_write_trylock(&lock->raw_lock); | ||
| 286 | |||
| 287 | if (ret) | ||
| 288 | debug_write_lock_after(lock); | ||
| 289 | #ifndef CONFIG_SMP | ||
| 290 | /* | ||
| 291 | * Must not happen on UP: | ||
| 292 | */ | ||
| 293 | RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP"); | ||
| 294 | #endif | ||
| 295 | return ret; | ||
| 296 | } | ||
| 297 | |||
| 298 | void do_raw_write_unlock(rwlock_t *lock) | ||
| 299 | { | ||
| 300 | debug_write_unlock(lock); | ||
| 301 | arch_write_unlock(&lock->raw_lock); | ||
| 302 | } | ||
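
The core trick in the deleted debug code is __spin_lock_debug(): spin on trylock for a bounded number of iterations, report a suspected lockup once, then fall back to the plain blocking acquire so the debug loop itself can never be the thing preventing forward progress. A userspace pthread sketch of the same trylock-with-deadline pattern; the loop bound is arbitrary, standing in for the kernel's loops_per_jiffy * HZ.

#include <pthread.h>
#include <stdio.h>

static void debug_spin_lock(pthread_spinlock_t *lock)
{
	for (unsigned long i = 0; i < 100000000UL; i++)
		if (pthread_spin_trylock(lock) == 0)
			return;			/* acquired while spinning */

	/* lockup suspected: report once, then block as a last resort */
	fprintf(stderr, "BUG: spinlock lockup suspected\n");
	pthread_spin_lock(lock);
}

int main(void)
{
	pthread_spinlock_t lock;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	debug_spin_lock(&lock);		/* uncontended: first try wins */
	pthread_spin_unlock(&lock);
	pthread_spin_destroy(&lock);
	return 0;
}
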
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 4e8686c7e5a4..e4399fa65ad6 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -38,6 +38,9 @@ | |||
| 38 | #include <linux/bootmem.h> | 38 | #include <linux/bootmem.h> |
| 39 | #include <linux/iommu-helper.h> | 39 | #include <linux/iommu-helper.h> |
| 40 | 40 | ||
| 41 | #define CREATE_TRACE_POINTS | ||
| 42 | #include <trace/events/swiotlb.h> | ||
| 43 | |||
| 41 | #define OFFSET(val,align) ((unsigned long) \ | 44 | #define OFFSET(val,align) ((unsigned long) \ |
| 42 | ( (val) & ( (align) - 1))) | 45 | ( (val) & ( (align) - 1))) |
| 43 | 46 | ||
| @@ -502,6 +505,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | |||
| 502 | 505 | ||
| 503 | not_found: | 506 | not_found: |
| 504 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 507 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
| 508 | dev_warn(hwdev, "swiotlb buffer is full\n"); | ||
| 505 | return SWIOTLB_MAP_ERROR; | 509 | return SWIOTLB_MAP_ERROR; |
| 506 | found: | 510 | found: |
| 507 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 511 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
| @@ -726,6 +730,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | |||
| 726 | if (dma_capable(dev, dev_addr, size) && !swiotlb_force) | 730 | if (dma_capable(dev, dev_addr, size) && !swiotlb_force) |
| 727 | return dev_addr; | 731 | return dev_addr; |
| 728 | 732 | ||
| 733 | trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); | ||
| 734 | |||
| 729 | /* Oh well, have to allocate and map a bounce buffer. */ | 735 | /* Oh well, have to allocate and map a bounce buffer. */ |
| 730 | map = map_single(dev, phys, size, dir); | 736 | map = map_single(dev, phys, size, dir); |
| 731 | if (map == SWIOTLB_MAP_ERROR) { | 737 | if (map == SWIOTLB_MAP_ERROR) { |
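
Two swiotlb changes above: a tracepoint now fires whenever a mapping has to bounce (CREATE_TRACE_POINTS must be defined in exactly one translation unit before the event header so the tracepoint bodies are emitted there), and pool exhaustion warns instead of failing silently. A userspace model of the instrumented slow path; the hook, the slot pool, and all names are illustrative stand-ins, not the swiotlb API.

#include <stdio.h>
#include <stdbool.h>

static void (*trace_bounced)(unsigned long addr, size_t size);
static long pool_slots = 2;	/* stand-in for the io_tlb slot pool */

static long map_single(size_t size)
{
	if (pool_slots <= 0) {
		/* models the new dev_warn() on exhaustion */
		fprintf(stderr, "swiotlb buffer is full (size %zu)\n", size);
		return -1;	/* models SWIOTLB_MAP_ERROR */
	}
	return --pool_slots;
}

static long map_page(unsigned long addr, size_t size, bool dma_capable)
{
	if (dma_capable)
		return (long)addr;		/* fast path: no bounce, no trace */
	if (trace_bounced)
		trace_bounced(addr, size);	/* instrument the slow path */
	return map_single(size);
}

static void report(unsigned long addr, size_t size)
{
	printf("bounced: addr=%#lx size=%zu\n", addr, size);
}

int main(void)
{
	trace_bounced = report;
	map_page(0x1000, 512, true);	  /* device reaches it: silent */
	map_page(0xfffff000, 512, false); /* bounce #1 */
	map_page(0xfffff000, 512, false); /* bounce #2 */
	map_page(0xfffff000, 512, false); /* bounce #3: pool exhausted */
	return 0;
}
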
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 26559bdb4c49..10909c571494 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
| 28 | #include <linux/ioport.h> | 28 | #include <linux/ioport.h> |
| 29 | #include <linux/dcache.h> | 29 | #include <linux/dcache.h> |
| 30 | #include <linux/cred.h> | ||
| 30 | #include <net/addrconf.h> | 31 | #include <net/addrconf.h> |
| 31 | 32 | ||
| 32 | #include <asm/page.h> /* for PAGE_SIZE */ | 33 | #include <asm/page.h> /* for PAGE_SIZE */ |
| @@ -1218,6 +1219,8 @@ int kptr_restrict __read_mostly; | |||
| 1218 | * The maximum supported length is 64 bytes of the input. Consider | 1219 | * The maximum supported length is 64 bytes of the input. Consider |
| 1219 | * using print_hex_dump() for larger input. | 1220 | * using print_hex_dump() for larger input. |
| 1220 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) | 1221 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) |
| 1222 | * - 'd[234]' For a dentry name (optionally 2-4 last components) | ||
| 1223 | * - 'D[234]' Same as 'd' but for a struct file | ||
| 1221 | * | 1224 | * |
| 1222 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 1225 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
| 1223 | * function pointers are really function descriptors, which contain a | 1226 | * function pointers are really function descriptors, which contain a |
| @@ -1312,11 +1315,37 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1312 | spec.field_width = default_width; | 1315 | spec.field_width = default_width; |
| 1313 | return string(buf, end, "pK-error", spec); | 1316 | return string(buf, end, "pK-error", spec); |
| 1314 | } | 1317 | } |
| 1315 | if (!((kptr_restrict == 0) || | 1318 | |
| 1316 | (kptr_restrict == 1 && | 1319 | switch (kptr_restrict) { |
| 1317 | has_capability_noaudit(current, CAP_SYSLOG)))) | 1320 | case 0: |
| 1321 | /* Always print %pK values */ | ||
| 1322 | break; | ||
| 1323 | case 1: { | ||
| 1324 | /* | ||
| 1325 | * Only print the real pointer value if the current | ||
| 1326 | * process has CAP_SYSLOG and is running with the | ||
| 1327 | * same credentials it started with. This is because | ||
| 1328 | * access to files is checked at open() time, but %pK | ||
| 1329 | * checks permission at read() time. We don't want to | ||
| 1330 | * leak pointer values if a binary opens a file using | ||
| 1331 | * %pK and then elevates privileges before reading it. | ||
| 1332 | */ | ||
| 1333 | const struct cred *cred = current_cred(); | ||
| 1334 | |||
| 1335 | if (!has_capability_noaudit(current, CAP_SYSLOG) || | ||
| 1336 | !uid_eq(cred->euid, cred->uid) || | ||
| 1337 | !gid_eq(cred->egid, cred->gid)) | ||
| 1338 | ptr = NULL; | ||
| 1339 | break; | ||
| 1340 | } | ||
| 1341 | case 2: | ||
| 1342 | default: | ||
| 1343 | /* Always print 0's for %pK */ | ||
| 1318 | ptr = NULL; | 1344 | ptr = NULL; |
| 1345 | break; | ||
| 1346 | } | ||
| 1319 | break; | 1347 | break; |
| 1348 | |||
| 1320 | case 'N': | 1349 | case 'N': |
| 1321 | switch (fmt[1]) { | 1350 | switch (fmt[1]) { |
| 1322 | case 'F': | 1351 | case 'F': |
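
The switch above turns %pK into a three-way policy: kptr_restrict 0 always prints, 1 prints only for a CAP_SYSLOG holder whose effective ids still equal its real ids (closing the open-then-elevate leak the comment describes), and 2 or above always censors. A userspace model of that decision table; the boolean stands in for has_capability_noaudit(current, CAP_SYSLOG) and the struct for the relevant cred fields.

#include <stdio.h>
#include <stdbool.h>

struct cred { unsigned int uid, euid, gid, egid; };

static const void *pK_filter(const void *ptr, int kptr_restrict,
			     bool cap_syslog, const struct cred *c)
{
	switch (kptr_restrict) {
	case 0:
		return ptr;		/* always print */
	case 1:
		/* deny readers that gained privileges after open() */
		if (!cap_syslog || c->euid != c->uid || c->egid != c->gid)
			return NULL;
		return ptr;
	default:
		return NULL;		/* 2 and up: always censor */
	}
}

int main(void)
{
	int x;
	struct cred same = { 1000, 1000, 1000, 1000 };
	struct cred elevated = { 1000, 0, 1000, 1000 }; /* euid raised */

	printf("%p\n", pK_filter(&x, 1, true, &same));	   /* real pointer */
	printf("%p\n", pK_filter(&x, 1, true, &elevated)); /* censored */
	return 0;
}
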
| @@ -1683,18 +1712,16 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1683 | break; | 1712 | break; |
| 1684 | 1713 | ||
| 1685 | case FORMAT_TYPE_NRCHARS: { | 1714 | case FORMAT_TYPE_NRCHARS: { |
| 1686 | u8 qualifier = spec.qualifier; | 1715 | /* |
| 1716 | * Since %n poses a greater security risk than | ||
| 1717 | * utility, ignore %n and skip its argument. | ||
| 1718 | */ | ||
| 1719 | void *skip_arg; | ||
| 1687 | 1720 | ||
| 1688 | if (qualifier == 'l') { | 1721 | WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", |
| 1689 | long *ip = va_arg(args, long *); | 1722 | old_fmt); |
| 1690 | *ip = (str - buf); | 1723 | |
| 1691 | } else if (_tolower(qualifier) == 'z') { | 1724 | skip_arg = va_arg(args, void *); |
| 1692 | size_t *ip = va_arg(args, size_t *); | ||
| 1693 | *ip = (str - buf); | ||
| 1694 | } else { | ||
| 1695 | int *ip = va_arg(args, int *); | ||
| 1696 | *ip = (str - buf); | ||
| 1697 | } | ||
| 1698 | break; | 1725 | break; |
| 1699 | } | 1726 | } |
| 1700 | 1727 | ||
