Diffstat (limited to 'lib')

 lib/Kconfig.debug        |  30
 lib/Kconfig.kgdb         |  25
 lib/Makefile             |   4
 lib/assoc_array.c        |   1
 lib/audit.c              |   3
 lib/bitmap.c             |  53
 lib/bug.c                |  20
 lib/decompress.c         |   4
 lib/decompress_bunzip2.c |   2
 lib/devres.c             |   4
 lib/dma-debug.c          |  43
 lib/dynamic_debug.c      |   4
 lib/fault-inject.c       |  21
 lib/hash.c               |  39
 lib/iovec.c              |  25
 lib/kobject.c            |   2
 lib/lcm.c                |   8
 lib/rhashtable.c         |  88
 lib/seq_buf.c            | 359
 lib/show_mem.c           |   6
 lib/test_bpf.c           |  53

 21 files changed, 627 insertions, 167 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4e35a5d767ed..5f2ce616c046 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -227,6 +227,22 @@ config UNUSED_SYMBOLS
 	  you really need it, and what the merge plan to the mainline kernel for
 	  your module is.
 
+config PAGE_OWNER
+	bool "Track page owner"
+	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+	select DEBUG_FS
+	select STACKTRACE
+	select PAGE_EXTENSION
+	help
+	  This keeps track of what call chain is the owner of a page, and
+	  may help to find bare alloc_page(s) leaks. Even if you include
+	  this feature in your build, it is disabled by default. You need
+	  to pass the "page_owner=on" boot parameter to enable it. It eats
+	  a fair amount of memory when enabled. See tools/vm/page_owner_sort.c
+	  for a user-space helper.
+
+	  If unsure, say N.
+
 config DEBUG_FS
 	bool "Debug Filesystem"
 	help
@@ -1238,21 +1254,9 @@ config RCU_CPU_STALL_TIMEOUT
 	  RCU grace period persists, additional CPU stall warnings are
 	  printed at more widely spaced intervals.
 
-config RCU_CPU_STALL_VERBOSE
-	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
-	depends on TREE_PREEMPT_RCU
-	default y
-	help
-	  This option causes RCU to printk detailed per-task information
-	  for any tasks that are stalling the current RCU grace period.
-
-	  Say N if you are unsure.
-
-	  Say Y if you want to enable such checks.
-
 config RCU_CPU_STALL_INFO
 	bool "Print additional diagnostics on RCU CPU stall"
-	depends on (TREE_RCU || TREE_PREEMPT_RCU) && DEBUG_KERNEL
+	depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
 	default n
 	help
 	  For each stalled CPU that is aware of the current RCU grace
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 358eb81fa28d..c635a107a7de 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -73,6 +73,31 @@ config KGDB_KDB
 	help
 	  KDB frontend for kernel
 
+config KDB_DEFAULT_ENABLE
+	hex "KDB: Select kdb command functions to be enabled by default"
+	depends on KGDB_KDB
+	default 0x1
+	help
+	  Specifies which kdb commands are enabled by default. This may
+	  be set to 1 or 0 to enable all commands or disable almost all
+	  commands.
+
+	  Alternatively the following bitmask applies:
+
+	    0x0002 - allow arbitrary reads from memory and symbol lookup
+	    0x0004 - allow arbitrary writes to memory
+	    0x0008 - allow current register state to be inspected
+	    0x0010 - allow current register state to be modified
+	    0x0020 - allow passive inspection (backtrace, process list, lsmod)
+	    0x0040 - allow flow control management (breakpoint, single step)
+	    0x0080 - enable signalling of processes
+	    0x0100 - allow machine to be rebooted
+
+	  The config option merely sets the default at boot time. Either
+	  issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+	  setting the kdb.cmd_enable=X kernel command line option will
+	  override the default settings.
+
 config KDB_KEYBOARD
 	bool "KGDB_KDB: keyboard as input device"
 	depends on VT && KGDB_KDB
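As a worked example of the bitmask above: a default of 0x0002 | 0x0020 | 0x0040 = 0x0062 would permit memory reads with symbol lookup, passive inspection, and flow control, while still refusing memory writes, register modification, process signalling and reboot.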
diff --git a/lib/Makefile b/lib/Makefile
index 0211d2bd5e17..3c3b30b9e020 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o argv_split.o \
 	 proportions.o flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o
+	 earlycpio.o seq_buf.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 2404d03e251a..03dd576e6773 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -11,6 +11,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 //#define DEBUG
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/assoc_array_priv.h>
diff --git a/lib/audit.c b/lib/audit.c
index 1d726a22565b..b8fb5ee81e26 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -54,6 +54,9 @@ int audit_classify_syscall(int abi, unsigned syscall)
 	case __NR_socketcall:
 		return 4;
 #endif
+#ifdef __NR_execveat
+	case __NR_execveat:
+#endif
 	case __NR_execve:
 		return 5;
 	default:
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b499ab6ada29..324ea9eab8c1 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -12,6 +12,8 @@
 #include <linux/bitmap.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
+
+#include <asm/page.h>
 #include <asm/uaccess.h>
 
 /*
@@ -326,30 +328,32 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
 }
 EXPORT_SYMBOL(bitmap_clear);
 
-/*
- * bitmap_find_next_zero_area - find a contiguous aligned zero area
+/**
+ * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
  * @map: The address to base the search on
  * @size: The bitmap size in bits
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
  *
  * The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is a multiple of that power of 2.
  */
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
-					 unsigned long size,
-					 unsigned long start,
-					 unsigned int nr,
-					 unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+					     unsigned long size,
+					     unsigned long start,
+					     unsigned int nr,
+					     unsigned long align_mask,
+					     unsigned long align_offset)
 {
 	unsigned long index, end, i;
 again:
 	index = find_next_zero_bit(map, size, start);
 
 	/* Align allocation */
-	index = __ALIGN_MASK(index, align_mask);
+	index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
 
 	end = index + nr;
 	if (end > size)
@@ -361,7 +365,7 @@ again:
 	}
 	return index;
 }
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
 
 /*
  * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
@@ -584,6 +588,33 @@ int bitmap_scnlistprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnlistprintf);
 
 /**
+ * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be printed as a list
+ * @buf: page aligned buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges if list is specified or hex digits grouped into comma-separated
+ * sets of 8 digits/set. Returns the number of characters written to buf.
+ */
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+			    int nmaskbits)
+{
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
+	int n = 0;
+
+	if (len > 1) {
+		n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
+			   bitmap_scnprintf(buf, len, maskp, nmaskbits);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+EXPORT_SYMBOL(bitmap_print_to_pagebuf);
+
+/**
  * __bitmap_parselist - convert list format ASCII string to bitmap
  * @buf: read nul-terminated user string from this buffer
  * @buflen: buffer size in bytes. If string is smaller than this
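To illustrate the new offset arithmetic: with align_mask = 3 (4-bit alignment) and align_offset = 1, a zero run first found at bit 5 is rounded up to bit 7, since __ALIGN_MASK(5 + 1, 3) - 1 = 8 - 1 = 7, and 7 + 1 = 8 is a multiple of 4. Existing callers presumably keep the old entry point as a thin header wrapper with a zero offset; a sketch of what such a wrapper would look like (illustrative only, not part of this hunk):

	static inline unsigned long
	bitmap_find_next_zero_area(unsigned long *map, unsigned long size,
				   unsigned long start, unsigned int nr,
				   unsigned long align_mask)
	{
		/* The old behaviour is the new one with align_offset == 0. */
		return bitmap_find_next_zero_area_off(map, size, start, nr,
						      align_mask, 0);
	}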
| @@ -64,16 +64,22 @@ static LIST_HEAD(module_bug_list); | |||
| 64 | static const struct bug_entry *module_find_bug(unsigned long bugaddr) | 64 | static const struct bug_entry *module_find_bug(unsigned long bugaddr) |
| 65 | { | 65 | { |
| 66 | struct module *mod; | 66 | struct module *mod; |
| 67 | const struct bug_entry *bug = NULL; | ||
| 67 | 68 | ||
| 68 | list_for_each_entry(mod, &module_bug_list, bug_list) { | 69 | rcu_read_lock(); |
| 69 | const struct bug_entry *bug = mod->bug_table; | 70 | list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { |
| 70 | unsigned i; | 71 | unsigned i; |
| 71 | 72 | ||
| 73 | bug = mod->bug_table; | ||
| 72 | for (i = 0; i < mod->num_bugs; ++i, ++bug) | 74 | for (i = 0; i < mod->num_bugs; ++i, ++bug) |
| 73 | if (bugaddr == bug_addr(bug)) | 75 | if (bugaddr == bug_addr(bug)) |
| 74 | return bug; | 76 | goto out; |
| 75 | } | 77 | } |
| 76 | return NULL; | 78 | bug = NULL; |
| 79 | out: | ||
| 80 | rcu_read_unlock(); | ||
| 81 | |||
| 82 | return bug; | ||
| 77 | } | 83 | } |
| 78 | 84 | ||
| 79 | void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | 85 | void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, |
| @@ -99,13 +105,15 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
| 99 | * Strictly speaking this should have a spinlock to protect against | 105 | * Strictly speaking this should have a spinlock to protect against |
| 100 | * traversals, but since we only traverse on BUG()s, a spinlock | 106 | * traversals, but since we only traverse on BUG()s, a spinlock |
| 101 | * could potentially lead to deadlock and thus be counter-productive. | 107 | * could potentially lead to deadlock and thus be counter-productive. |
| 108 | * Thus, this uses RCU to safely manipulate the bug list, since BUG | ||
| 109 | * must run in non-interruptive state. | ||
| 102 | */ | 110 | */ |
| 103 | list_add(&mod->bug_list, &module_bug_list); | 111 | list_add_rcu(&mod->bug_list, &module_bug_list); |
| 104 | } | 112 | } |
| 105 | 113 | ||
| 106 | void module_bug_cleanup(struct module *mod) | 114 | void module_bug_cleanup(struct module *mod) |
| 107 | { | 115 | { |
| 108 | list_del(&mod->bug_list); | 116 | list_del_rcu(&mod->bug_list); |
| 109 | } | 117 | } |
| 110 | 118 | ||
| 111 | #else | 119 | #else |
diff --git a/lib/decompress.c b/lib/decompress.c
index 37f3c786348f..528ff932d8e4 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -44,8 +44,8 @@ struct compress_format {
 };
 
 static const struct compress_format compressed_formats[] __initconst = {
-	{ {037, 0213}, "gzip", gunzip },
-	{ {037, 0236}, "gzip", gunzip },
+	{ {0x1f, 0x8b}, "gzip", gunzip },
+	{ {0x1f, 0x9e}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
 	{ {0x5d, 0x00}, "lzma", unlzma },
 	{ {0xfd, 0x37}, "xz", unxz },
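This hunk is purely cosmetic: 037, 0213 and 0236 are octal for 0x1f, 0x8b and 0x9e, so the gzip magic numbers are unchanged; they are merely rewritten in hex to match the neighbouring entries.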
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 8290e0bef7ea..6dd0335ea61b 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
 	if (get_bits(bd, 1))
 		return RETVAL_OBSOLETE_INPUT;
 	origPtr = get_bits(bd, 24);
-	if (origPtr > dbufSize)
+	if (origPtr >= dbufSize)
 		return RETVAL_DATA_ERROR;
 	/* mapping table: if some byte values are never used (encoding things
 	   like ascii text), the compression code removes the gaps to have fewer
diff --git a/lib/devres.c b/lib/devres.c
index f4a195a6efe4..0f1dd2e9d2c1 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -23,7 +23,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
  * Managed ioremap(). Map is automatically unmapped on driver detach.
  */
 void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
-			   unsigned long size)
+			   resource_size_t size)
 {
 	void __iomem **ptr, *addr;
 
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(devm_ioremap);
  * detach.
  */
 void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
-				   unsigned long size)
+				   resource_size_t size)
 {
 	void __iomem **ptr, *addr;
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index add80cc02dbe..9722bd2dbc9b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -102,6 +102,14 @@ static DEFINE_SPINLOCK(free_entries_lock);
 /* Global disable flag - will be set in case of an error */
 static u32 global_disable __read_mostly;
 
+/* Early initialization disable flag, set at the end of dma_debug_init */
+static bool dma_debug_initialized __read_mostly;
+
+static inline bool dma_debug_disabled(void)
+{
+	return global_disable || !dma_debug_initialized;
+}
+
 /* Global error count */
 static u32 error_count;
 
@@ -945,7 +953,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 	struct dma_debug_entry *uninitialized_var(entry);
 	int count;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return 0;
 
 	switch (action) {
@@ -973,7 +981,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 {
 	struct notifier_block *nb;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return;
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
@@ -994,6 +1002,9 @@ void dma_debug_init(u32 num_entries)
 {
 	int i;
 
+	/* Do not use dma_debug_initialized here, since we really want to be
+	 * called to set dma_debug_initialized
+	 */
 	if (global_disable)
 		return;
 
@@ -1021,6 +1032,8 @@ void dma_debug_init(u32 num_entries)
 
 	nr_total_entries = num_free_entries;
 
+	dma_debug_initialized = true;
+
 	pr_info("DMA-API: debugging enabled by kernel config\n");
 }
 
@@ -1243,7 +1256,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 {
 	struct dma_debug_entry *entry;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (dma_mapping_error(dev, dma_addr))
@@ -1283,7 +1296,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.dev = dev;
@@ -1325,7 +1338,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 		.direction	= direction,
 	};
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (map_single)
@@ -1342,7 +1355,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, mapped_ents, i) {
@@ -1395,7 +1408,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sglist, s, nelems, i) {
@@ -1427,7 +1440,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	struct dma_debug_entry *entry;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (unlikely(virt == NULL))
@@ -1462,7 +1475,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.direction	= DMA_BIDIRECTIONAL,
 	};
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	check_unmap(&ref);
@@ -1474,7 +1487,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1494,7 +1507,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1515,7 +1528,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1536,7 +1549,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1556,7 +1569,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
@@ -1589,7 +1602,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index dfba05521748..527799d44476 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -576,7 +576,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor,
 	} else {
 		char buf[PREFIX_SIZE];
 
-		dev_printk_emit(7, dev, "%s%s %s: %pV",
+		dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
 				dynamic_emit_prefix(descriptor, buf),
 				dev_driver_string(dev), dev_name(dev),
 				&vaf);
@@ -605,7 +605,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
 	if (dev && dev->dev.parent) {
 		char buf[PREFIX_SIZE];
 
-		dev_printk_emit(7, dev->dev.parent,
+		dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
 				"%s%s %s %s%s: %pV",
 				dynamic_emit_prefix(descriptor, buf),
 				dev_driver_string(dev->dev.parent),
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index d7d501ea856d..f1cdeb024d17 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -40,10 +40,16 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
-	if (attr->verbose > 0)
-		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
-	if (attr->verbose > 1)
-		dump_stack();
+	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
+		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
+		       "name %pd, interval %lu, probability %lu, "
+		       "space %d, times %d\n", attr->dname,
+		       attr->interval, attr->probability,
+		       atomic_read(&attr->space),
+		       atomic_read(&attr->times));
+		if (attr->verbose > 1)
+			dump_stack();
+	}
 }
 
 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
@@ -202,6 +208,12 @@ struct dentry *fault_create_debugfs_attr(const char *name,
 		goto fail;
 	if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
 		goto fail;
+	if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+				&attr->ratelimit_state.interval))
+		goto fail;
+	if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+				&attr->ratelimit_state.burst))
+		goto fail;
 	if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
 		goto fail;
 
@@ -222,6 +234,7 @@ struct dentry *fault_create_debugfs_attr(const char *name,
 
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
+	attr->dname = dget(dir);
 	return dir;
 fail:
	debugfs_remove_recursive(dir);
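With these knobs in place, the rate limit can be tuned at run time by writing to verbose_ratelimit_interval_ms and verbose_ratelimit_burst in the fault attribute's debugfs directory (for example under /sys/kernel/debug/failslab/, assuming debugfs is mounted in the usual place).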
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f4bd57..000000000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
- * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
- * Copyright 2013 Thomas Graf <tgraf@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
-	.hash  = jhash,
-	.hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
-	setup_arch_fast_hash(&arch_hash_ops);
-	return 0;
-}
-early_initcall(hashlib_init);
diff --git a/lib/iovec.c b/lib/iovec.c
index df3abd1eaa4a..2d99cb4a5006 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -29,31 +29,6 @@ EXPORT_SYMBOL(memcpy_fromiovec);
 
 /*
  * Copy kernel to iovec. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
-	while (len > 0) {
-		if (iov->iov_len) {
-			int copy = min_t(unsigned int, iov->iov_len, len);
-			if (copy_to_user(iov->iov_base, kdata, copy))
-				return -EFAULT;
-			kdata += copy;
-			len -= copy;
-			iov->iov_len -= copy;
-			iov->iov_base += copy;
-		}
-		iov++;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
-/*
- * Copy kernel to iovec. Returns -EFAULT on error.
  */
 
 int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
diff --git a/lib/kobject.c b/lib/kobject.c
index 58751bb80a7c..03d4ab349fa7 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -976,7 +976,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
 {
 	const struct kobj_ns_type_operations *ops = NULL;
 
-	if (parent && parent->ktype->child_ns_type)
+	if (parent && parent->ktype && parent->ktype->child_ns_type)
 		ops = parent->ktype->child_ns_type(parent);
 
 	return ops;
| @@ -7,10 +7,8 @@ | |||
| 7 | unsigned long lcm(unsigned long a, unsigned long b) | 7 | unsigned long lcm(unsigned long a, unsigned long b) |
| 8 | { | 8 | { |
| 9 | if (a && b) | 9 | if (a && b) |
| 10 | return (a * b) / gcd(a, b); | 10 | return (a / gcd(a, b)) * b; |
| 11 | else if (b) | 11 | else |
| 12 | return b; | 12 | return 0; |
| 13 | |||
| 14 | return a; | ||
| 15 | } | 13 | } |
| 16 | EXPORT_SYMBOL_GPL(lcm); | 14 | EXPORT_SYMBOL_GPL(lcm); |
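The rewrite matters on 32-bit: computing a * b first can overflow unsigned long even when the final result fits. Since gcd(a, b) divides a exactly, dividing before multiplying avoids the oversized intermediate; for example lcm(90000, 120000) = (90000 / 30000) * 120000 = 360000, whereas the intermediate 90000 * 120000 = 10,800,000,000 does not fit in 32 bits. Note also the behaviour change for degenerate inputs: lcm(a, 0) and lcm(0, b) now return 0 rather than the non-zero argument.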
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 624a0b7c05ef..6c3c723e902b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
 
@@ -32,7 +32,7 @@
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
 {
-	return ht->p.mutex_is_held();
+	return ht->p.mutex_is_held(ht->p.parent);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 #endif
@@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht,
 	return obj_hashfn(ht, rht_obj(ht, he), hsize);
 }
 
-static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 {
 	struct bucket_table *tbl;
 	size_t size;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, flags);
+	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
 
@@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 /**
  * rhashtable_expand - Expand hash table while allowing concurrent lookups
  * @ht:		the hash table to expand
- * @flags:	allocation flags
  *
  * A secondary bucket array is allocated and the hash entries are migrated
  * while keeping them on both lists until the end of the RCU grace period.
@@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+int rhashtable_expand(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head *he;
@@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
 	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
 		return 0;
 
-	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+	new_tbl = bucket_table_alloc(old_tbl->size * 2);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 /**
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
  * @ht:		the hash table to shrink
- * @flags:	allocation flags
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 	if (ht->shift <= ht->p.min_shift)
 		return 0;
 
-	ntbl = bucket_table_alloc(tbl->size / 2, flags);
+	ntbl = bucket_table_alloc(tbl->size / 2);
 	if (ntbl == NULL)
 		return -ENOMEM;
 
@@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
- * @flags:	allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
@@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	u32 hash;
@@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	ht->nelems++;
 
 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
-		rhashtable_expand(ht, flags);
+		rhashtable_expand(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
 
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @pprev:	pointer to previous element
- * @flags:	allocation flags (table expansion)
 *
 * Identical to rhashtable_remove() but caller is already aware of the element
 * in front of the element to be deleted. This is in particular useful for
 * deletion when combined with walking or lookup.
 */
 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-			     struct rhash_head __rcu **pprev, gfp_t flags)
+			     struct rhash_head __rcu **pprev)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 
@@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
 
 	if (ht->p.shrink_decision &&
 	    ht->p.shrink_decision(ht, tbl->size))
-		rhashtable_shrink(ht, flags);
+		rhashtable_shrink(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 
@@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
- * @flags:	allocation flags (table expansion)
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
@@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
 			continue;
 		}
 
-		rhashtable_remove_pprev(ht, he, pprev, flags);
+		rhashtable_remove_pprev(ht, he, pprev);
 		return true;
 	}
 
@@ -531,8 +524,10 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
- *	.hashfn = arch_fast_hash,
+ *	.hashfn = jhash,
+ * #ifdef CONFIG_PROVE_LOCKING
 *	.mutex_is_held = &my_mutex_is_held,
+ * #endif
 * };
 *
 * Configuration Example 2: Variable length keys
@@ -550,9 +545,11 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
- *	.hashfn = arch_fast_hash,
+ *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
+ * #ifdef CONFIG_PROVE_LOCKING
 *	.mutex_is_held = &my_mutex_is_held,
+ * #endif
 * };
 */
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -572,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (params->nelem_hint)
 		size = rounded_hashtable_size(params);
 
-	tbl = bucket_table_alloc(size, GFP_KERNEL);
+	tbl = bucket_table_alloc(size);
 	if (tbl == NULL)
 		return -ENOMEM;
 
@@ -613,10 +610,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
 #define TEST_PTR	((void *) 0xdeadbeef)
 #define TEST_NEXPANDS	4
 
-static int test_mutex_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int test_mutex_is_held(void *parent)
 {
 	return 1;
 }
+#endif
 
 struct test_obj {
 	void	*ptr;
@@ -654,15 +653,15 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 	return 0;
 }
 
-static void test_bucket_stats(struct rhashtable *ht,
-			      struct bucket_table *tbl,
-			      bool quiet)
+static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 {
-	unsigned int cnt, i, total = 0;
+	unsigned int cnt, rcu_cnt, i, total = 0;
 	struct test_obj *obj;
+	struct bucket_table *tbl;
 
+	tbl = rht_dereference_rcu(ht->tbl, ht);
 	for (i = 0; i < tbl->size; i++) {
-		cnt = 0;
+		rcu_cnt = cnt = 0;
 
 		if (!quiet)
 			pr_info(" [%#4x/%zu]", i, tbl->size);
@@ -674,6 +673,13 @@ static void test_bucket_stats(struct rhashtable *ht,
 				pr_cont(" [%p],", obj);
 		}
 
+		rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
+			rcu_cnt++;
+
+		if (rcu_cnt != cnt)
+			pr_warn("Test failed: Chain count mismatch %d != %d",
+				cnt, rcu_cnt);
+
 		if (!quiet)
 			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
 				i, tbl->buckets[i], cnt);
@@ -681,6 +687,9 @@ static void test_bucket_stats(struct rhashtable *ht,
 
 	pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
 		total, ht->nelems, TEST_ENTRIES);
+
+	if (total != ht->nelems || total != TEST_ENTRIES)
+		pr_warn("Test failed: Total count mismatch ^^^");
 }
 
 static int __init test_rhashtable(struct rhashtable *ht)
@@ -707,18 +716,17 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj->ptr = TEST_PTR;
 		obj->value = i * 2;
 
-		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+		rhashtable_insert(ht, &obj->node);
 	}
 
 	rcu_read_lock();
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-	test_bucket_stats(ht, tbl, true);
+	test_bucket_stats(ht, true);
 	test_rht_lookup(ht);
 	rcu_read_unlock();
 
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info("  Table expansion iteration %u...\n", i);
-		rhashtable_expand(ht, GFP_KERNEL);
+		rhashtable_expand(ht);
 
 		rcu_read_lock();
 		pr_info("  Verifying lookups...\n");
@@ -728,7 +736,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info("  Table shrinkage iteration %u...\n", i);
-		rhashtable_shrink(ht, GFP_KERNEL);
+		rhashtable_shrink(ht);
 
 		rcu_read_lock();
 		pr_info("  Verifying lookups...\n");
@@ -736,6 +744,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		rcu_read_unlock();
 	}
 
+	rcu_read_lock();
+	test_bucket_stats(ht, true);
+	rcu_read_unlock();
+
 	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
 	for (i = 0; i < TEST_ENTRIES; i++) {
 		u32 key = i * 2;
@@ -743,7 +755,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj = rhashtable_lookup(ht, &key);
 		BUG_ON(!obj);
 
-		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+		rhashtable_remove(ht, &obj->node);
 		kfree(obj);
 	}
 
@@ -766,8 +778,10 @@ static int __init test_rht_init(void)
 		.head_offset = offsetof(struct test_obj, node),
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
-		.hashfn = arch_fast_hash,
+		.hashfn = jhash,
+#ifdef CONFIG_PROVE_LOCKING
		.mutex_is_held = &test_mutex_is_held,
+#endif
 		.grow_decision = rht_grow_above_75,
 		.shrink_decision = rht_shrink_below_30,
 	};
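Two API points worth noting in this file. First, the gfp_t parameters disappear because bucket_table_alloc() now always allocates with GFP_KERNEL (falling back to vzalloc(), which cannot honour arbitrary GFP flags), so per-call flags were meaningless. Second, the mutex_is_held() callback now receives the opaque parent pointer stored in rhashtable_params, letting lockdep-only users key the check off their own state. A minimal caller-side sketch under the new signature (illustrative; my_table_mutex is a hypothetical caller-owned mutex):

	#ifdef CONFIG_PROVE_LOCKING
	static int my_mutex_is_held(void *parent)
	{
		/* 'parent' is whatever the caller stored in params.parent. */
		return lockdep_is_held(&my_table_mutex);
	}
	#endif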
diff --git a/lib/seq_buf.c b/lib/seq_buf.c new file mode 100644 index 000000000000..4eedfedb9e31 --- /dev/null +++ b/lib/seq_buf.c | |||
| @@ -0,0 +1,359 @@ | |||
| 1 | /* | ||
| 2 | * seq_buf.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> | ||
| 5 | * | ||
| 6 | * The seq_buf is a handy tool that allows you to pass a descriptor around | ||
| 7 | * to a buffer that other functions can write to. It is similar to the | ||
| 8 | * seq_file functionality but has some differences. | ||
| 9 | * | ||
| 10 | * To use it, the seq_buf must be initialized with seq_buf_init(). | ||
| 11 | * This will set up the counters within the descriptor. You can call | ||
| 12 | * seq_buf_init() more than once to reset the seq_buf to start | ||
| 13 | * from scratch. | ||
| 14 | */ | ||
| 15 | #include <linux/uaccess.h> | ||
| 16 | #include <linux/seq_file.h> | ||
| 17 | #include <linux/seq_buf.h> | ||
| 18 | |||
| 19 | /** | ||
| 20 | * seq_buf_can_fit - can the new data fit in the current buffer? | ||
| 21 | * @s: the seq_buf descriptor | ||
| 22 | * @len: The length to see if it can fit in the current buffer | ||
| 23 | * | ||
| 24 | * Returns true if there's enough unused space in the seq_buf buffer | ||
| 25 | * to fit the amount of new data according to @len. | ||
| 26 | */ | ||
| 27 | static bool seq_buf_can_fit(struct seq_buf *s, size_t len) | ||
| 28 | { | ||
| 29 | return s->len + len <= s->size; | ||
| 30 | } | ||
| 31 | |||
| 32 | /** | ||
| 33 | * seq_buf_print_seq - move the contents of seq_buf into a seq_file | ||
| 34 | * @m: the seq_file descriptor that is the destination | ||
| 35 | * @s: the seq_buf descriptor that is the source. | ||
| 36 | * | ||
| 37 | * Returns zero on success, non-zero otherwise | ||
| 38 | */ | ||
| 39 | int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) | ||
| 40 | { | ||
| 41 | unsigned int len = seq_buf_used(s); | ||
| 42 | |||
| 43 | return seq_write(m, s->buffer, len); | ||
| 44 | } | ||
| 45 | |||
| 46 | /** | ||
| 47 | * seq_buf_vprintf - sequence printing of information. | ||
| 48 | * @s: seq_buf descriptor | ||
| 49 | * @fmt: printf format string | ||
| 50 | * @args: va_list of arguments from a printf() type function | ||
| 51 | * | ||
| 52 | * Writes a vsnprintf() format into the sequence buffer. | ||
| 53 | * | ||
| 54 | * Returns zero on success, -1 on overflow. | ||
| 55 | */ | ||
| 56 | int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) | ||
| 57 | { | ||
| 58 | int len; | ||
| 59 | |||
| 60 | WARN_ON(s->size == 0); | ||
| 61 | |||
| 62 | if (s->len < s->size) { | ||
| 63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); | ||
| 64 | if (seq_buf_can_fit(s, len)) { | ||
| 65 | s->len += len; | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | } | ||
| 69 | seq_buf_set_overflow(s); | ||
| 70 | return -1; | ||
| 71 | } | ||
| 72 | |||
| 73 | /** | ||
| 74 | * seq_buf_printf - sequence printing of information | ||
| 75 | * @s: seq_buf descriptor | ||
| 76 | * @fmt: printf format string | ||
| 77 | * | ||
| 78 | * Writes a printf() format into the sequence buffer. | ||
| 79 | * | ||
| 80 | * Returns zero on success, -1 on overflow. | ||
| 81 | */ | ||
| 82 | int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) | ||
| 83 | { | ||
| 84 | va_list ap; | ||
| 85 | int ret; | ||
| 86 | |||
| 87 | va_start(ap, fmt); | ||
| 88 | ret = seq_buf_vprintf(s, fmt, ap); | ||
| 89 | va_end(ap); | ||
| 90 | |||
| 91 | return ret; | ||
| 92 | } | ||
| 93 | |||
| 94 | /** | ||
| 95 | * seq_buf_bitmask - write a bitmask array in its ASCII representation | ||
| 96 | * @s: seq_buf descriptor | ||
| 97 | * @maskp: points to an array of unsigned longs that represent a bitmask | ||
| 98 | * @nmaskbits: The number of bits that are valid in @maskp | ||
| 99 | * | ||
| 100 | * Writes an ASCII representation of a bitmask string into @s. | ||
| 101 | * | ||
| 102 | * Returns zero on success, -1 on overflow. | ||
| 103 | */ | ||
| 104 | int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, | ||
| 105 | int nmaskbits) | ||
| 106 | { | ||
| 107 | unsigned int len = seq_buf_buffer_left(s); | ||
| 108 | int ret; | ||
| 109 | |||
| 110 | WARN_ON(s->size == 0); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Note, because bitmap_scnprintf() only returns the number of bytes | ||
| 114 | * written and not the number that would be written, we use the last | ||
| 115 | * byte of the buffer to let us know if we overflowed. There's a small | ||
| 116 | * chance that the bitmap could have fit exactly inside the buffer, but | ||
| 117 | * it's not that critical if that does happen. | ||
| 118 | */ | ||
| 119 | if (len > 1) { | ||
| 120 | ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); | ||
| 121 | if (ret < len) { | ||
| 122 | s->len += ret; | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | } | ||
| 126 | seq_buf_set_overflow(s); | ||
| 127 | return -1; | ||
| 128 | } | ||
| 129 | |||
| 130 | #ifdef CONFIG_BINARY_PRINTF | ||
| 131 | /** | ||
| 132 | * seq_buf_bprintf - Write the printf string from binary arguments | ||
| 133 | * @s: seq_buf descriptor | ||
| 134 | * @fmt: The format string for the @binary arguments | ||
| 135 | * @binary: The binary arguments for @fmt. | ||
| 136 | * | ||
| 137 | * When recording in a fast path, a printf may be recorded with just | ||
| 138 | * saving the format and the arguments as they were passed to the | ||
| 139 | * function, instead of wasting cycles converting the arguments into | ||
| 140 | * ASCII characters. Instead, the arguments are saved in a 32 bit | ||
| 141 | * word array that is defined by the format string constraints. | ||
| 142 | * | ||
| 143 | * This function will take the format and the binary array and finish | ||
| 144 | * the conversion into the ASCII string within the buffer. | ||
| 145 | * | ||
| 146 | * Returns zero on success, -1 on overflow. | ||
| 147 | */ | ||
| 148 | int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) | ||
| 149 | { | ||
| 150 | unsigned int len = seq_buf_buffer_left(s); | ||
| 151 | int ret; | ||
| 152 | |||
| 153 | WARN_ON(s->size == 0); | ||
| 154 | |||
| 155 | if (s->len < s->size) { | ||
| 156 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | ||
| 157 | if (seq_buf_can_fit(s, ret)) { | ||
| 158 | s->len += ret; | ||
| 159 | return 0; | ||
| 160 | } | ||
| 161 | } | ||
| 162 | seq_buf_set_overflow(s); | ||
| 163 | return -1; | ||
| 164 | } | ||
| 165 | #endif /* CONFIG_BINARY_PRINTF */ | ||
| 166 | |||
| 167 | /** | ||
| 168 | * seq_buf_puts - sequence printing of simple string | ||
| 169 | * @s: seq_buf descriptor | ||
| 170 | * @str: simple string to record | ||
| 171 | * | ||
| 172 | * Copy a simple string into the sequence buffer. | ||
| 173 | * | ||
| 174 | * Returns zero on success, -1 on overflow | ||
| 175 | */ | ||
| 176 | int seq_buf_puts(struct seq_buf *s, const char *str) | ||
| 177 | { | ||
| 178 | unsigned int len = strlen(str); | ||
| 179 | |||
| 180 | WARN_ON(s->size == 0); | ||
| 181 | |||
| 182 | if (seq_buf_can_fit(s, len)) { | ||
| 183 | memcpy(s->buffer + s->len, str, len); | ||
| 184 | s->len += len; | ||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | seq_buf_set_overflow(s); | ||
| 188 | return -1; | ||
| 189 | } | ||
| 190 | |||
| 191 | /** | ||
| 192 | * seq_buf_putc - sequence printing of simple character | ||
| 193 | * @s: seq_buf descriptor | ||
| 194 | * @c: simple character to record | ||
| 195 | * | ||
| 196 | * Copy a single character into the sequence buffer. | ||
| 197 | * | ||
| 198 | * Returns zero on success, -1 on overflow | ||
| 199 | */ | ||
| 200 | int seq_buf_putc(struct seq_buf *s, unsigned char c) | ||
| 201 | { | ||
| 202 | WARN_ON(s->size == 0); | ||
| 203 | |||
| 204 | if (seq_buf_can_fit(s, 1)) { | ||
| 205 | s->buffer[s->len++] = c; | ||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | seq_buf_set_overflow(s); | ||
| 209 | return -1; | ||
| 210 | } | ||
| 211 | |||
| 212 | /** | ||
| 213 | * seq_buf_putmem - write raw data into the sequence buffer | ||
| 214 | * @s: seq_buf descriptor | ||
| 215 | * @mem: The raw memory to copy into the buffer | ||
| 216 | * @len: The length of the raw memory to copy (in bytes) | ||
| 217 | * | ||
| 218 | * There may be cases where raw memory needs to be written into the | ||
| 219 | * buffer and a strcpy() would not work. Using this function allows | ||
| 220 | * for such cases. | ||
| 221 | * | ||
| 222 | * Returns zero on success, -1 on overflow | ||
| 223 | */ | ||
| 224 | int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) | ||
| 225 | { | ||
| 226 | WARN_ON(s->size == 0); | ||
| 227 | |||
| 228 | if (seq_buf_can_fit(s, len)) { | ||
| 229 | memcpy(s->buffer + s->len, mem, len); | ||
| 230 | s->len += len; | ||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | seq_buf_set_overflow(s); | ||
| 234 | return -1; | ||
| 235 | } | ||
| 236 | |||
| 237 | #define MAX_MEMHEX_BYTES 8U | ||
| 238 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
| 239 | |||
| 240 | /** | ||
| 241 | * seq_buf_putmem_hex - write raw memory into the buffer in ASCII hex | ||
| 242 | * @s: seq_buf descriptor | ||
| 243 | * @mem: The raw memory to write out in its hex ASCII representation | ||
| 244 | * @len: The length of the raw memory to copy (in bytes) | ||
| 245 | * | ||
| 246 | * This is similar to seq_buf_putmem() except instead of just copying the | ||
| 247 | * raw memory into the buffer it writes its ASCII representation of it | ||
| 248 | * in hex characters. | ||
| 249 | * | ||
| 250 | * Returns zero on success, -1 on overflow | ||
| 251 | */ | ||
| 252 | int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, | ||
| 253 | unsigned int len) | ||
| 254 | { | ||
| 255 | unsigned char hex[HEX_CHARS]; | ||
| 256 | const unsigned char *data = mem; | ||
| 257 | unsigned int start_len; | ||
| 258 | int i, j; | ||
| 259 | |||
| 260 | WARN_ON(s->size == 0); | ||
| 261 | |||
| 262 | while (len) { | ||
| 263 | start_len = min(len, HEX_CHARS - 1); | ||
| 264 | #ifdef __BIG_ENDIAN | ||
| 265 | for (i = 0, j = 0; i < start_len; i++) { | ||
| 266 | #else | ||
| 267 | for (i = start_len-1, j = 0; i >= 0; i--) { | ||
| 268 | #endif | ||
| 269 | hex[j++] = hex_asc_hi(data[i]); | ||
| 270 | hex[j++] = hex_asc_lo(data[i]); | ||
| 271 | } | ||
| 272 | if (WARN_ON_ONCE(j == 0 || j/2 > len)) | ||
| 273 | break; | ||
| 274 | |||
| 275 | /* j increments twice per loop */ | ||
| 276 | len -= j / 2; | ||
| 277 | hex[j++] = ' '; | ||
| 278 | |||
| 279 | seq_buf_putmem(s, hex, j); | ||
| 280 | if (seq_buf_has_overflowed(s)) | ||
| 281 | return -1; | ||
| 282 | } | ||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | /** | ||
| 287 | * seq_buf_path - copy a path into the sequence buffer | ||
| 288 | * @s: seq_buf descriptor | ||
| 289 | * @path: path to write into the sequence buffer. | ||
| 290 | * @esc: set of characters to escape in the output | ||
| 291 | * | ||
| 292 | * Write a path name into the sequence buffer. | ||
| 293 | * | ||
| 294 | * Returns the number of written bytes on success, -1 on overflow | ||
| 295 | */ | ||
| 296 | int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) | ||
| 297 | { | ||
| 298 | char *buf; | ||
| 299 | size_t size = seq_buf_get_buf(s, &buf); | ||
| 300 | int res = -1; | ||
| 301 | |||
| 302 | WARN_ON(s->size == 0); | ||
| 303 | |||
| 304 | if (size) { | ||
| 305 | char *p = d_path(path, buf, size); | ||
| 306 | if (!IS_ERR(p)) { | ||
| 307 | char *end = mangle_path(buf, p, esc); | ||
| 308 | if (end) | ||
| 309 | res = end - buf; | ||
| 310 | } | ||
| 311 | } | ||
| 312 | seq_buf_commit(s, res); | ||
| 313 | |||
| 314 | return res; | ||
| 315 | } | ||
| 316 | |||
| 317 | /** | ||
| 318 | * seq_buf_to_user - copy the sequence buffer to user space | ||
| 319 | * @s: seq_buf descriptor | ||
| 320 | * @ubuf: The userspace memory location to copy to | ||
| 321 | * @cnt: The amount to copy | ||
| 322 | * | ||
| 323 | * Copies the sequence buffer into the userspace memory pointed to | ||
| 324 | * by @ubuf. It starts from the last read position (@s->readpos) | ||
| 325 | * and writes up to @cnt characters or till it reaches the end of | ||
| 326 | * the content in the buffer (@s->len), whichever comes first. | ||
| 327 | * | ||
| 328 | * On success, it returns the number of bytes | ||
| 329 | * it copied. | ||
| 330 | * | ||
| 331 | * On failure it returns -EBUSY if all of the content in the | ||
| 332 | * sequence has already been read, which includes the case where | ||
| 333 | * nothing is in the sequence (@s->len == @s->readpos). | ||
| 334 | * | ||
| 335 | * Returns -EFAULT if the copy to userspace fails. | ||
| 336 | */ | ||
| 337 | int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) | ||
| 338 | { | ||
| 339 | int len; | ||
| 340 | int ret; | ||
| 341 | |||
| 342 | if (!cnt) | ||
| 343 | return 0; | ||
| 344 | |||
| 345 | if (s->len <= s->readpos) | ||
| 346 | return -EBUSY; | ||
| 347 | |||
| 348 | len = seq_buf_used(s) - s->readpos; | ||
| 349 | if (cnt > len) | ||
| 350 | cnt = len; | ||
| 351 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | ||
| 352 | if (ret == cnt) | ||
| 353 | return -EFAULT; | ||
| 354 | |||
| 355 | cnt -= ret; | ||
| 356 | |||
| 357 | s->readpos += cnt; | ||
| 358 | return cnt; | ||
| 359 | } | ||
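Taken together the new file is a small write-only string builder: writers append until the buffer is full, then set an overflow marker instead of truncating silently. A short usage sketch, assuming seq_buf_init() and the seq_buf_used()/seq_buf_has_overflowed() helpers from linux/seq_buf.h (early versions of seq_buf_init() take an unsigned char * buffer; adjust the type to match your tree):

	/* Sketch: exercises only functions defined above or declared in
	 * linux/seq_buf.h; the demo function itself is hypothetical. */
	#include <linux/seq_buf.h>
	#include <linux/printk.h>

	static void seq_buf_demo(void)
	{
		unsigned char buf[64];
		struct seq_buf s;

		seq_buf_init(&s, buf, sizeof(buf));   /* attach buffer, reset counters */
		seq_buf_printf(&s, "cpu=%d ", 3);     /* formatted append */
		seq_buf_puts(&s, "state=idle");       /* raw string append */
		seq_buf_putc(&s, '\n');

		/* On a too-small buffer the calls above return -1 and set
		 * the overflow marker rather than writing past the end. */
		if (seq_buf_has_overflowed(&s))
			return;

		pr_info("%.*s", (int)seq_buf_used(&s), (char *)buf);
	}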
diff --git a/lib/show_mem.c b/lib/show_mem.c index 5e256271b47b..7de89f4a36cf 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
| 9 | #include <linux/nmi.h> | 9 | #include <linux/nmi.h> |
| 10 | #include <linux/quicklist.h> | 10 | #include <linux/quicklist.h> |
| 11 | #include <linux/cma.h> | ||
| 11 | 12 | ||
| 12 | void show_mem(unsigned int filter) | 13 | void show_mem(unsigned int filter) |
| 13 | { | 14 | { |
| @@ -38,7 +39,12 @@ void show_mem(unsigned int filter) | |||
| 38 | 39 | ||
| 39 | printk("%lu pages RAM\n", total); | 40 | printk("%lu pages RAM\n", total); |
| 40 | printk("%lu pages HighMem/MovableOnly\n", highmem); | 41 | printk("%lu pages HighMem/MovableOnly\n", highmem); |
| 42 | #ifdef CONFIG_CMA | ||
| 43 | printk("%lu pages reserved\n", (reserved - totalcma_pages)); | ||
| 44 | printk("%lu pages cma reserved\n", totalcma_pages); | ||
| 45 | #else | ||
| 41 | printk("%lu pages reserved\n", reserved); | 46 | printk("%lu pages reserved\n", reserved); |
| 47 | #endif | ||
| 42 | #ifdef CONFIG_QUICKLIST | 48 | #ifdef CONFIG_QUICKLIST |
| 43 | printk("%lu pages in pagetable cache\n", | 49 | printk("%lu pages in pagetable cache\n", |
| 44 | quicklist_total_size()); | 50 | quicklist_total_size()); |
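The CONFIG_CMA branch splits the existing counter rather than inventing a new one. As an illustrative example (the numbers are made up): if the zone walk counted reserved = 1000 pages while the CMA pool holds totalcma_pages = 200, the output reads "800 pages reserved" and "200 pages cma reserved", so the two lines still add up to the old single figure.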
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 23e070bcf72d..80d78c51f65f 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -124,7 +124,7 @@ static struct bpf_test tests[] = { | |||
| 124 | { { 0, 0xfffffffd } } | 124 | { { 0, 0xfffffffd } } |
| 125 | }, | 125 | }, |
| 126 | { | 126 | { |
| 127 | "DIV_KX", | 127 | "DIV_MOD_KX", |
| 128 | .u.insns = { | 128 | .u.insns = { |
| 129 | BPF_STMT(BPF_LD | BPF_IMM, 8), | 129 | BPF_STMT(BPF_LD | BPF_IMM, 8), |
| 130 | BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2), | 130 | BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2), |
| @@ -134,12 +134,18 @@ static struct bpf_test tests[] = { | |||
| 134 | BPF_STMT(BPF_MISC | BPF_TAX, 0), | 134 | BPF_STMT(BPF_MISC | BPF_TAX, 0), |
| 135 | BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), | 135 | BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), |
| 136 | BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000), | 136 | BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000), |
| 137 | BPF_STMT(BPF_MISC | BPF_TAX, 0), | ||
| 138 | BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), | ||
| 139 | BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), | ||
| 140 | BPF_STMT(BPF_MISC | BPF_TAX, 0), | ||
| 141 | BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), | ||
| 142 | BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000), | ||
| 137 | BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), | 143 | BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), |
| 138 | BPF_STMT(BPF_RET | BPF_A, 0) | 144 | BPF_STMT(BPF_RET | BPF_A, 0) |
| 139 | }, | 145 | }, |
| 140 | CLASSIC | FLAG_NO_DATA, | 146 | CLASSIC | FLAG_NO_DATA, |
| 141 | { }, | 147 | { }, |
| 142 | { { 0, 0x40000001 } } | 148 | { { 0, 0x20000000 } } |
| 143 | }, | 149 | }, |
| 144 | { | 150 | { |
| 145 | "AND_OR_LSH_K", | 151 | "AND_OR_LSH_K", |
| @@ -1756,6 +1762,49 @@ static struct bpf_test tests[] = { | |||
| 1756 | { }, | 1762 | { }, |
| 1757 | { { 0, 1 } } | 1763 | { { 0, 1 } } |
| 1758 | }, | 1764 | }, |
| 1765 | { | ||
| 1766 | "nmap reduced", | ||
| 1767 | .u.insns_int = { | ||
| 1768 | BPF_MOV64_REG(R6, R1), | ||
| 1769 | BPF_LD_ABS(BPF_H, 12), | ||
| 1770 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28), | ||
| 1771 | BPF_LD_ABS(BPF_H, 12), | ||
| 1772 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26), | ||
| 1773 | BPF_MOV32_IMM(R0, 18), | ||
| 1774 | BPF_STX_MEM(BPF_W, R10, R0, -64), | ||
| 1775 | BPF_LDX_MEM(BPF_W, R7, R10, -64), | ||
| 1776 | BPF_LD_IND(BPF_W, R7, 14), | ||
| 1777 | BPF_STX_MEM(BPF_W, R10, R0, -60), | ||
| 1778 | BPF_MOV32_IMM(R0, 280971478), | ||
| 1779 | BPF_STX_MEM(BPF_W, R10, R0, -56), | ||
| 1780 | BPF_LDX_MEM(BPF_W, R7, R10, -56), | ||
| 1781 | BPF_LDX_MEM(BPF_W, R0, R10, -60), | ||
| 1782 | BPF_ALU32_REG(BPF_SUB, R0, R7), | ||
| 1783 | BPF_JMP_IMM(BPF_JNE, R0, 0, 15), | ||
| 1784 | BPF_LD_ABS(BPF_H, 12), | ||
| 1785 | BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13), | ||
| 1786 | BPF_MOV32_IMM(R0, 22), | ||
| 1787 | BPF_STX_MEM(BPF_W, R10, R0, -56), | ||
| 1788 | BPF_LDX_MEM(BPF_W, R7, R10, -56), | ||
| 1789 | BPF_LD_IND(BPF_H, R7, 14), | ||
| 1790 | BPF_STX_MEM(BPF_W, R10, R0, -52), | ||
| 1791 | BPF_MOV32_IMM(R0, 17366), | ||
| 1792 | BPF_STX_MEM(BPF_W, R10, R0, -48), | ||
| 1793 | BPF_LDX_MEM(BPF_W, R7, R10, -48), | ||
| 1794 | BPF_LDX_MEM(BPF_W, R0, R10, -52), | ||
| 1795 | BPF_ALU32_REG(BPF_SUB, R0, R7), | ||
| 1796 | BPF_JMP_IMM(BPF_JNE, R0, 0, 2), | ||
| 1797 | BPF_MOV32_IMM(R0, 256), | ||
| 1798 | BPF_EXIT_INSN(), | ||
| 1799 | BPF_MOV32_IMM(R0, 0), | ||
| 1800 | BPF_EXIT_INSN(), | ||
| 1801 | }, | ||
| 1802 | INTERNAL, | ||
| 1803 | { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, | ||
| 1804 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 1805 | 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, | ||
| 1806 | { { 38, 256 } } | ||
| 1807 | }, | ||
| 1759 | }; | 1808 | }; |
| 1760 | 1809 | ||
| 1761 | static struct net_device dev; | 1810 | static struct net_device dev; |
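The "nmap reduced" case added above is easiest to read against its input. The 38-byte frame carries EtherType 0x0806 (bytes 0x08, 0x06 at offsets 12-13), so both ARP checks fall through; the word loaded at offset 18 + 14 = 32 is 0x10bf48d6 (decimal 280971478) and the halfword at offset 22 + 14 = 36 is 0x43d6 (decimal 17366), so both SUB comparisons come out zero and the program exits with R0 = 256, matching the expected { 38, 256 } pair of data length and return value.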
