diff options
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig.debug | 14 | ||||
| -rw-r--r-- | lib/Makefile | 2 | ||||
| -rw-r--r-- | lib/dma-debug.c | 43 | ||||
| -rw-r--r-- | lib/dynamic_debug.c | 4 | ||||
| -rw-r--r-- | lib/genalloc.c | 1 | ||||
| -rw-r--r-- | lib/lcm.c | 8 | ||||
| -rw-r--r-- | lib/seq_buf.c | 359 | ||||
| -rw-r--r-- | lib/show_mem.c | 2 |
8 files changed, 396 insertions, 37 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 4e35a5d767ed..d780351835e9 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -1238,21 +1238,9 @@ config RCU_CPU_STALL_TIMEOUT | |||
| 1238 | RCU grace period persists, additional CPU stall warnings are | 1238 | RCU grace period persists, additional CPU stall warnings are |
| 1239 | printed at more widely spaced intervals. | 1239 | printed at more widely spaced intervals. |
| 1240 | 1240 | ||
| 1241 | config RCU_CPU_STALL_VERBOSE | ||
| 1242 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | ||
| 1243 | depends on TREE_PREEMPT_RCU | ||
| 1244 | default y | ||
| 1245 | help | ||
| 1246 | This option causes RCU to printk detailed per-task information | ||
| 1247 | for any tasks that are stalling the current RCU grace period. | ||
| 1248 | |||
| 1249 | Say N if you are unsure. | ||
| 1250 | |||
| 1251 | Say Y if you want to enable such checks. | ||
| 1252 | |||
| 1253 | config RCU_CPU_STALL_INFO | 1241 | config RCU_CPU_STALL_INFO |
| 1254 | bool "Print additional diagnostics on RCU CPU stall" | 1242 | bool "Print additional diagnostics on RCU CPU stall" |
| 1255 | depends on (TREE_RCU || TREE_PREEMPT_RCU) && DEBUG_KERNEL | 1243 | depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL |
| 1256 | default n | 1244 | default n |
| 1257 | help | 1245 | help |
| 1258 | For each stalled CPU that is aware of the current RCU grace | 1246 | For each stalled CPU that is aware of the current RCU grace |
diff --git a/lib/Makefile b/lib/Makefile index 0211d2bd5e17..923a191eaf71 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
| 13 | sha1.o md5.o irq_regs.o argv_split.o \ | 13 | sha1.o md5.o irq_regs.o argv_split.o \ |
| 14 | proportions.o flex_proportions.o ratelimit.o show_mem.o \ | 14 | proportions.o flex_proportions.o ratelimit.o show_mem.o \ |
| 15 | is_single_threaded.o plist.o decompress.o kobject_uevent.o \ | 15 | is_single_threaded.o plist.o decompress.o kobject_uevent.o \ |
| 16 | earlycpio.o | 16 | earlycpio.o seq_buf.o |
| 17 | 17 | ||
| 18 | obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o | 18 | obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o |
| 19 | lib-$(CONFIG_MMU) += ioremap.o | 19 | lib-$(CONFIG_MMU) += ioremap.o |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index add80cc02dbe..9722bd2dbc9b 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
| @@ -102,6 +102,14 @@ static DEFINE_SPINLOCK(free_entries_lock); | |||
| 102 | /* Global disable flag - will be set in case of an error */ | 102 | /* Global disable flag - will be set in case of an error */ |
| 103 | static u32 global_disable __read_mostly; | 103 | static u32 global_disable __read_mostly; |
| 104 | 104 | ||
| 105 | /* Early initialization disable flag, set at the end of dma_debug_init */ | ||
| 106 | static bool dma_debug_initialized __read_mostly; | ||
| 107 | |||
| 108 | static inline bool dma_debug_disabled(void) | ||
| 109 | { | ||
| 110 | return global_disable || !dma_debug_initialized; | ||
| 111 | } | ||
| 112 | |||
| 105 | /* Global error count */ | 113 | /* Global error count */ |
| 106 | static u32 error_count; | 114 | static u32 error_count; |
| 107 | 115 | ||
| @@ -945,7 +953,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti | |||
| 945 | struct dma_debug_entry *uninitialized_var(entry); | 953 | struct dma_debug_entry *uninitialized_var(entry); |
| 946 | int count; | 954 | int count; |
| 947 | 955 | ||
| 948 | if (global_disable) | 956 | if (dma_debug_disabled()) |
| 949 | return 0; | 957 | return 0; |
| 950 | 958 | ||
| 951 | switch (action) { | 959 | switch (action) { |
| @@ -973,7 +981,7 @@ void dma_debug_add_bus(struct bus_type *bus) | |||
| 973 | { | 981 | { |
| 974 | struct notifier_block *nb; | 982 | struct notifier_block *nb; |
| 975 | 983 | ||
| 976 | if (global_disable) | 984 | if (dma_debug_disabled()) |
| 977 | return; | 985 | return; |
| 978 | 986 | ||
| 979 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); | 987 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); |
| @@ -994,6 +1002,9 @@ void dma_debug_init(u32 num_entries) | |||
| 994 | { | 1002 | { |
| 995 | int i; | 1003 | int i; |
| 996 | 1004 | ||
| 1005 | /* Do not use dma_debug_initialized here, since we really want to be | ||
| 1006 | * called to set dma_debug_initialized | ||
| 1007 | */ | ||
| 997 | if (global_disable) | 1008 | if (global_disable) |
| 998 | return; | 1009 | return; |
| 999 | 1010 | ||
| @@ -1021,6 +1032,8 @@ void dma_debug_init(u32 num_entries) | |||
| 1021 | 1032 | ||
| 1022 | nr_total_entries = num_free_entries; | 1033 | nr_total_entries = num_free_entries; |
| 1023 | 1034 | ||
| 1035 | dma_debug_initialized = true; | ||
| 1036 | |||
| 1024 | pr_info("DMA-API: debugging enabled by kernel config\n"); | 1037 | pr_info("DMA-API: debugging enabled by kernel config\n"); |
| 1025 | } | 1038 | } |
| 1026 | 1039 | ||
| @@ -1243,7 +1256,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | |||
| 1243 | { | 1256 | { |
| 1244 | struct dma_debug_entry *entry; | 1257 | struct dma_debug_entry *entry; |
| 1245 | 1258 | ||
| 1246 | if (unlikely(global_disable)) | 1259 | if (unlikely(dma_debug_disabled())) |
| 1247 | return; | 1260 | return; |
| 1248 | 1261 | ||
| 1249 | if (dma_mapping_error(dev, dma_addr)) | 1262 | if (dma_mapping_error(dev, dma_addr)) |
| @@ -1283,7 +1296,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
| 1283 | struct hash_bucket *bucket; | 1296 | struct hash_bucket *bucket; |
| 1284 | unsigned long flags; | 1297 | unsigned long flags; |
| 1285 | 1298 | ||
| 1286 | if (unlikely(global_disable)) | 1299 | if (unlikely(dma_debug_disabled())) |
| 1287 | return; | 1300 | return; |
| 1288 | 1301 | ||
| 1289 | ref.dev = dev; | 1302 | ref.dev = dev; |
| @@ -1325,7 +1338,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | |||
| 1325 | .direction = direction, | 1338 | .direction = direction, |
| 1326 | }; | 1339 | }; |
| 1327 | 1340 | ||
| 1328 | if (unlikely(global_disable)) | 1341 | if (unlikely(dma_debug_disabled())) |
| 1329 | return; | 1342 | return; |
| 1330 | 1343 | ||
| 1331 | if (map_single) | 1344 | if (map_single) |
| @@ -1342,7 +1355,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 1342 | struct scatterlist *s; | 1355 | struct scatterlist *s; |
| 1343 | int i; | 1356 | int i; |
| 1344 | 1357 | ||
| 1345 | if (unlikely(global_disable)) | 1358 | if (unlikely(dma_debug_disabled())) |
| 1346 | return; | 1359 | return; |
| 1347 | 1360 | ||
| 1348 | for_each_sg(sg, s, mapped_ents, i) { | 1361 | for_each_sg(sg, s, mapped_ents, i) { |
| @@ -1395,7 +1408,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 1395 | struct scatterlist *s; | 1408 | struct scatterlist *s; |
| 1396 | int mapped_ents = 0, i; | 1409 | int mapped_ents = 0, i; |
| 1397 | 1410 | ||
| 1398 | if (unlikely(global_disable)) | 1411 | if (unlikely(dma_debug_disabled())) |
| 1399 | return; | 1412 | return; |
| 1400 | 1413 | ||
| 1401 | for_each_sg(sglist, s, nelems, i) { | 1414 | for_each_sg(sglist, s, nelems, i) { |
| @@ -1427,7 +1440,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
| 1427 | { | 1440 | { |
| 1428 | struct dma_debug_entry *entry; | 1441 | struct dma_debug_entry *entry; |
| 1429 | 1442 | ||
| 1430 | if (unlikely(global_disable)) | 1443 | if (unlikely(dma_debug_disabled())) |
| 1431 | return; | 1444 | return; |
| 1432 | 1445 | ||
| 1433 | if (unlikely(virt == NULL)) | 1446 | if (unlikely(virt == NULL)) |
| @@ -1462,7 +1475,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1462 | .direction = DMA_BIDIRECTIONAL, | 1475 | .direction = DMA_BIDIRECTIONAL, |
| 1463 | }; | 1476 | }; |
| 1464 | 1477 | ||
| 1465 | if (unlikely(global_disable)) | 1478 | if (unlikely(dma_debug_disabled())) |
| 1466 | return; | 1479 | return; |
| 1467 | 1480 | ||
| 1468 | check_unmap(&ref); | 1481 | check_unmap(&ref); |
| @@ -1474,7 +1487,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |||
| 1474 | { | 1487 | { |
| 1475 | struct dma_debug_entry ref; | 1488 | struct dma_debug_entry ref; |
| 1476 | 1489 | ||
| 1477 | if (unlikely(global_disable)) | 1490 | if (unlikely(dma_debug_disabled())) |
| 1478 | return; | 1491 | return; |
| 1479 | 1492 | ||
| 1480 | ref.type = dma_debug_single; | 1493 | ref.type = dma_debug_single; |
| @@ -1494,7 +1507,7 @@ void debug_dma_sync_single_for_device(struct device *dev, | |||
| 1494 | { | 1507 | { |
| 1495 | struct dma_debug_entry ref; | 1508 | struct dma_debug_entry ref; |
| 1496 | 1509 | ||
| 1497 | if (unlikely(global_disable)) | 1510 | if (unlikely(dma_debug_disabled())) |
| 1498 | return; | 1511 | return; |
| 1499 | 1512 | ||
| 1500 | ref.type = dma_debug_single; | 1513 | ref.type = dma_debug_single; |
| @@ -1515,7 +1528,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev, | |||
| 1515 | { | 1528 | { |
| 1516 | struct dma_debug_entry ref; | 1529 | struct dma_debug_entry ref; |
| 1517 | 1530 | ||
| 1518 | if (unlikely(global_disable)) | 1531 | if (unlikely(dma_debug_disabled())) |
| 1519 | return; | 1532 | return; |
| 1520 | 1533 | ||
| 1521 | ref.type = dma_debug_single; | 1534 | ref.type = dma_debug_single; |
| @@ -1536,7 +1549,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev, | |||
| 1536 | { | 1549 | { |
| 1537 | struct dma_debug_entry ref; | 1550 | struct dma_debug_entry ref; |
| 1538 | 1551 | ||
| 1539 | if (unlikely(global_disable)) | 1552 | if (unlikely(dma_debug_disabled())) |
| 1540 | return; | 1553 | return; |
| 1541 | 1554 | ||
| 1542 | ref.type = dma_debug_single; | 1555 | ref.type = dma_debug_single; |
| @@ -1556,7 +1569,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
| 1556 | struct scatterlist *s; | 1569 | struct scatterlist *s; |
| 1557 | int mapped_ents = 0, i; | 1570 | int mapped_ents = 0, i; |
| 1558 | 1571 | ||
| 1559 | if (unlikely(global_disable)) | 1572 | if (unlikely(dma_debug_disabled())) |
| 1560 | return; | 1573 | return; |
| 1561 | 1574 | ||
| 1562 | for_each_sg(sg, s, nelems, i) { | 1575 | for_each_sg(sg, s, nelems, i) { |
| @@ -1589,7 +1602,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
| 1589 | struct scatterlist *s; | 1602 | struct scatterlist *s; |
| 1590 | int mapped_ents = 0, i; | 1603 | int mapped_ents = 0, i; |
| 1591 | 1604 | ||
| 1592 | if (unlikely(global_disable)) | 1605 | if (unlikely(dma_debug_disabled())) |
| 1593 | return; | 1606 | return; |
| 1594 | 1607 | ||
| 1595 | for_each_sg(sg, s, nelems, i) { | 1608 | for_each_sg(sg, s, nelems, i) { |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index dfba05521748..527799d44476 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
| @@ -576,7 +576,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor, | |||
| 576 | } else { | 576 | } else { |
| 577 | char buf[PREFIX_SIZE]; | 577 | char buf[PREFIX_SIZE]; |
| 578 | 578 | ||
| 579 | dev_printk_emit(7, dev, "%s%s %s: %pV", | 579 | dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV", |
| 580 | dynamic_emit_prefix(descriptor, buf), | 580 | dynamic_emit_prefix(descriptor, buf), |
| 581 | dev_driver_string(dev), dev_name(dev), | 581 | dev_driver_string(dev), dev_name(dev), |
| 582 | &vaf); | 582 | &vaf); |
| @@ -605,7 +605,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
| 605 | if (dev && dev->dev.parent) { | 605 | if (dev && dev->dev.parent) { |
| 606 | char buf[PREFIX_SIZE]; | 606 | char buf[PREFIX_SIZE]; |
| 607 | 607 | ||
| 608 | dev_printk_emit(7, dev->dev.parent, | 608 | dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent, |
| 609 | "%s%s %s %s%s: %pV", | 609 | "%s%s %s %s%s: %pV", |
| 610 | dynamic_emit_prefix(descriptor, buf), | 610 | dynamic_emit_prefix(descriptor, buf), |
| 611 | dev_driver_string(dev->dev.parent), | 611 | dev_driver_string(dev->dev.parent), |
diff --git a/lib/genalloc.c b/lib/genalloc.c index cce4dd68c40d..2e65d206b01c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, | |||
| 598 | 598 | ||
| 599 | return pool; | 599 | return pool; |
| 600 | } | 600 | } |
| 601 | EXPORT_SYMBOL(devm_gen_pool_create); | ||
| 601 | 602 | ||
| 602 | /** | 603 | /** |
| 603 | * dev_get_gen_pool - Obtain the gen_pool (if any) for a device | 604 | * dev_get_gen_pool - Obtain the gen_pool (if any) for a device |
| @@ -7,10 +7,8 @@ | |||
| 7 | unsigned long lcm(unsigned long a, unsigned long b) | 7 | unsigned long lcm(unsigned long a, unsigned long b) |
| 8 | { | 8 | { |
| 9 | if (a && b) | 9 | if (a && b) |
| 10 | return (a * b) / gcd(a, b); | 10 | return (a / gcd(a, b)) * b; |
| 11 | else if (b) | 11 | else |
| 12 | return b; | 12 | return 0; |
| 13 | |||
| 14 | return a; | ||
| 15 | } | 13 | } |
| 16 | EXPORT_SYMBOL_GPL(lcm); | 14 | EXPORT_SYMBOL_GPL(lcm); |
diff --git a/lib/seq_buf.c b/lib/seq_buf.c new file mode 100644 index 000000000000..4eedfedb9e31 --- /dev/null +++ b/lib/seq_buf.c | |||
| @@ -0,0 +1,359 @@ | |||
| 1 | /* | ||
| 2 | * seq_buf.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> | ||
| 5 | * | ||
| 6 | * The seq_buf is a handy tool that allows you to pass a descriptor around | ||
| 7 | * to a buffer that other functions can write to. It is similar to the | ||
| 8 | * seq_file functionality but has some differences. | ||
| 9 | * | ||
| 10 | * To use it, the seq_buf must be initialized with seq_buf_init(). | ||
| 11 | * This will set up the counters within the descriptor. You can call | ||
| 12 | * seq_buf_init() more than once to reset the seq_buf to start | ||
| 13 | * from scratch. | ||
| 14 | */ | ||
| 15 | #include <linux/uaccess.h> | ||
| 16 | #include <linux/seq_file.h> | ||
| 17 | #include <linux/seq_buf.h> | ||
| 18 | |||
| 19 | /** | ||
| 20 | * seq_buf_can_fit - can the new data fit in the current buffer? | ||
| 21 | * @s: the seq_buf descriptor | ||
| 22 | * @len: The length to see if it can fit in the current buffer | ||
| 23 | * | ||
| 24 | * Returns true if there's enough unused space in the seq_buf buffer | ||
| 25 | * to fit the amount of new data according to @len. | ||
| 26 | */ | ||
| 27 | static bool seq_buf_can_fit(struct seq_buf *s, size_t len) | ||
| 28 | { | ||
| 29 | return s->len + len <= s->size; | ||
| 30 | } | ||
| 31 | |||
| 32 | /** | ||
| 33 | * seq_buf_print_seq - move the contents of seq_buf into a seq_file | ||
| 34 | * @m: the seq_file descriptor that is the destination | ||
| 35 | * @s: the seq_buf descriptor that is the source. | ||
| 36 | * | ||
| 37 | * Returns zero on success, non zero otherwise | ||
| 38 | */ | ||
| 39 | int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) | ||
| 40 | { | ||
| 41 | unsigned int len = seq_buf_used(s); | ||
| 42 | |||
| 43 | return seq_write(m, s->buffer, len); | ||
| 44 | } | ||
| 45 | |||
| 46 | /** | ||
| 47 | * seq_buf_vprintf - sequence printing of information. | ||
| 48 | * @s: seq_buf descriptor | ||
| 49 | * @fmt: printf format string | ||
| 50 | * @args: va_list of arguments from a printf() type function | ||
| 51 | * | ||
| 52 | * Writes a vsnprintf() format into the sequence buffer. | ||
| 53 | * | ||
| 54 | * Returns zero on success, -1 on overflow. | ||
| 55 | */ | ||
| 56 | int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) | ||
| 57 | { | ||
| 58 | int len; | ||
| 59 | |||
| 60 | WARN_ON(s->size == 0); | ||
| 61 | |||
| 62 | if (s->len < s->size) { | ||
| 63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); | ||
| 64 | if (seq_buf_can_fit(s, len)) { | ||
| 65 | s->len += len; | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | } | ||
| 69 | seq_buf_set_overflow(s); | ||
| 70 | return -1; | ||
| 71 | } | ||
| 72 | |||
| 73 | /** | ||
| 74 | * seq_buf_printf - sequence printing of information | ||
| 75 | * @s: seq_buf descriptor | ||
| 76 | * @fmt: printf format string | ||
| 77 | * | ||
| 78 | * Writes a printf() format into the sequence buffer. | ||
| 79 | * | ||
| 80 | * Returns zero on success, -1 on overflow. | ||
| 81 | */ | ||
| 82 | int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) | ||
| 83 | { | ||
| 84 | va_list ap; | ||
| 85 | int ret; | ||
| 86 | |||
| 87 | va_start(ap, fmt); | ||
| 88 | ret = seq_buf_vprintf(s, fmt, ap); | ||
| 89 | va_end(ap); | ||
| 90 | |||
| 91 | return ret; | ||
| 92 | } | ||
| 93 | |||
| 94 | /** | ||
| 95 | * seq_buf_bitmask - write a bitmask array in its ASCII representation | ||
| 96 | * @s: seq_buf descriptor | ||
| 97 | * @maskp: points to an array of unsigned longs that represent a bitmask | ||
| 98 | * @nmaskbits: The number of bits that are valid in @maskp | ||
| 99 | * | ||
| 100 | * Writes an ASCII representation of a bitmask string into @s. | ||
| 101 | * | ||
| 102 | * Returns zero on success, -1 on overflow. | ||
| 103 | */ | ||
| 104 | int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, | ||
| 105 | int nmaskbits) | ||
| 106 | { | ||
| 107 | unsigned int len = seq_buf_buffer_left(s); | ||
| 108 | int ret; | ||
| 109 | |||
| 110 | WARN_ON(s->size == 0); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Note, because bitmap_scnprintf() only returns the number of bytes | ||
| 114 | * written and not the number that would be written, we use the last | ||
| 115 | * byte of the buffer to let us know if we overflowed. There's a small | ||
| 116 | * chance that the bitmap could have fit exactly inside the buffer, but | ||
| 117 | * it's not that critical if that does happen. | ||
| 118 | */ | ||
| 119 | if (len > 1) { | ||
| 120 | ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); | ||
| 121 | if (ret < len) { | ||
| 122 | s->len += ret; | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | } | ||
| 126 | seq_buf_set_overflow(s); | ||
| 127 | return -1; | ||
| 128 | } | ||
| 129 | |||
| 130 | #ifdef CONFIG_BINARY_PRINTF | ||
| 131 | /** | ||
| 132 | * seq_buf_bprintf - Write the printf string from binary arguments | ||
| 133 | * @s: seq_buf descriptor | ||
| 134 | * @fmt: The format string for the @binary arguments | ||
| 135 | * @binary: The binary arguments for @fmt. | ||
| 136 | * | ||
| 137 | * When recording in a fast path, a printf may be recorded with just | ||
| 138 | * saving the format and the arguments as they were passed to the | ||
| 139 | * function, instead of wasting cycles converting the arguments into | ||
| 140 | * ASCII characters. Instead, the arguments are saved in a 32 bit | ||
| 141 | * word array that is defined by the format string constraints. | ||
| 142 | * | ||
| 143 | * This function will take the format and the binary array and finish | ||
| 144 | * the conversion into the ASCII string within the buffer. | ||
| 145 | * | ||
| 146 | * Returns zero on success, -1 on overflow. | ||
| 147 | */ | ||
| 148 | int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) | ||
| 149 | { | ||
| 150 | unsigned int len = seq_buf_buffer_left(s); | ||
| 151 | int ret; | ||
| 152 | |||
| 153 | WARN_ON(s->size == 0); | ||
| 154 | |||
| 155 | if (s->len < s->size) { | ||
| 156 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | ||
| 157 | if (seq_buf_can_fit(s, ret)) { | ||
| 158 | s->len += ret; | ||
| 159 | return 0; | ||
| 160 | } | ||
| 161 | } | ||
| 162 | seq_buf_set_overflow(s); | ||
| 163 | return -1; | ||
| 164 | } | ||
| 165 | #endif /* CONFIG_BINARY_PRINTF */ | ||
| 166 | |||
| 167 | /** | ||
| 168 | * seq_buf_puts - sequence printing of simple string | ||
| 169 | * @s: seq_buf descriptor | ||
| 170 | * @str: simple string to record | ||
| 171 | * | ||
| 172 | * Copy a simple string into the sequence buffer. | ||
| 173 | * | ||
| 174 | * Returns zero on success, -1 on overflow | ||
| 175 | */ | ||
| 176 | int seq_buf_puts(struct seq_buf *s, const char *str) | ||
| 177 | { | ||
| 178 | unsigned int len = strlen(str); | ||
| 179 | |||
| 180 | WARN_ON(s->size == 0); | ||
| 181 | |||
| 182 | if (seq_buf_can_fit(s, len)) { | ||
| 183 | memcpy(s->buffer + s->len, str, len); | ||
| 184 | s->len += len; | ||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | seq_buf_set_overflow(s); | ||
| 188 | return -1; | ||
| 189 | } | ||
| 190 | |||
| 191 | /** | ||
| 192 | * seq_buf_putc - sequence printing of simple character | ||
| 193 | * @s: seq_buf descriptor | ||
| 194 | * @c: simple character to record | ||
| 195 | * | ||
| 196 | * Copy a single character into the sequence buffer. | ||
| 197 | * | ||
| 198 | * Returns zero on success, -1 on overflow | ||
| 199 | */ | ||
| 200 | int seq_buf_putc(struct seq_buf *s, unsigned char c) | ||
| 201 | { | ||
| 202 | WARN_ON(s->size == 0); | ||
| 203 | |||
| 204 | if (seq_buf_can_fit(s, 1)) { | ||
| 205 | s->buffer[s->len++] = c; | ||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | seq_buf_set_overflow(s); | ||
| 209 | return -1; | ||
| 210 | } | ||
| 211 | |||
| 212 | /** | ||
| 213 | * seq_buf_putmem - write raw data into the sequence buffer | ||
| 214 | * @s: seq_buf descriptor | ||
| 215 | * @mem: The raw memory to copy into the buffer | ||
| 216 | * @len: The length of the raw memory to copy (in bytes) | ||
| 217 | * | ||
| 218 | * There may be cases where raw memory needs to be written into the | ||
| 219 | * buffer and a strcpy() would not work. Using this function allows | ||
| 220 | * for such cases. | ||
| 221 | * | ||
| 222 | * Returns zero on success, -1 on overflow | ||
| 223 | */ | ||
| 224 | int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) | ||
| 225 | { | ||
| 226 | WARN_ON(s->size == 0); | ||
| 227 | |||
| 228 | if (seq_buf_can_fit(s, len)) { | ||
| 229 | memcpy(s->buffer + s->len, mem, len); | ||
| 230 | s->len += len; | ||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | seq_buf_set_overflow(s); | ||
| 234 | return -1; | ||
| 235 | } | ||
| 236 | |||
| 237 | #define MAX_MEMHEX_BYTES 8U | ||
| 238 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
| 239 | |||
| 240 | /** | ||
| 241 | * seq_buf_putmem_hex - write raw memory into the buffer in ASCII hex | ||
| 242 | * @s: seq_buf descriptor | ||
| 243 | * @mem: The raw memory to write its hex ASCII representation of | ||
| 244 | * @len: The length of the raw memory to copy (in bytes) | ||
| 245 | * | ||
| 246 | * This is similar to seq_buf_putmem() except instead of just copying the | ||
| 247 | * raw memory into the buffer it writes its ASCII representation of it | ||
| 248 | * in hex characters. | ||
| 249 | * | ||
| 250 | * Returns zero on success, -1 on overflow | ||
| 251 | */ | ||
| 252 | int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, | ||
| 253 | unsigned int len) | ||
| 254 | { | ||
| 255 | unsigned char hex[HEX_CHARS]; | ||
| 256 | const unsigned char *data = mem; | ||
| 257 | unsigned int start_len; | ||
| 258 | int i, j; | ||
| 259 | |||
| 260 | WARN_ON(s->size == 0); | ||
| 261 | |||
| 262 | while (len) { | ||
| 263 | start_len = min(len, HEX_CHARS - 1); | ||
| 264 | #ifdef __BIG_ENDIAN | ||
| 265 | for (i = 0, j = 0; i < start_len; i++) { | ||
| 266 | #else | ||
| 267 | for (i = start_len-1, j = 0; i >= 0; i--) { | ||
| 268 | #endif | ||
| 269 | hex[j++] = hex_asc_hi(data[i]); | ||
| 270 | hex[j++] = hex_asc_lo(data[i]); | ||
| 271 | } | ||
| 272 | if (WARN_ON_ONCE(j == 0 || j/2 > len)) | ||
| 273 | break; | ||
| 274 | |||
| 275 | /* j increments twice per loop */ | ||
| 276 | len -= j / 2; | ||
| 277 | hex[j++] = ' '; | ||
| 278 | |||
| 279 | seq_buf_putmem(s, hex, j); | ||
| 280 | if (seq_buf_has_overflowed(s)) | ||
| 281 | return -1; | ||
| 282 | } | ||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | /** | ||
| 287 | * seq_buf_path - copy a path into the sequence buffer | ||
| 288 | * @s: seq_buf descriptor | ||
| 289 | * @path: path to write into the sequence buffer. | ||
| 290 | * @esc: set of characters to escape in the output | ||
| 291 | * | ||
| 292 | * Write a path name into the sequence buffer. | ||
| 293 | * | ||
| 294 | * Returns the number of written bytes on success, -1 on overflow | ||
| 295 | */ | ||
| 296 | int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) | ||
| 297 | { | ||
| 298 | char *buf; | ||
| 299 | size_t size = seq_buf_get_buf(s, &buf); | ||
| 300 | int res = -1; | ||
| 301 | |||
| 302 | WARN_ON(s->size == 0); | ||
| 303 | |||
| 304 | if (size) { | ||
| 305 | char *p = d_path(path, buf, size); | ||
| 306 | if (!IS_ERR(p)) { | ||
| 307 | char *end = mangle_path(buf, p, esc); | ||
| 308 | if (end) | ||
| 309 | res = end - buf; | ||
| 310 | } | ||
| 311 | } | ||
| 312 | seq_buf_commit(s, res); | ||
| 313 | |||
| 314 | return res; | ||
| 315 | } | ||
| 316 | |||
| 317 | /** | ||
| 318 | * seq_buf_to_user - copy the sequence buffer to user space | ||
| 319 | * @s: seq_buf descriptor | ||
| 320 | * @ubuf: The userspace memory location to copy to | ||
| 321 | * @cnt: The amount to copy | ||
| 322 | * | ||
| 323 | * Copies the sequence buffer into the userspace memory pointed to | ||
| 324 | * by @ubuf. It starts from the last read position (@s->readpos) | ||
| 325 | * and writes up to @cnt characters or till it reaches the end of | ||
| 326 | * the content in the buffer (@s->len), whichever comes first. | ||
| 327 | * | ||
| 328 | * On success, it returns a positive number of the number of bytes | ||
| 329 | * it copied. | ||
| 330 | * | ||
| 331 | * On failure it returns -EBUSY if all of the content in the | ||
| 332 | * sequence has been already read, which includes nothing in the | ||
| 333 | * sequence (@s->len == @s->readpos). | ||
| 334 | * | ||
| 335 | * Returns -EFAULT if the copy to userspace fails. | ||
| 336 | */ | ||
| 337 | int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) | ||
| 338 | { | ||
| 339 | int len; | ||
| 340 | int ret; | ||
| 341 | |||
| 342 | if (!cnt) | ||
| 343 | return 0; | ||
| 344 | |||
| 345 | if (s->len <= s->readpos) | ||
| 346 | return -EBUSY; | ||
| 347 | |||
| 348 | len = seq_buf_used(s) - s->readpos; | ||
| 349 | if (cnt > len) | ||
| 350 | cnt = len; | ||
| 351 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | ||
| 352 | if (ret == cnt) | ||
| 353 | return -EFAULT; | ||
| 354 | |||
| 355 | cnt -= ret; | ||
| 356 | |||
| 357 | s->readpos += cnt; | ||
| 358 | return cnt; | ||
| 359 | } | ||
diff --git a/lib/show_mem.c b/lib/show_mem.c index 09225796991a..5e256271b47b 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -28,7 +28,7 @@ void show_mem(unsigned int filter) | |||
| 28 | continue; | 28 | continue; |
| 29 | 29 | ||
| 30 | total += zone->present_pages; | 30 | total += zone->present_pages; |
| 31 | reserved = zone->present_pages - zone->managed_pages; | 31 | reserved += zone->present_pages - zone->managed_pages; |
| 32 | 32 | ||
| 33 | if (is_highmem_idx(zoneid)) | 33 | if (is_highmem_idx(zoneid)) |
| 34 | highmem += zone->present_pages; | 34 | highmem += zone->present_pages; |
