author		Leon Romanovsky <leon@leon.nu>			2015-06-24 19:57:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-24 20:49:44 -0400
commit		22cc877b32202b6d82e580bc6b3b445531659d3e (patch)
tree		96a41efd14dfa51ac3a735fde2705c2d8b16ee97 /mm/nommu.c
parent		8809aa2d28d74111ff2f1928edaa4e9845c97a7d (diff)
mm: nommu: refactor debug and warning prints
kenter/kleave/kdebug are wrapper macros that print function flow and debug
information. This set was written before pr_devel() was introduced, so it
was controlled by an "#if 0" construct. It is questionable whether anyone
is using them [1] now.
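For reference, a minimal sketch of the pattern being removed, set against
the pr_devel() helper from include/linux/printk.h that supersedes it (the
trace_example() caller below is hypothetical, added only for illustration):

#include <linux/printk.h>

/* Old style: a hand-rolled wrapper, toggled by editing "#if 0" in the
 * source file itself. */
#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#endif

/* Modern style: pr_devel() encodes the same on/off switch, keyed on
 * whether DEBUG is defined at compile time; when off it expands to
 * no_printk(), which still type-checks the arguments but emits no code. */
static void trace_example(unsigned long addr)	/* hypothetical caller */
{
	kenter("%lx", addr);				/* old */
	pr_devel("==> %s(%lx)\n", __func__, addr);	/* new */
}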
This patch removes these macros, converts the numerous printk(KERN_WARNING
...) calls to the generic pr_warn(...), and removes a debug print line from
the validate_mmap_request() function.
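The warning conversion is mechanical: pr_warn(fmt, ...) expands to
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__), and since mm/nommu.c does
not appear to define its own pr_fmt, the default pass-through leaves the
output unchanged. One conversion from the diff below, shown before and after:

/* before */
printk(KERN_WARNING "Attempt to share mismatched mappings\n");

/* after: same loglevel, same text, one coalesced format string */
pr_warn("Attempt to share mismatched mappings\n");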
Signed-off-by: Leon Romanovsky <leon@leon.nu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/nommu.c')
-rw-r--r--	mm/nommu.c	112
1 file changed, 20 insertions(+), 92 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index e544508e2a4b..05e7447d960b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -42,22 +42,6 @@
 #include <asm/mmu_context.h>
 #include "internal.h"
 
-#if 0
-#define kenter(FMT, ...) \
-	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) \
-	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) \
-	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
-#else
-#define kenter(FMT, ...) \
-	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) \
-	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) \
-	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
-#endif
-
 void *high_memory;
 EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
@@ -665,11 +649,7 @@ static void free_page_series(unsigned long from, unsigned long to)
 	for (; from < to; from += PAGE_SIZE) {
 		struct page *page = virt_to_page(from);
 
-		kdebug("- free %lx", from);
 		atomic_long_dec(&mmap_pages_allocated);
-		if (page_count(page) != 1)
-			kdebug("free page %p: refcount not one: %d",
-			       page, page_count(page));
 		put_page(page);
 	}
 }
@@ -683,8 +663,6 @@ static void free_page_series(unsigned long from, unsigned long to)
 static void __put_nommu_region(struct vm_region *region)
 	__releases(nommu_region_sem)
 {
-	kenter("%p{%d}", region, region->vm_usage);
-
 	BUG_ON(!nommu_region_tree.rb_node);
 
 	if (--region->vm_usage == 0) {
@@ -697,10 +675,8 @@ static void __put_nommu_region(struct vm_region *region)
 
 		/* IO memory and memory shared directly out of the pagecache
 		 * from ramfs/tmpfs mustn't be released here */
-		if (region->vm_flags & VM_MAPPED_COPY) {
-			kdebug("free series");
+		if (region->vm_flags & VM_MAPPED_COPY)
 			free_page_series(region->vm_start, region->vm_top);
-		}
 		kmem_cache_free(vm_region_jar, region);
 	} else {
 		up_write(&nommu_region_sem);
@@ -744,8 +720,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	struct address_space *mapping;
 	struct rb_node **p, *parent, *rb_prev;
 
-	kenter(",%p", vma);
-
 	BUG_ON(!vma->vm_region);
 
 	mm->map_count++;
@@ -813,8 +787,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	struct mm_struct *mm = vma->vm_mm;
 	struct task_struct *curr = current;
 
-	kenter("%p", vma);
-
 	protect_vma(vma, 0);
 
 	mm->map_count--;
@@ -854,7 +826,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
  */
 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	kenter("%p", vma);
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
@@ -957,12 +928,8 @@ static int validate_mmap_request(struct file *file,
 	int ret;
 
 	/* do the simple checks first */
-	if (flags & MAP_FIXED) {
-		printk(KERN_DEBUG
-		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
-		       current->pid);
+	if (flags & MAP_FIXED)
 		return -EINVAL;
-	}
 
 	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 	    (flags & MAP_TYPE) != MAP_SHARED)
@@ -1060,8 +1027,7 @@ static int validate_mmap_request(struct file *file,
 		    ) {
 			capabilities &= ~NOMMU_MAP_DIRECT;
 			if (flags & MAP_SHARED) {
-				printk(KERN_WARNING
-				       "MAP_SHARED not completely supported on !MMU\n");
+				pr_warn("MAP_SHARED not completely supported on !MMU\n");
 				return -EINVAL;
 			}
 		}
@@ -1205,16 +1171,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	 * we're allocating is smaller than a page
 	 */
 	order = get_order(len);
-	kdebug("alloc order %d for %lx", order, len);
-
 	total = 1 << order;
 	point = len >> PAGE_SHIFT;
 
 	/* we don't want to allocate a power-of-2 sized page set */
-	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
 		total = point;
-		kdebug("try to alloc exact %lu pages", total);
-	}
 
 	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
 	if (!base)
@@ -1285,18 +1247,14 @@ unsigned long do_mmap_pgoff(struct file *file,
 	unsigned long capabilities, vm_flags, result;
 	int ret;
 
-	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
-
 	*populate = 0;
 
 	/* decide whether we should attempt the mapping, and if so what sort of
 	 * mapping */
 	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
 				    &capabilities);
-	if (ret < 0) {
-		kleave(" = %d [val]", ret);
+	if (ret < 0)
 		return ret;
-	}
 
 	/* we ignore the address hint */
 	addr = 0;
@@ -1383,11 +1341,9 @@ unsigned long do_mmap_pgoff(struct file *file,
 			vma->vm_start = start;
 			vma->vm_end = start + len;
 
-			if (pregion->vm_flags & VM_MAPPED_COPY) {
-				kdebug("share copy");
+			if (pregion->vm_flags & VM_MAPPED_COPY)
 				vma->vm_flags |= VM_MAPPED_COPY;
-			} else {
-				kdebug("share mmap");
+			else {
 				ret = do_mmap_shared_file(vma);
 				if (ret < 0) {
 					vma->vm_region = NULL;
@@ -1467,7 +1423,6 @@ share:
 
 	up_write(&nommu_region_sem);
 
-	kleave(" = %lx", result);
 	return result;
 
 error_just_free:
@@ -1479,27 +1434,24 @@ error:
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	kmem_cache_free(vm_area_cachep, vma);
-	kleave(" = %d", ret);
 	return ret;
 
 sharing_violation:
 	up_write(&nommu_region_sem);
-	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
+	pr_warn("Attempt to share mismatched mappings\n");
 	ret = -EINVAL;
 	goto error;
 
 error_getting_vma:
 	kmem_cache_free(vm_region_jar, region);
-	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
-	       " from process %d failed\n",
-	       len, current->pid);
+	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
+		len, current->pid);
 	show_free_areas(0);
 	return -ENOMEM;
 
 error_getting_region:
-	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
-	       " from process %d failed\n",
-	       len, current->pid);
+	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
+		len, current->pid);
 	show_free_areas(0);
 	return -ENOMEM;
 }
@@ -1563,8 +1515,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct vm_region *region;
 	unsigned long npages;
 
-	kenter("");
-
 	/* we're only permitted to split anonymous regions (these should have
 	 * only a single usage on the region) */
 	if (vma->vm_file)
@@ -1628,8 +1578,6 @@ static int shrink_vma(struct mm_struct *mm,
 {
 	struct vm_region *region;
 
-	kenter("");
-
 	/* adjust the VMA's pointers, which may reposition it in the MM's tree
 	 * and list */
 	delete_vma_from_mm(vma);
@@ -1669,8 +1617,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	unsigned long end;
 	int ret;
 
-	kenter(",%lx,%zx", start, len);
-
 	len = PAGE_ALIGN(len);
 	if (len == 0)
 		return -EINVAL;
@@ -1682,11 +1628,9 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	if (!vma) {
 		static int limit;
 		if (limit < 5) {
-			printk(KERN_WARNING
-			       "munmap of memory not mmapped by process %d"
-			       " (%s): 0x%lx-0x%lx\n",
-			       current->pid, current->comm,
-			       start, start + len - 1);
+			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
+				current->pid, current->comm,
+				start, start + len - 1);
 			limit++;
 		}
 		return -EINVAL;
@@ -1695,38 +1639,27 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* we're allowed to split an anonymous VMA but not a file-backed one */
 	if (vma->vm_file) {
 		do {
-			if (start > vma->vm_start) {
-				kleave(" = -EINVAL [miss]");
+			if (start > vma->vm_start)
 				return -EINVAL;
-			}
 			if (end == vma->vm_end)
 				goto erase_whole_vma;
 			vma = vma->vm_next;
 		} while (vma);
-		kleave(" = -EINVAL [split file]");
 		return -EINVAL;
 	} else {
 		/* the chunk must be a subset of the VMA found */
 		if (start == vma->vm_start && end == vma->vm_end)
 			goto erase_whole_vma;
-		if (start < vma->vm_start || end > vma->vm_end) {
-			kleave(" = -EINVAL [superset]");
+		if (start < vma->vm_start || end > vma->vm_end)
 			return -EINVAL;
-		}
-		if (start & ~PAGE_MASK) {
-			kleave(" = -EINVAL [unaligned start]");
+		if (start & ~PAGE_MASK)
 			return -EINVAL;
-		}
-		if (end != vma->vm_end && end & ~PAGE_MASK) {
-			kleave(" = -EINVAL [unaligned split]");
+		if (end != vma->vm_end && end & ~PAGE_MASK)
 			return -EINVAL;
-		}
 		if (start != vma->vm_start && end != vma->vm_end) {
 			ret = split_vma(mm, vma, start, 1);
-			if (ret < 0) {
-				kleave(" = %d [split]", ret);
+			if (ret < 0)
 				return ret;
-			}
 		}
 		return shrink_vma(mm, vma, start, end);
 	}
@@ -1734,7 +1667,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 erase_whole_vma:
 	delete_vma_from_mm(vma);
 	delete_vma(mm, vma);
-	kleave(" = 0");
 	return 0;
 }
 EXPORT_SYMBOL(do_munmap);
@@ -1766,8 +1698,6 @@ void exit_mmap(struct mm_struct *mm)
 	if (!mm)
 		return;
 
-	kenter("");
-
 	mm->total_vm = 0;
 
 	while ((vma = mm->mmap)) {
@@ -1776,8 +1706,6 @@ void exit_mmap(struct mm_struct *mm)
 		delete_vma(mm, vma);
 		cond_resched();
 	}
-
-	kleave("");
 }
 
 unsigned long vm_brk(unsigned long addr, unsigned long len)