 MAINTAINERS                        |  4
 drivers/char/agp/frontend.c        |  1
 drivers/rtc/rtc-pcf8523.c          |  4
 fs/affs/super.c                    |  2
 fs/autofs4/root.c                  |  4
 fs/hugetlbfs/inode.c               |  5
 fs/notify/fanotify/fanotify_user.c |  2
 include/linux/hugetlb.h            | 10
 include/linux/slub_def.h           |  9
 mm/compaction.c                    | 22
 mm/filemap.c                       | 49
 mm/hugetlb.c                       | 19
 mm/memcontrol.c                    | 20
 mm/page-writeback.c                |  6
 mm/slab.h                          |  1
 mm/slab_common.c                   | 13
 mm/slub.c                          | 41
 mm/truncate.c                      |  8
 mm/vmscan.c                        | 18
 19 files changed, 147 insertions(+), 91 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7578deb8ff20..51ebb779c5f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9960,7 +9960,7 @@ F: drivers/net/hamradio/*scc.c
 F:	drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:	Seth Jennings <sjenning@linux.vnet.ibm.com>
+M:	Seth Jennings <sjennings@variantweb.net>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@ -10005,7 +10005,7 @@ F: mm/zsmalloc.c
 F:	include/linux/zsmalloc.h
 
 ZSWAP COMPRESSED SWAP CACHING
-M:	Seth Jennings <sjenning@linux.vnet.ibm.com>
+M:	Seth Jennings <sjennings@variantweb.net>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 8121b4c70ede..b29703324e94 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -730,6 +730,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
 
 	agp_copy_info(agp_bridge, &kerninfo);
 
+	memset(&userinfo, 0, sizeof(userinfo));
 	userinfo.version.major = kerninfo.version.major;
 	userinfo.version.minor = kerninfo.version.minor;
 	userinfo.bridge_id = kerninfo.device->vendor |
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 5c8f8226c848..4cdb64be061b 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -206,7 +206,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	tm->tm_hour = bcd2bin(regs[2] & 0x3f);
 	tm->tm_mday = bcd2bin(regs[3] & 0x3f);
 	tm->tm_wday = regs[4] & 0x7;
-	tm->tm_mon = bcd2bin(regs[5] & 0x1f);
+	tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1;
 	tm->tm_year = bcd2bin(regs[6]) + 100;
 
 	return rtc_valid_tm(tm);
@@ -229,7 +229,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	regs[3] = bin2bcd(tm->tm_hour);
 	regs[4] = bin2bcd(tm->tm_mday);
 	regs[5] = tm->tm_wday;
-	regs[6] = bin2bcd(tm->tm_mon);
+	regs[6] = bin2bcd(tm->tm_mon + 1);
 	regs[7] = bin2bcd(tm->tm_year - 100);
 
 	msg.addr = client->addr;
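
The month fix above is the classic off-by-one between struct rtc_time, which counts months 0-11, and the PCF8523 registers, which store the month 1-12 in BCD. A minimal userspace sketch of the round trip (bcd2bin/bin2bcd are reimplemented here for illustration; the kernel provides its own):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t bcd2bin(uint8_t v) { return (v >> 4) * 10 + (v & 0x0f); }
    static uint8_t bin2bcd(uint8_t v) { return ((v / 10) << 4) | (v % 10); }

    int main(void)
    {
        int tm_mon = 11;                    /* December in struct rtc_time */
        uint8_t reg = bin2bcd(tm_mon + 1);  /* 0x12: BCD "12" on the wire */

        /* The read path masks and undoes the offset. */
        assert(bcd2bin(reg & 0x1f) - 1 == tm_mon);
        return 0;
    }

Without the two +1/-1 adjustments, every date read from or written to the chip is off by one month.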
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 6d589f28bf9b..895ac7dc9dbf 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -340,8 +340,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 			   &blocksize, &sbi->s_prefix,
 			   sbi->s_volume, &mount_flags)) {
 		printk(KERN_ERR "AFFS: Error parsing options\n");
-		kfree(sbi->s_prefix);
-		kfree(sbi);
 		return -EINVAL;
 	}
 	/* N.B. after this point s_prefix must be released */
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2caf36ac3e93..cc87c1abac97 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
 		spin_lock(&active->d_lock);
 
 		/* Already gone? */
-		if (!d_count(active))
+		if ((int) d_count(active) <= 0)
 			goto next;
 
 		qstr = &active->d_name;
@@ -230,7 +230,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
 
 		spin_lock(&expiring->d_lock);
 
-		/* Bad luck, we've already been dentry_iput */
+		/* We've already been dentry_iput or unlinked */
 		if (!expiring->d_inode)
 			goto next;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 204027520937..e19d4c0cacae 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1030,6 +1030,11 @@ static int __init init_hugetlbfs_fs(void)
 	int error;
 	int i;
 
+	if (!hugepages_supported()) {
+		pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n");
+		return -ENOTSUPP;
+	}
+
 	error = bdi_init(&hugetlbfs_backing_dev_info);
 	if (error)
 		return error;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 4e565c814309..732648b270dc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -698,6 +698,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	}
 	group->overflow_event = &oevent->fse;
 
+	if (force_o_largefile())
+		event_f_flags |= O_LARGEFILE;
 	group->fanotify_data.f_flags = event_f_flags;
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
 	spin_lock_init(&group->fanotify_data.access_lock);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5b337cf8fb86..b65166de1d9d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -412,6 +412,16 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 	return &mm->page_table_lock;
 }
 
+static inline bool hugepages_supported(void)
+{
+	/*
+	 * Some platform decide whether they support huge pages at boot
+	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+	 * there is no such support
+	 */
+	return HPAGE_SHIFT != 0;
+}
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f2f7398848cf..d82abd40a3c0 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,4 +101,13 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+#ifdef CONFIG_SYSFS
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_remove(struct kmem_cache *);
+#else
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+}
+#endif
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/compaction.c b/mm/compaction.c
index 37f976287068..627dc2e4320f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -671,16 +671,20 @@ static void isolate_freepages(struct zone *zone,
 			      struct compact_control *cc)
 {
 	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+	unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
 	/*
 	 * Initialise the free scanner. The starting point is where we last
-	 * scanned from (or the end of the zone if starting). The low point
-	 * is the end of the pageblock the migration scanner is using.
+	 * successfully isolated from, zone-cached value, or the end of the
+	 * zone when isolating for the first time. We need this aligned to
+	 * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+	 * in the for loop.
+	 * The low boundary is the end of the pageblock the migration scanner
+	 * is using.
 	 */
-	pfn = cc->free_pfn;
+	pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
 	/*
@@ -700,6 +704,7 @@ static void isolate_freepages(struct zone *zone,
 	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
+		unsigned long end_pfn;
 
 		/*
 		 * This can iterate a massively long zone without finding any
@@ -734,13 +739,10 @@ static void isolate_freepages(struct zone *zone,
 		isolated = 0;
 
 		/*
-		 * As pfn may not start aligned, pfn+pageblock_nr_page
-		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
-		 * a pfn_valid check. Ensure isolate_freepages_block()
-		 * only scans within a pageblock
+		 * Take care when isolating in last pageblock of a zone which
+		 * ends in the middle of a pageblock.
 		 */
-		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-		end_pfn = min(end_pfn, z_end_pfn);
+		end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
 						   freelist, false);
 		nr_freepages += isolated;
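
The key change here is aligning cc->free_pfn down to a pageblock boundary before entering the loop, since the loop then steps by whole pageblocks. A small sketch of the masking arithmetic, assuming order-9 (512-page) pageblocks as on x86:

    #include <assert.h>

    #define pageblock_nr_pages 512UL  /* assumed pageblock size */

    int main(void)
    {
        unsigned long free_pfn = 1000;  /* cached scanner position, mid-block */
        unsigned long pfn = free_pfn & ~(pageblock_nr_pages - 1);

        assert(pfn == 512);                     /* start of containing block */
        assert(pfn % pageblock_nr_pages == 0);  /* safe to step by blocks */
        return 0;
    }

With the start aligned, the old per-iteration ALIGN/min dance reduces to a single min() against the zone end.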
diff --git a/mm/filemap.c b/mm/filemap.c
index 5020b280a771..000a220e2a41 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -906,8 +906,8 @@ EXPORT_SYMBOL(page_cache_prev_hole);
  * Looks up the page cache slot at @mapping & @offset.  If there is a
  * page cache page, it is returned with an increased refcount.
  *
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
  *
  * Otherwise, %NULL is returned.
  */
@@ -928,9 +928,9 @@ repeat:
 			if (radix_tree_deref_retry(page))
 				goto repeat;
 			/*
-			 * Otherwise, shmem/tmpfs must be storing a swap entry
-			 * here as an exceptional entry: so return it without
-			 * attempting to raise page count.
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Return
+			 * it without attempting to raise page count.
 			 */
 			goto out;
 		}
@@ -983,8 +983,8 @@ EXPORT_SYMBOL(find_get_page);
  * page cache page, it is returned locked and with an increased
  * refcount.
 *
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
@@ -1099,8 +1099,8 @@ EXPORT_SYMBOL(find_or_create_page);
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
- * Any shadow entries of evicted pages are included in the returned
- * array.
+ * Any shadow entries of evicted pages, or swap entries from
+ * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
@@ -1128,9 +1128,9 @@ repeat:
 			if (radix_tree_deref_retry(page))
 				goto restart;
 			/*
-			 * Otherwise, we must be storing a swap entry
-			 * here as an exceptional entry: so return it
-			 * without attempting to raise page count.
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Return
+			 * it without attempting to raise page count.
 			 */
 			goto export;
 		}
@@ -1198,9 +1198,9 @@ repeat:
 				goto restart;
 			}
 			/*
-			 * Otherwise, shmem/tmpfs must be storing a swap entry
-			 * here as an exceptional entry: so skip over it -
-			 * we only reach this from invalidate_mapping_pages().
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Skip
+			 * over it.
 			 */
 			continue;
 		}
@@ -1265,9 +1265,9 @@ repeat:
 				goto restart;
 			}
 			/*
-			 * Otherwise, shmem/tmpfs must be storing a swap entry
-			 * here as an exceptional entry: so stop looking for
-			 * contiguous pages.
+			 * A shadow entry of a recently evicted page,
+			 * or a swap entry from shmem/tmpfs.  Stop
+			 * looking for contiguous pages.
 			 */
 			break;
 		}
@@ -1341,10 +1341,17 @@ repeat:
 			goto restart;
 		}
 		/*
-		 * This function is never used on a shmem/tmpfs
-		 * mapping, so a swap entry won't be found here.
+		 * A shadow entry of a recently evicted page.
+		 *
+		 * Those entries should never be tagged, but
+		 * this tree walk is lockless and the tags are
+		 * looked up in bulk, one radix tree node at a
+		 * time, so there is a sizable window for page
+		 * reclaim to evict a page we saw tagged.
+		 *
+		 * Skip over it.
 		 */
-		BUG();
+		continue;
 	}
 
 	if (!page_cache_get_speculative(page))
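
All of these hunks hinge on exceptional radix tree entries: shadow entries of evicted pages and shmem swap entries are stored with a low tag bit set, so they can never be mistaken for a struct page pointer. A rough userspace sketch of the tag-bit test (the constant's value matches the kernel's RADIX_TREE_EXCEPTIONAL_ENTRY; everything else is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define RADIX_TREE_EXCEPTIONAL_ENTRY 2UL  /* bit 1, as in the kernel */

    static int radix_tree_exceptional_entry(void *entry)
    {
        return ((uintptr_t)entry & RADIX_TREE_EXCEPTIONAL_ENTRY) != 0;
    }

    int main(void)
    {
        long page;  /* stand-in for struct page; pointers are word-aligned */
        void *shadow = (void *)((0x1234UL << 2) | RADIX_TREE_EXCEPTIONAL_ENTRY);

        assert(!radix_tree_exceptional_entry(&page));
        assert(radix_tree_exceptional_entry(shadow));
        return 0;
    }

Because shadow entries can now appear anywhere in a mapping, the find_get_pages_tag() path above must tolerate them (continue) rather than BUG() on them.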
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 246192929a2d..c82290b9c1fc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1981,11 +1981,7 @@ static int __init hugetlb_init(void)
 {
 	int i;
 
-	/* Some platform decide whether they support huge pages at boot
-	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
-	 * there is no such support
-	 */
-	if (HPAGE_SHIFT == 0)
+	if (!hugepages_supported())
 		return 0;
 
 	if (!size_to_hstate(default_hstate_size)) {
@@ -2112,6 +2108,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	unsigned long tmp;
 	int ret;
 
+	if (!hugepages_supported())
+		return -ENOTSUPP;
+
 	tmp = h->max_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
@@ -2165,6 +2164,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 	unsigned long tmp;
 	int ret;
 
+	if (!hugepages_supported())
+		return -ENOTSUPP;
+
 	tmp = h->nr_overcommit_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
@@ -2190,6 +2192,8 @@ out:
 void hugetlb_report_meminfo(struct seq_file *m)
 {
 	struct hstate *h = &default_hstate;
+	if (!hugepages_supported())
+		return;
 	seq_printf(m,
 			"HugePages_Total:   %5lu\n"
 			"HugePages_Free:    %5lu\n"
@@ -2206,6 +2210,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
 int hugetlb_report_node_meminfo(int nid, char *buf)
 {
 	struct hstate *h = &default_hstate;
+	if (!hugepages_supported())
+		return 0;
 	return sprintf(buf,
 		"Node %d HugePages_Total: %5u\n"
 		"Node %d HugePages_Free:  %5u\n"
@@ -2220,6 +2226,9 @@ void hugetlb_show_meminfo(void)
 	struct hstate *h;
 	int nid;
 
+	if (!hugepages_supported())
+		return;
+
 	for_each_node_state(nid, N_MEMORY)
 		for_each_hstate(h)
 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29501f040568..c47dffdcb246 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6686,16 +6686,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
 		pgoff = pte_to_pgoff(ptent);
 
 	/* page is moved even if it's not RSS of this task(page-faulted). */
-	page = find_get_page(mapping, pgoff);
-
 #ifdef CONFIG_SWAP
 	/* shmem/tmpfs may report page out on swap: account for that too. */
-	if (radix_tree_exceptional_entry(page)) {
-		swp_entry_t swap = radix_to_swp_entry(page);
-		if (do_swap_account)
-			*entry = swap;
-		page = find_get_page(swap_address_space(swap), swap.val);
-	}
+	if (shmem_mapping(mapping)) {
+		page = find_get_entry(mapping, pgoff);
+		if (radix_tree_exceptional_entry(page)) {
+			swp_entry_t swp = radix_to_swp_entry(page);
+			if (do_swap_account)
+				*entry = swp;
+			page = find_get_page(swap_address_space(swp), swp.val);
+		}
+	} else
+		page = find_get_page(mapping, pgoff);
+#else
+	page = find_get_page(mapping, pgoff);
 #endif
 	return page;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ef413492a149..a4317da60532 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -593,14 +593,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
  * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
  *     => fast response on large errors; small oscillation near setpoint
  */
-static inline long long pos_ratio_polynom(unsigned long setpoint,
+static long long pos_ratio_polynom(unsigned long setpoint,
 					  unsigned long dirty,
 					  unsigned long limit)
 {
 	long long pos_ratio;
 	long x;
 
-	x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
 		    limit - setpoint + 1);
 	pos_ratio = x;
 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -842,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 	x_intercept = bdi_setpoint + span;
 
 	if (bdi_dirty < x_intercept - span / 4) {
-		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+		pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
 			x_intercept - bdi_setpoint + 1);
 	} else
 		pos_ratio /= 4;
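
The div_s64 to div64_s64 switch matters because div_s64 takes a 32-bit divisor: on machines with enough memory, limit - setpoint + 1 can exceed 2^31 pages and gets silently truncated, corrupting pos_ratio. A compilable sketch of the truncation; the two helpers stand in for the kernel's, which have the same signatures:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel helpers: note the 32-bit divisor in div_s64. */
    static int64_t div_s64(int64_t dividend, int32_t divisor)
    {
        return dividend / divisor;
    }

    static int64_t div64_s64(int64_t dividend, int64_t divisor)
    {
        return dividend / divisor;
    }

    int main(void)
    {
        int64_t dividend = 1LL << 40;
        int64_t divisor = (1LL << 32) + 3;  /* does not fit in an s32 */

        /* The old call path silently truncates the divisor to 3. */
        printf("div_s64:   %lld\n", (long long)div_s64(dividend, (int32_t)divisor));
        printf("div64_s64: %lld\n", (long long)div64_s64(dividend, divisor));
        return 0;
    }

The same reasoning applies to the div_u64 to div64_u64 change in bdi_position_ratio().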
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -91,6 +91,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
 struct file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f3cfccf76dda..102cc6fca3d3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -323,6 +323,12 @@ static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+void slab_kmem_cache_release(struct kmem_cache *s)
+{
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+}
+
 void kmem_cache_destroy(struct kmem_cache *s)
 {
 	get_online_cpus();
@@ -352,8 +358,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	rcu_barrier();
 
 	memcg_free_cache_params(s);
-	kfree(s->name);
-	kmem_cache_free(kmem_cache, s);
+#ifdef SLAB_SUPPORTS_SYSFS
+	sysfs_slab_remove(s);
+#else
+	slab_kmem_cache_release(s);
+#endif
 	goto out_put_cpus;
 
 out_unlock:
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,14 +210,11 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
 #ifdef CONFIG_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
-static void sysfs_slab_remove(struct kmem_cache *);
 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 #else
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s) { }
-
 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 #endif
 
@@ -3238,24 +3235,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 
 int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	int rc = kmem_cache_close(s);
-
-	if (!rc) {
-		/*
-		 * Since slab_attr_store may take the slab_mutex, we should
-		 * release the lock while removing the sysfs entry in order to
-		 * avoid a deadlock. Because this is pretty much the last
-		 * operation we do and the lock will be released shortly after
-		 * that in slab_common.c, we could just move sysfs_slab_remove
-		 * to a later point in common code. We should do that when we
-		 * have a common sysfs framework for all allocators.
-		 */
-		mutex_unlock(&slab_mutex);
-		sysfs_slab_remove(s);
-		mutex_lock(&slab_mutex);
-	}
-
-	return rc;
+	return kmem_cache_close(s);
 }
 
 /********************************************************************
@@ -5071,15 +5051,18 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 #ifdef CONFIG_MEMCG_KMEM
 	int i;
 	char *buffer = NULL;
+	struct kmem_cache *root_cache;
 
-	if (!is_root_cache(s))
+	if (is_root_cache(s))
 		return;
 
+	root_cache = s->memcg_params->root_cache;
+
 	/*
 	 * This mean this cache had no attribute written. Therefore, no point
 	 * in copying default values around
 	 */
-	if (!s->max_attr_size)
+	if (!root_cache->max_attr_size)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
@@ -5101,7 +5084,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 		 */
 		if (buffer)
 			buf = buffer;
-		else if (s->max_attr_size < ARRAY_SIZE(mbuf))
+		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
 			buf = mbuf;
 		else {
 			buffer = (char *) get_zeroed_page(GFP_KERNEL);
@@ -5110,7 +5093,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 			buf = buffer;
 		}
 
-		attr->show(s->memcg_params->root_cache, buf);
+		attr->show(root_cache, buf);
 		attr->store(s, buf, strlen(buf));
 	}
 
@@ -5119,6 +5102,11 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 #endif
 }
 
+static void kmem_cache_release(struct kobject *k)
+{
+	slab_kmem_cache_release(to_slab(k));
+}
+
 static const struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
@@ -5126,6 +5114,7 @@ static const struct sysfs_ops slab_sysfs_ops = {
 
 static struct kobj_type slab_ktype = {
 	.sysfs_ops = &slab_sysfs_ops,
+	.release = kmem_cache_release,
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -5252,7 +5241,7 @@ out_put_kobj:
 	goto out;
 }
 
-static void sysfs_slab_remove(struct kmem_cache *s)
+void sysfs_slab_remove(struct kmem_cache *s)
 {
 	if (slab_state < FULL)
 		/*
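
Together with the slab_common.c hunks, this moves the final freeing of a kmem_cache into the kobject's release callback, so the cache structure outlives any still-open sysfs files and is freed only when the last reference drops. A condensed userspace sketch of that pattern, with plain C stand-ins for kobject/container_of:

    #include <stdlib.h>
    #include <string.h>

    struct kobj {
        int refcount;
        void (*release)(struct kobj *k);
    };

    struct cache {
        struct kobj kobj;  /* embedded first, so the cast below recovers it */
        char *name;
    };

    static void kobj_put(struct kobj *k)
    {
        if (--k->refcount == 0)
            k->release(k);  /* freeing happens here, on the last put */
    }

    static void cache_release(struct kobj *k)
    {
        struct cache *c = (struct cache *)k;  /* container_of() in the kernel */
        free(c->name);
        free(c);
    }

    int main(void)
    {
        struct cache *c = calloc(1, sizeof(*c));
        c->name = strdup("dentry");
        c->kobj.refcount = 2;  /* creator + an open sysfs file */
        c->kobj.release = cache_release;

        kobj_put(&c->kobj);  /* destroy: cache must stay alive for sysfs */
        kobj_put(&c->kobj);  /* sysfs file closed: now safe to free */
        return 0;
    }

This is also why __kmem_cache_shutdown() no longer needs to drop and retake slab_mutex around sysfs removal.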
diff --git a/mm/truncate.c b/mm/truncate.c
index e5cc39ab0751..6a78c814bebf 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -484,14 +484,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 	unsigned long count = 0;
 	int i;
 
-	/*
-	 * Note: this function may get called on a shmem/tmpfs mapping:
-	 * pagevec_lookup() might then return 0 prematurely (because it
-	 * got a gangful of swap entries); but it's hardly worth worrying
-	 * about - it can rarely have anything to free from such a mapping
-	 * (most pages are dirty), and already skips over any difficulties.
-	 */
-
 	pagevec_init(&pvec, 0);
 	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f56c8deb3c0..32c661d66a45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1916,6 +1916,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
 	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU.  And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages.  Try to detect this based on file LRU size.
+	 */
+	if (global_reclaim(sc)) {
+		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+
+		if (unlikely(file + free <= high_wmark_pages(zone))) {
+			scan_balance = SCAN_ANON;
+			goto out;
+		}
+	}
+
+	/*
 	 * There is enough inactive page cache, do not reclaim
 	 * anything from the anonymous working set right now.
 	 */
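
The guard added here forces anon scanning once file LRU pages plus free pages can no longer cover the zone's high watermark, i.e. once the remaining file cache is almost certainly thrashing. A toy illustration of the threshold, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long free = 2000, high_wmark = 4000;

        /* Shrink the file LRU and watch where the balance tips. */
        for (unsigned long file = 4000; file > 0; file /= 2) {
            const char *balance = (file + free <= high_wmark)
                ? "SCAN_ANON (cache trap)" : "balanced";
            printf("file=%4lu free=%lu -> %s\n", file, free, balance);
        }
        return 0;
    }

The check only applies to global reclaim, where the zone-wide watermark comparison is meaningful.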
