Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c      |  20
-rw-r--r--  mm/filemap_xip.c  |   3
-rw-r--r--  mm/hugetlb.c      |  10
-rw-r--r--  mm/memory.c       |  32
-rw-r--r--  mm/page_alloc.c   |   2
-rw-r--r--  mm/shmem.c        | 198
-rw-r--r--  mm/vmalloc.c      |   2
7 files changed, 169 insertions, 98 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5357fcc4643b..b7b1be6dbd83 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -875,9 +875,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_mapping_read - generic file read routine
- * @mapping: address_space to be read
- * @ra: file's readahead state
+ * do_generic_file_read - generic file read routine
  * @filp: the file to read
  * @ppos: current file position
  * @desc: read_descriptor
@@ -888,18 +886,13 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
  */
-void do_generic_mapping_read(struct address_space *mapping,
-                             struct file_ra_state *ra,
-                             struct file *filp,
-                             loff_t *ppos,
-                             read_descriptor_t *desc,
-                             read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+                read_descriptor_t *desc, read_actor_t actor)
 {
+        struct address_space *mapping = filp->f_mapping;
         struct inode *inode = mapping->host;
+        struct file_ra_state *ra = &filp->f_ra;
         pgoff_t index;
         pgoff_t last_index;
         pgoff_t prev_index;
@@ -1091,7 +1084,6 @@ out:
         if (filp)
                 file_accessed(filp);
 }
-EXPORT_SYMBOL(do_generic_mapping_read);
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
                         unsigned long offset, unsigned long size)
@@ -1332,7 +1324,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         struct file_ra_state *ra = &file->f_ra;
         struct inode *inode = mapping->host;
         struct page *page;
-        unsigned long size;
+        pgoff_t size;
         int did_readaround = 0;
         int ret = 0;
 
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 0420a0292b03..5e598c42afd7 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -56,7 +56,8 @@ do_xip_mapping_read(struct address_space *mapping,
                      read_actor_t actor)
 {
         struct inode *inode = mapping->host;
-        unsigned long index, end_index, offset;
+        pgoff_t index, end_index;
+        unsigned long offset;
         loff_t isize;
 
         BUG_ON(!mapping->a_ops->get_xip_page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1a5642074e34..d9a380312467 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -605,6 +605,16 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
         return 0;
 }
 
+int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+                        struct file *file, void __user *buffer,
+                        size_t *length, loff_t *ppos)
+{
+        spin_lock(&hugetlb_lock);
+        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+        spin_unlock(&hugetlb_lock);
+        return 0;
+}
+
 #endif /* CONFIG_SYSCTL */
 
 int hugetlb_report_meminfo(char *buf)
diff --git a/mm/memory.c b/mm/memory.c
index 153a54b2013c..e5628a5fd678 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -134,11 +134,9 @@ void pmd_clear_bad(pmd_t *pmd)
  */
 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
-        struct page *page = pmd_page(*pmd);
+        pgtable_t token = pmd_pgtable(*pmd);
         pmd_clear(pmd);
-        pte_lock_deinit(page);
-        pte_free_tlb(tlb, page);
-        dec_zone_page_state(page, NR_PAGETABLE);
+        pte_free_tlb(tlb, token);
         tlb->mm->nr_ptes--;
 }
 
@@ -309,21 +307,19 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-        struct page *new = pte_alloc_one(mm, address);
+        pgtable_t new = pte_alloc_one(mm, address);
         if (!new)
                 return -ENOMEM;
 
-        pte_lock_init(new);
         spin_lock(&mm->page_table_lock);
-        if (pmd_present(*pmd)) {        /* Another has populated it */
-                pte_lock_deinit(new);
-                pte_free(mm, new);
-        } else {
+        if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                 mm->nr_ptes++;
-                inc_zone_page_state(new, NR_PAGETABLE);
                 pmd_populate(mm, pmd, new);
+                new = NULL;
         }
         spin_unlock(&mm->page_table_lock);
+        if (new)
+                pte_free(mm, new);
         return 0;
 }
 
@@ -334,11 +330,13 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
                 return -ENOMEM;
 
         spin_lock(&init_mm.page_table_lock);
-        if (pmd_present(*pmd))          /* Another has populated it */
-                pte_free_kernel(&init_mm, new);
-        else
+        if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                 pmd_populate_kernel(&init_mm, pmd, new);
+                new = NULL;
+        }
         spin_unlock(&init_mm.page_table_lock);
+        if (new)
+                pte_free_kernel(&init_mm, new);
         return 0;
 }
 
@@ -1390,7 +1388,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
         pte_t *pte;
         int err;
-        struct page *pmd_page;
+        pgtable_t token;
         spinlock_t *uninitialized_var(ptl);
 
         pte = (mm == &init_mm) ?
@@ -1401,10 +1399,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
         BUG_ON(pmd_huge(*pmd));
 
-        pmd_page = pmd_page(*pmd);
+        token = pmd_pgtable(*pmd);
 
         do {
-                err = fn(pte, pmd_page, addr, data);
+                err = fn(pte, token, addr, data);
                 if (err)
                         break;
         } while (pte++, addr += PAGE_SIZE, addr != end);
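The __pte_alloc()/__pte_alloc_kernel() hunks above follow a common shape: allocate the page table before taking the lock, publish it with pmd_populate() only if no other thread raced in, and free the losing allocation after the lock is dropped so the allocator never runs under the spinlock. Below is a minimal userspace sketch of that pattern, not kernel code; the names (struct table, shared_table, table_install) are hypothetical and only the locking shape mirrors the hunk.

/* Sketch: allocate outside the lock, publish under the lock,
 * free the loser after unlock. Build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct table { long entries[512]; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table *shared_table;      /* plays the role of the pmd slot */

static int table_install(void)
{
        /* Allocation may be slow or fail; do it before taking the lock. */
        struct table *new = calloc(1, sizeof(*new));
        if (!new)
                return -1;

        pthread_mutex_lock(&table_lock);
        if (!shared_table) {            /* has another thread populated it? */
                shared_table = new;     /* publish; ownership transferred */
                new = NULL;
        }
        pthread_mutex_unlock(&table_lock);

        free(new);                      /* no-op if we won the race */
        return 0;
}

static void *worker(void *arg)
{
        (void)arg;
        return (void *)(long)table_install();
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("table installed exactly once: %p\n", (void *)shared_table);
        return 0;
}

In the hunk, pmd_populate()/pmd_populate_kernel() is the publish step, and the pte_free()/pte_free_kernel() call after spin_unlock() disposes of the allocation that lost the race.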
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 26a54a17dc9f..75b979313346 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1451,7 +1451,7 @@ try_next_zone:
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page * fastcall
+struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
                 struct zonelist *zonelist)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index 85bed948fafc..90b576cbc06e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -49,6 +49,7 @@
 #include <linux/ctype.h>
 #include <linux/migrate.h>
 #include <linux/highmem.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 #include <asm/div64.h>
@@ -84,6 +85,18 @@ enum sgp_type {
         SGP_WRITE,      /* may exceed i_size, may allocate page */
 };
 
+#ifdef CONFIG_TMPFS
+static unsigned long shmem_default_max_blocks(void)
+{
+        return totalram_pages / 2;
+}
+
+static unsigned long shmem_default_max_inodes(void)
+{
+        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+}
+#endif
+
 static int shmem_getpage(struct inode *inode, unsigned long idx,
                          struct page **pagep, enum sgp_type sgp, int *type);
 
@@ -1068,7 +1081,8 @@ redirty:
 }
 
 #ifdef CONFIG_NUMA
-static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+#ifdef CONFIG_TMPFS
+static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
 {
         char *nodelist = strchr(value, ':');
         int err = 1;
@@ -1117,6 +1131,42 @@ out:
         return err;
 }
 
+static void shmem_show_mpol(struct seq_file *seq, int policy,
+                            const nodemask_t policy_nodes)
+{
+        char *policy_string;
+
+        switch (policy) {
+        case MPOL_PREFERRED:
+                policy_string = "prefer";
+                break;
+        case MPOL_BIND:
+                policy_string = "bind";
+                break;
+        case MPOL_INTERLEAVE:
+                policy_string = "interleave";
+                break;
+        default:
+                /* MPOL_DEFAULT */
+                return;
+        }
+
+        seq_printf(seq, ",mpol=%s", policy_string);
+
+        if (policy != MPOL_INTERLEAVE ||
+            !nodes_equal(policy_nodes, node_states[N_HIGH_MEMORY])) {
+                char buffer[64];
+                int len;
+
+                len = nodelist_scnprintf(buffer, sizeof(buffer), policy_nodes);
+                if (len < sizeof(buffer))
+                        seq_printf(seq, ":%s", buffer);
+                else
+                        seq_printf(seq, ":?");
+        }
+}
+#endif /* CONFIG_TMPFS */
+
 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                         struct shmem_inode_info *info, unsigned long idx)
 {
@@ -1148,13 +1198,20 @@ static struct page *shmem_alloc_page(gfp_t gfp,
         mpol_free(pvma.vm_policy);
         return page;
 }
-#else
+#else /* !CONFIG_NUMA */
+#ifdef CONFIG_TMPFS
 static inline int shmem_parse_mpol(char *value, int *policy,
                                                 nodemask_t *policy_nodes)
 {
         return 1;
 }
 
+static inline void shmem_show_mpol(struct seq_file *seq, int policy,
+                            const nodemask_t policy_nodes)
+{
+}
+#endif /* CONFIG_TMPFS */
+
 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                         struct shmem_inode_info *info, unsigned long idx)
 {
@@ -1166,7 +1223,7 @@ static inline struct page *shmem_alloc_page(gfp_t gfp,
 {
         return alloc_page(gfp);
 }
-#endif
+#endif /* CONFIG_NUMA */
 
 /*
  * shmem_getpage - either get the page from swap or allocate a new one
@@ -2077,9 +2134,8 @@ static const struct export_operations shmem_export_ops = {
         .fh_to_dentry   = shmem_fh_to_dentry,
 };
 
-static int shmem_parse_options(char *options, int *mode, uid_t *uid,
-        gid_t *gid, unsigned long *blocks, unsigned long *inodes,
-        int *policy, nodemask_t *policy_nodes)
+static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
+                               bool remount)
 {
         char *this_char, *value, *rest;
 
@@ -2122,35 +2178,37 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
                         }
                         if (*rest)
                                 goto bad_val;
-                        *blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
+                        sbinfo->max_blocks =
+                                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
                 } else if (!strcmp(this_char,"nr_blocks")) {
-                        *blocks = memparse(value,&rest);
+                        sbinfo->max_blocks = memparse(value, &rest);
                         if (*rest)
                                 goto bad_val;
                 } else if (!strcmp(this_char,"nr_inodes")) {
-                        *inodes = memparse(value,&rest);
+                        sbinfo->max_inodes = memparse(value, &rest);
                         if (*rest)
                                 goto bad_val;
                 } else if (!strcmp(this_char,"mode")) {
-                        if (!mode)
+                        if (remount)
                                 continue;
-                        *mode = simple_strtoul(value,&rest,8);
+                        sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
                         if (*rest)
                                 goto bad_val;
                 } else if (!strcmp(this_char,"uid")) {
-                        if (!uid)
+                        if (remount)
                                 continue;
-                        *uid = simple_strtoul(value,&rest,0);
+                        sbinfo->uid = simple_strtoul(value, &rest, 0);
                         if (*rest)
                                 goto bad_val;
                 } else if (!strcmp(this_char,"gid")) {
-                        if (!gid)
+                        if (remount)
                                 continue;
-                        *gid = simple_strtoul(value,&rest,0);
+                        sbinfo->gid = simple_strtoul(value, &rest, 0);
                         if (*rest)
                                 goto bad_val;
                 } else if (!strcmp(this_char,"mpol")) {
-                        if (shmem_parse_mpol(value,policy,policy_nodes))
+                        if (shmem_parse_mpol(value, &sbinfo->policy,
+                                             &sbinfo->policy_nodes))
                                 goto bad_val;
                 } else {
                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
@@ -2170,24 +2228,20 @@ bad_val:
 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
 {
         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
-        unsigned long max_blocks = sbinfo->max_blocks;
-        unsigned long max_inodes = sbinfo->max_inodes;
-        int policy = sbinfo->policy;
-        nodemask_t policy_nodes = sbinfo->policy_nodes;
+        struct shmem_sb_info config = *sbinfo;
         unsigned long blocks;
         unsigned long inodes;
         int error = -EINVAL;
 
-        if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
-                                &max_inodes, &policy, &policy_nodes))
+        if (shmem_parse_options(data, &config, true))
                 return error;
 
         spin_lock(&sbinfo->stat_lock);
         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
-        if (max_blocks < blocks)
+        if (config.max_blocks < blocks)
                 goto out;
-        if (max_inodes < inodes)
+        if (config.max_inodes < inodes)
                 goto out;
         /*
          * Those tests also disallow limited->unlimited while any are in
@@ -2195,23 +2249,42 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
          * but we must separately disallow unlimited->limited, because
          * in that case we have no record of how much is already in use.
          */
-        if (max_blocks && !sbinfo->max_blocks)
+        if (config.max_blocks && !sbinfo->max_blocks)
                 goto out;
-        if (max_inodes && !sbinfo->max_inodes)
+        if (config.max_inodes && !sbinfo->max_inodes)
                 goto out;
 
         error = 0;
-        sbinfo->max_blocks  = max_blocks;
-        sbinfo->free_blocks = max_blocks - blocks;
-        sbinfo->max_inodes  = max_inodes;
-        sbinfo->free_inodes = max_inodes - inodes;
-        sbinfo->policy = policy;
-        sbinfo->policy_nodes = policy_nodes;
+        sbinfo->max_blocks  = config.max_blocks;
+        sbinfo->free_blocks = config.max_blocks - blocks;
+        sbinfo->max_inodes  = config.max_inodes;
+        sbinfo->free_inodes = config.max_inodes - inodes;
+        sbinfo->policy = config.policy;
+        sbinfo->policy_nodes = config.policy_nodes;
 out:
         spin_unlock(&sbinfo->stat_lock);
         return error;
 }
-#endif
+
+static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
+{
+        struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
+
+        if (sbinfo->max_blocks != shmem_default_max_blocks())
+                seq_printf(seq, ",size=%luk",
+                        sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
+        if (sbinfo->max_inodes != shmem_default_max_inodes())
+                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
+        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
+                seq_printf(seq, ",mode=%03o", sbinfo->mode);
+        if (sbinfo->uid != 0)
+                seq_printf(seq, ",uid=%u", sbinfo->uid);
+        if (sbinfo->gid != 0)
+                seq_printf(seq, ",gid=%u", sbinfo->gid);
+        shmem_show_mpol(seq, sbinfo->policy, sbinfo->policy_nodes);
+        return 0;
+}
+#endif /* CONFIG_TMPFS */
 
 static void shmem_put_super(struct super_block *sb)
 {
@@ -2224,15 +2297,23 @@ static int shmem_fill_super(struct super_block *sb,
 {
         struct inode *inode;
         struct dentry *root;
-        int mode   = S_IRWXUGO | S_ISVTX;
-        uid_t uid = current->fsuid;
-        gid_t gid = current->fsgid;
-        int err = -ENOMEM;
         struct shmem_sb_info *sbinfo;
-        unsigned long blocks = 0;
-        unsigned long inodes = 0;
-        int policy = MPOL_DEFAULT;
-        nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
+        int err = -ENOMEM;
+
+        /* Round up to L1_CACHE_BYTES to resist false sharing */
+        sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
+                                L1_CACHE_BYTES), GFP_KERNEL);
+        if (!sbinfo)
+                return -ENOMEM;
+
+        sbinfo->max_blocks = 0;
+        sbinfo->max_inodes = 0;
+        sbinfo->mode = S_IRWXUGO | S_ISVTX;
+        sbinfo->uid = current->fsuid;
+        sbinfo->gid = current->fsgid;
+        sbinfo->policy = MPOL_DEFAULT;
+        sbinfo->policy_nodes = node_states[N_HIGH_MEMORY];
+        sb->s_fs_info = sbinfo;
 
 #ifdef CONFIG_TMPFS
         /*
@@ -2241,34 +2322,22 @@ static int shmem_fill_super(struct super_block *sb,
          * but the internal instance is left unlimited.
          */
         if (!(sb->s_flags & MS_NOUSER)) {
-                blocks = totalram_pages / 2;
-                inodes = totalram_pages - totalhigh_pages;
-                if (inodes > blocks)
-                        inodes = blocks;
-                if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
-                                        &inodes, &policy, &policy_nodes))
-                        return -EINVAL;
+                sbinfo->max_blocks = shmem_default_max_blocks();
+                sbinfo->max_inodes = shmem_default_max_inodes();
+                if (shmem_parse_options(data, sbinfo, false)) {
+                        err = -EINVAL;
+                        goto failed;
+                }
         }
         sb->s_export_op = &shmem_export_ops;
 #else
         sb->s_flags |= MS_NOUSER;
 #endif
 
-        /* Round up to L1_CACHE_BYTES to resist false sharing */
-        sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
-                                L1_CACHE_BYTES), GFP_KERNEL);
-        if (!sbinfo)
-                return -ENOMEM;
-
         spin_lock_init(&sbinfo->stat_lock);
-        sbinfo->max_blocks = blocks;
-        sbinfo->free_blocks = blocks;
-        sbinfo->max_inodes = inodes;
-        sbinfo->free_inodes = inodes;
-        sbinfo->policy = policy;
-        sbinfo->policy_nodes = policy_nodes;
+        sbinfo->free_blocks = sbinfo->max_blocks;
+        sbinfo->free_inodes = sbinfo->max_inodes;
 
-        sb->s_fs_info = sbinfo;
         sb->s_maxbytes = SHMEM_MAX_BYTES;
         sb->s_blocksize = PAGE_CACHE_SIZE;
         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -2280,11 +2349,11 @@ static int shmem_fill_super(struct super_block *sb,
         sb->s_flags |= MS_POSIXACL;
 #endif
 
-        inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
+        inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
         if (!inode)
                 goto failed;
-        inode->i_uid = uid;
-        inode->i_gid = gid;
+        inode->i_uid = sbinfo->uid;
+        inode->i_gid = sbinfo->gid;
         root = d_alloc_root(inode);
         if (!root)
                 goto failed_iput;
@@ -2420,6 +2489,7 @@ static const struct super_operations shmem_ops = {
 #ifdef CONFIG_TMPFS
         .statfs         = shmem_statfs,
         .remount_fs     = shmem_remount_fs,
+        .show_options   = shmem_show_options,
 #endif
         .delete_inode   = shmem_delete_inode,
         .drop_inode     = generic_delete_inode,
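The tmpfs hunks above fold shmem_parse_options()'s long argument list into a single shmem_sb_info, and shmem_remount_fs() now parses into a scratch copy ("config") that is committed to the live superblock only after validation under stat_lock. The standalone sketch below shows that parse-into-a-scratch-struct-then-commit shape with made-up option names and helpers (struct sb_config, parse_options); it is an illustration of the approach, not the kernel code.

/* Sketch: parse mount-style options into a scratch config and commit
 * only on success. struct sb_config and parse_options() are hypothetical. */
#define _DEFAULT_SOURCE         /* for strsep() on glibc */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sb_config {
        unsigned long max_blocks;
        unsigned long max_inodes;
        unsigned int mode;
};

/* Parse "nr_blocks=N,nr_inodes=N,mode=OCTAL"; returns 0 on success. */
static int parse_options(char *options, struct sb_config *cfg, bool remount)
{
        char *opt;

        while ((opt = strsep(&options, ",")) != NULL) {
                char *value = strchr(opt, '=');

                if (!value)
                        return -1;
                *value++ = '\0';

                if (!strcmp(opt, "nr_blocks")) {
                        cfg->max_blocks = strtoul(value, NULL, 0);
                } else if (!strcmp(opt, "nr_inodes")) {
                        cfg->max_inodes = strtoul(value, NULL, 0);
                } else if (!strcmp(opt, "mode")) {
                        if (remount)    /* not changeable on remount */
                                continue;
                        cfg->mode = strtoul(value, NULL, 8) & 07777;
                } else {
                        fprintf(stderr, "bad mount option: %s\n", opt);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct sb_config live = {
                .max_blocks = 1024, .max_inodes = 1024, .mode = 01777,
        };
        struct sb_config config = live;         /* scratch copy */
        char opts[] = "nr_blocks=2048,mode=0700";

        /* Nothing touches 'live' until the whole string parsed cleanly. */
        if (parse_options(opts, &config, false) == 0)
                live = config;

        printf("blocks=%lu inodes=%lu mode=%o\n",
               live.max_blocks, live.max_inodes, live.mode);
        return 0;
}

In the kernel hunk the commit step additionally happens under sbinfo->stat_lock, and on remount the mode/uid/gid options are skipped because they only make sense at mount time; the "remount" flag above mirrors that.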
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0536dde139d1..950c0be9ca81 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -820,7 +820,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 }
 
 
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
         /* apply_to_page_range() does all the hard work. */
         return 0;