Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    |  4
-rw-r--r--  mm/mempolicy.c  | 64
-rw-r--r--  mm/mmap.c       |  2
-rw-r--r--  mm/pdflush.c    | 13
-rw-r--r--  mm/swap.c       |  4
-rw-r--r--  mm/tiny-shmem.c |  5
-rw-r--r--  mm/truncate.c   | 11
7 files changed, 88 insertions, 15 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 768687f1d46b..5d6e4c2000dc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1030,8 +1030,8 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		desc.error = 0;
 		do_generic_file_read(filp,ppos,&desc,file_read_actor);
 		retval += desc.written;
-		if (!retval) {
-			retval = desc.error;
+		if (desc.error) {
+			retval = retval ?: desc.error;
 			break;
 		}
 	}
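The filemap.c change fixes the return convention for partial reads: previously a late error clobbered the count of bytes already copied, whereas read(2) semantics say the byte count wins and the error is reported only when nothing was read. A minimal userspace sketch of that convention (finish_read() is a hypothetical name; `x ?: y` is the GNU C shorthand for `x ? x : y` used in the new line, so this needs gcc or clang):

#include <stdio.h>

static long finish_read(long retval, long err)
{
        return retval ?: err;   /* GNU C: keep retval when non-zero */
}

int main(void)
{
        printf("%ld\n", finish_read(4096, -5)); /* partial read: 4096 */
        printf("%ld\n", finish_read(0, -5));    /* nothing read: the error */
        return 0;
}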
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2076b1542b8a..5abc57c2b8bd 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -457,6 +457,7 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	struct vm_area_struct *vma = NULL;
 	struct mempolicy *pol = current->mempolicy;
 
+	cpuset_update_current_mems_allowed();
 	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
 		return -EINVAL;
 	if (flags & MPOL_F_ADDR) {
@@ -1206,3 +1207,66 @@ void numa_default_policy(void)
 {
 	do_set_mempolicy(MPOL_DEFAULT, NULL);
 }
+
+/* Migrate a policy to a different set of nodes */
+static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
+			  const nodemask_t *new)
+{
+	nodemask_t tmp;
+
+	if (!pol)
+		return;
+
+	switch (pol->policy) {
+	case MPOL_DEFAULT:
+		break;
+	case MPOL_INTERLEAVE:
+		nodes_remap(tmp, pol->v.nodes, *old, *new);
+		pol->v.nodes = tmp;
+		current->il_next = node_remap(current->il_next, *old, *new);
+		break;
+	case MPOL_PREFERRED:
+		pol->v.preferred_node = node_remap(pol->v.preferred_node,
+						   *old, *new);
+		break;
+	case MPOL_BIND: {
+		nodemask_t nodes;
+		struct zone **z;
+		struct zonelist *zonelist;
+
+		nodes_clear(nodes);
+		for (z = pol->v.zonelist->zones; *z; z++)
+			node_set((*z)->zone_pgdat->node_id, nodes);
+		nodes_remap(tmp, nodes, *old, *new);
+		nodes = tmp;
+
+		zonelist = bind_zonelist(&nodes);
+
+		/* If no mem, then zonelist is NULL and we keep old zonelist.
+		 * If that old zonelist has no remaining mems_allowed nodes,
+		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
+		 */
+
+		if (zonelist) {
+			/* Good - got mem - substitute new zonelist */
+			kfree(pol->v.zonelist);
+			pol->v.zonelist = zonelist;
+		}
+		break;
+	}
+	default:
+		BUG();
+		break;
+	}
+}
+
+/*
+ * Someone moved this task to different nodes.  Fixup mempolicies.
+ *
+ * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
+ * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
+ */
+void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
+{
+	rebind_policy(current->mempolicy, old, new);
+}
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1840,7 +1840,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_VM
 	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
 		WARN_ON(1);
 		up_read(&mm->mmap_sem);
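verify_mm_writelocked() (now compiled in under CONFIG_DEBUG_VM rather than the much broader CONFIG_DEBUG_KERNEL) uses a trick worth noting: if a read-trylock on the semaphore succeeds, no writer can be holding it, so the caller's claim to hold the write lock must be false. A pthreads sketch of the same assertion (hypothetical helper, not kernel code; build with -pthread):

#include <assert.h>
#include <pthread.h>

/* Fails the assertion if the caller does not hold `lock` for writing:
 * a successful try-rdlock proves no writer holds the lock. */
static void verify_writelocked(pthread_rwlock_t *lock)
{
        if (pthread_rwlock_tryrdlock(lock) == 0) {
                pthread_rwlock_unlock(lock);    /* undo the probe */
                assert(!"caller does not hold the write lock");
        }
}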
diff --git a/mm/pdflush.c b/mm/pdflush.c
index d6781951267e..52822c98c489 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -20,6 +20,7 @@
 #include <linux/fs.h>		// Needed by writeback.h
 #include <linux/writeback.h>	// Prototypes pdflush_operation()
 #include <linux/kthread.h>
+#include <linux/cpuset.h>
 
 
 /*
@@ -170,12 +171,24 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
+	cpumask_t cpus_allowed;
 
 	/*
	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
	 * don't want to do that at keventd's priority.
	 */
 	set_user_nice(current, 0);
+
+	/*
+	 * Some configs put our parent kthread in a limited cpuset,
+	 * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+	 * Our needs are more modest - cut back to our cpusets cpus_allowed.
+	 * This is needed as pdflush's are dynamically created and destroyed.
+	 * The boottime pdflush's are easily placed w/o these 2 lines.
+	 */
+	cpus_allowed = cpuset_cpus_allowed(current);
+	set_cpus_allowed(current, cpus_allowed);
+
 	return __pdflush(&my_work);
 }
 
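The pdflush change narrows the thread's CPU affinity from kthread()'s CPU_MASK_ALL back to whatever its cpuset permits. A userspace analogue of that narrowing, using Linux's sched_setaffinity(2) on the calling thread (narrow_affinity() is a hypothetical name):

#define _GNU_SOURCE
#include <sched.h>

/* Restrict the calling thread to the externally supplied CPU mask,
 * mirroring set_cpus_allowed(current, cpuset_cpus_allowed(current)). */
static int narrow_affinity(const cpu_set_t *allowed)
{
        return sched_setaffinity(0 /* self */, sizeof(*allowed), allowed);
}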
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -259,6 +259,8 @@ void __pagevec_release(struct pagevec *pvec)
 	pagevec_reinit(pvec);
 }
 
+EXPORT_SYMBOL(__pagevec_release);
+
 /*
  * pagevec_release() for pages which are known to not be on the LRU
  *
@@ -270,7 +272,6 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
 	struct pagevec pages_to_free;
 
 	pagevec_init(&pages_to_free, pvec->cold);
-	pages_to_free.cold = pvec->cold;
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
@@ -388,6 +389,7 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 	return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup_tag);
 
 #ifdef CONFIG_SMP
 /*
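The two EXPORT_SYMBOL() additions make __pagevec_release() and pagevec_lookup_tag() visible to loadable modules; without them, any module referencing either symbol fails at load time with an unresolved-symbol error. A hypothetical module fragment showing what the exports enable (pvec_demo is an invented name, a sketch rather than real kernel code):

#include <linux/module.h>
#include <linux/pagevec.h>

static int __init pvec_demo_init(void)
{
        struct pagevec pvec;

        pagevec_init(&pvec, 0);         /* 0: hot pages */
        /* ... gather pages into pvec ... */
        __pagevec_release(&pvec);       /* resolvable only once exported */
        return 0;
}

static void __exit pvec_demo_exit(void)
{
}

module_init(pvec_demo_init);
module_exit(pvec_demo_exit);
MODULE_LICENSE("GPL");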
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index c13a2161bca2..b58abcf44ed6 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -31,11 +31,14 @@ static struct vfsmount *shm_mnt;
 
 static int __init init_tmpfs(void)
 {
-	register_filesystem(&tmpfs_fs_type);
+	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+
 #ifdef CONFIG_TMPFS
 	devfs_mk_dir("shm");
 #endif
 	shm_mnt = kern_mount(&tmpfs_fs_type);
+	BUG_ON(IS_ERR(shm_mnt));
+
 	return 0;
 }
 module_init(init_tmpfs)
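The tiny-shmem change adds boot-time sanity checks: register_filesystem() returns 0 on success, while kern_mount() returns either a valid vfsmount or an errno encoded in the pointer value itself, which IS_ERR() detects. A userspace re-implementation of that pointer-encoded-errno convention, sketched after the macros in <linux/err.h>:

#include <stdio.h>

#define MAX_ERRNO	4095

/* Small negative errnos are folded into the top of the address space,
 * where no valid pointer can live, so one return value carries both. */
static void *ERR_PTR(long error)
{
        return (void *)error;
}

static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *mnt = ERR_PTR(-12);       /* -ENOMEM */
        printf("%d\n", IS_ERR(mnt));    /* 1: an encoded error */
        return 0;
}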
diff --git a/mm/truncate.c b/mm/truncate.c
index 60c8764bfac2..29c18f68dc35 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -13,18 +13,9 @@
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
-				   block_invalidatepage */
+				   do_invalidatepage */
 
 
-static int do_invalidatepage(struct page *page, unsigned long offset)
-{
-	int (*invalidatepage)(struct page *, unsigned long);
-	invalidatepage = page->mapping->a_ops->invalidatepage;
-	if (invalidatepage == NULL)
-		invalidatepage = block_invalidatepage;
-	return (*invalidatepage)(page, offset);
-}
-
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
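The helper deleted from truncate.c (its role presumably taken over by the shared do_invalidatepage() now referenced in the buffer_head.h comment) used the kernel's common "operations table with fallback" idiom: prefer the object's own method, fall back to a generic default when the slot is NULL. A minimal userspace sketch of the idiom, all names hypothetical:

#include <stdio.h>

struct ops {
        void (*invalidate)(int offset);         /* may be NULL */
};

static void generic_invalidate(int offset)
{
        printf("generic invalidate at %d\n", offset);
}

static void invalidate(const struct ops *ops, int offset)
{
        /* Dispatch to the specific method if set, else the default. */
        void (*fn)(int) = ops->invalidate ? ops->invalidate
                                          : generic_invalidate;
        fn(offset);
}

int main(void)
{
        struct ops plain = { .invalidate = NULL };

        invalidate(&plain, 512);        /* falls back to the default */
        return 0;
}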