diff options
author | Lee Schermerhorn <lee.schermerhorn@hp.com> | 2008-10-18 23:26:56 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-20 11:52:31 -0400 |
commit | 9978ad583e100945b74e4f33e73317983ea32df9 (patch) | |
tree | 132d3a06664e04cac4635ddba55a0ec36ff2a001 /mm/mlock.c | |
parent | c11d69d8c830e09a0e7b3935c952afb26c48bba8 (diff) |
mlock: make mlock error return Posixly Correct
Rework Posix error return for mlock().
Posix requires specific error codes for the mlock*() system calls under some
conditions that differ from what kernel low-level functions, such as
get_user_pages(), return for those conditions. For more info, see:
http://marc.info/?l=linux-kernel&m=121750892930775&w=2
This patch provides the same translation of get_user_pages()
error codes to posix specified error codes in the context
of the mlock rework for unevictable lru.
[akpm@linux-foundation.org: fix build]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 33 |
1 file changed, 27 insertions, 6 deletions
diff --git a/mm/mlock.c b/mm/mlock.c index bce1b22c36c2..008ea70b7afa 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -248,11 +248,24 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
248 | addr += PAGE_SIZE; /* for next get_user_pages() */ | 248 | addr += PAGE_SIZE; /* for next get_user_pages() */ |
249 | nr_pages--; | 249 | nr_pages--; |
250 | } | 250 | } |
251 | ret = 0; | ||
251 | } | 252 | } |
252 | 253 | ||
253 | lru_add_drain_all(); /* to update stats */ | 254 | lru_add_drain_all(); /* to update stats */ |
254 | 255 | ||
255 | return 0; /* count entire vma as locked_vm */ | 256 | return ret; /* count entire vma as locked_vm */ |
257 | } | ||
258 | |||
259 | /* | ||
260 | * convert get_user_pages() return value to posix mlock() error | ||
261 | */ | ||
262 | static int __mlock_posix_error_return(long retval) | ||
263 | { | ||
264 | if (retval == -EFAULT) | ||
265 | retval = -ENOMEM; | ||
266 | else if (retval == -ENOMEM) | ||
267 | retval = -EAGAIN; | ||
268 | return retval; | ||
256 | } | 269 | } |
257 | 270 | ||
258 | #else /* CONFIG_UNEVICTABLE_LRU */ | 271 | #else /* CONFIG_UNEVICTABLE_LRU */ |
@@ -265,9 +278,15 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
265 | int mlock) | 278 | int mlock) |
266 | { | 279 | { |
267 | if (mlock && (vma->vm_flags & VM_LOCKED)) | 280 | if (mlock && (vma->vm_flags & VM_LOCKED)) |
268 | make_pages_present(start, end); | 281 | return make_pages_present(start, end); |
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static inline int __mlock_posix_error_return(long retval) | ||
286 | { | ||
269 | return 0; | 287 | return 0; |
270 | } | 288 | } |
289 | |||
271 | #endif /* CONFIG_UNEVICTABLE_LRU */ | 290 | #endif /* CONFIG_UNEVICTABLE_LRU */ |
272 | 291 | ||
273 | /** | 292 | /** |
@@ -434,10 +453,7 @@ success: | |||
434 | downgrade_write(&mm->mmap_sem); | 453 | downgrade_write(&mm->mmap_sem); |
435 | 454 | ||
436 | ret = __mlock_vma_pages_range(vma, start, end, 1); | 455 | ret = __mlock_vma_pages_range(vma, start, end, 1); |
437 | if (ret > 0) { | 456 | |
438 | mm->locked_vm -= ret; | ||
439 | ret = 0; | ||
440 | } | ||
441 | /* | 457 | /* |
442 | * Need to reacquire mmap sem in write mode, as our callers | 458 | * Need to reacquire mmap sem in write mode, as our callers |
443 | * expect this. We have no support for atomically upgrading | 459 | * expect this. We have no support for atomically upgrading |
@@ -451,6 +467,11 @@ success: | |||
451 | /* non-NULL *prev must contain @start, but need to check @end */ | 467 | /* non-NULL *prev must contain @start, but need to check @end */ |
452 | if (!(*prev) || end > (*prev)->vm_end) | 468 | if (!(*prev) || end > (*prev)->vm_end) |
453 | ret = -ENOMEM; | 469 | ret = -ENOMEM; |
470 | else if (ret > 0) { | ||
471 | mm->locked_vm -= ret; | ||
472 | ret = 0; | ||
473 | } else | ||
474 | ret = __mlock_posix_error_return(ret); /* translate if needed */ | ||
454 | } else { | 475 | } else { |
455 | /* | 476 | /* |
456 | * TODO: for unlocking, pages will already be resident, so | 477 | * TODO: for unlocking, pages will already be resident, so |