path: root/mm/shmem.c
author     Joel Fernandes (Google) <joel@joelfernandes.org>   2019-03-05 18:47:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>     2019-03-06 00:07:19 -0500
commit     ab3948f58ff841e51feb845720624665ef5b7ef3 (patch)
tree       7258ed463b4e48b91add44c6ee09217ef8611679 /mm/shmem.c
parent     7f18825174203526a47c127c12a50f897ee0b511 (diff)
mm/memfd: add an F_SEAL_FUTURE_WRITE seal to memfd
Android uses ashmem for sharing memory regions.  We are looking forward to
migrating all usecases of ashmem to memfd so that we can possibly remove the
ashmem driver from staging in the future, while also benefiting from using
memfd and contributing to it.  Note that staging drivers are not ABI and can
generally be removed at any time.

One of the main usecases Android has is the ability to create a region and
mmap it as writeable, then add protection against making any "future" writes
while keeping the existing already-mmap'ed writeable region active.  This
allows us to implement a usecase where receivers of the shared memory buffer
get a read-only view, while the sender continues to write to the buffer.  See
the CursorWindow documentation in Android for more details:
https://developer.android.com/reference/android/database/CursorWindow

This usecase cannot be implemented with the existing F_SEAL_WRITE seal.  To
support it, this patch adds a new F_SEAL_FUTURE_WRITE seal, which prevents any
future mmap and write syscalls from succeeding while keeping the existing mmap
active.

A better way to implement the F_SEAL_FUTURE_WRITE seal was discussed [1] last
week, where we don't need to modify core VFS structures to get the same
behavior of the seal.  This avoids several side effects pointed out by Andy.
Self-tests are provided in a later patch to verify the expected semantics.

[1] https://lore.kernel.org/lkml/20181111173650.GA256781@google.com/

Thanks a lot to Andy for suggestions to improve the code.

Link: http://lkml.kernel.org/r/20190112203816.85534-2-joel@joelfernandes.org
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: J. Bruce Fields <bfields@fieldses.org>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Marc-André Lureau <marcandre.lureau@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
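For context, the intended userspace flow looks roughly like the sketch below
(not part of this patch): the sender creates a sealable memfd, maps it
writable, applies F_SEAL_FUTURE_WRITE, and keeps writing through its existing
mapping, while any later MAP_SHARED + PROT_WRITE mmap or write(2) on the fd
fails with EPERM.  The "cursor-window" name and the fallback #define are
illustrative; the sketch assumes glibc >= 2.27 for memfd_create() and a kernel
with this patch applied.

/* Userspace sketch (not from this patch): seal future writes while an
 * existing writable mapping stays active. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* value from headers >= 5.1 */
#endif

int main(void)
{
        int fd = memfd_create("cursor-window", MFD_ALLOW_SEALING);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;

        /* Sender maps the region writable before sealing. */
        char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                         fd, 0);
        if (buf == MAP_FAILED)
                return 1;

        /* Seal future writes; the existing mapping above keeps working. */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
                return 1;
        strcpy(buf, "sender still writes here");

        /* A receiver of the fd can now only get a read-only view:
         * MAP_SHARED + PROT_WRITE mmap and write(2) both fail with EPERM,
         * and mprotect() cannot upgrade a read-only mapping because
         * shmem_mmap() cleared VM_MAYWRITE. */
        if (mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)
            == MAP_FAILED)
                perror("writable mmap after seal");	/* EPERM expected */
        if (write(fd, "x", 1) < 0)
                perror("write after seal");		/* EPERM expected */

        printf("read-only consumers see: %s\n", buf);
        return 0;
}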
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c  25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 283a1833dafc..b3db3779a30a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2190,6 +2190,24 @@ out_nomem:
 
 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
+
+	if (info->seals & F_SEAL_FUTURE_WRITE) {
+		/*
+		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+		 * "future write" seal active.
+		 */
+		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+			return -EPERM;
+
+		/*
+		 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
+		 * read-only mapping, take care to not allow mprotect to revert
+		 * protections.
+		 */
+		vma->vm_flags &= ~(VM_MAYWRITE);
+	}
+
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
@@ -2440,8 +2458,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 	pgoff_t index = pos >> PAGE_SHIFT;
 
 	/* i_mutex is held by caller */
-	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
-		if (info->seals & F_SEAL_WRITE)
+	if (unlikely(info->seals & (F_SEAL_GROW |
+				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
+		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
 			return -EPERM;
 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
 			return -EPERM;
@@ -2704,7 +2723,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
 	/* protected by i_mutex */
-	if (info->seals & F_SEAL_WRITE) {
+	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
 		error = -EPERM;
 		goto out;
 	}