aboutsummaryrefslogtreecommitdiffstats
path: root/mm/mmap.c
diff options
context:
space:
mode:
authorDavid Herrmann <dh.herrmann@gmail.com>2014-08-08 17:25:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-08 18:57:31 -0400
commit4bb5f5d9395bc112d93a134d8f5b05611eddc9c0 (patch)
tree68af5a8a8cc65375c51c25bff678d0f9825d86af /mm/mmap.c
parent935e9f02e798051d2923d59f6025cd74f59aa0e1 (diff)
mm: allow drivers to prevent new writable mappings
This patch (of 6): The i_mmap_writable field counts existing writable mappings of an address_space. To allow drivers to prevent new writable mappings, make this counter signed and prevent new writable mappings if it is negative. This is modelled after i_writecount and DENYWRITE. This will be required by the shmem-sealing infrastructure to prevent any new writable mappings after the WRITE seal has been set. In case there exists a writable mapping, this operation will fail with EBUSY. Note that we rely on the fact that iff you already own a writable mapping, you can increase the counter without using the helpers. This is the same that we do for i_writecount. Signed-off-by: David Herrmann <dh.herrmann@gmail.com> Acked-by: Hugh Dickins <hughd@google.com> Cc: Michael Kerrisk <mtk.manpages@gmail.com> Cc: Ryan Lortie <desrt@desrt.ca> Cc: Lennart Poettering <lennart@poettering.net> Cc: Daniel Mack <zonque@gmail.com> Cc: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--mm/mmap.c30
1 file changed, 24 insertions, 6 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 64c9d736155c..c1f2ea4a0b99 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -221,7 +221,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
221 if (vma->vm_flags & VM_DENYWRITE) 221 if (vma->vm_flags & VM_DENYWRITE)
222 atomic_inc(&file_inode(file)->i_writecount); 222 atomic_inc(&file_inode(file)->i_writecount);
223 if (vma->vm_flags & VM_SHARED) 223 if (vma->vm_flags & VM_SHARED)
224 mapping->i_mmap_writable--; 224 mapping_unmap_writable(mapping);
225 225
226 flush_dcache_mmap_lock(mapping); 226 flush_dcache_mmap_lock(mapping);
227 if (unlikely(vma->vm_flags & VM_NONLINEAR)) 227 if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -622,7 +622,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
622 if (vma->vm_flags & VM_DENYWRITE) 622 if (vma->vm_flags & VM_DENYWRITE)
623 atomic_dec(&file_inode(file)->i_writecount); 623 atomic_dec(&file_inode(file)->i_writecount);
624 if (vma->vm_flags & VM_SHARED) 624 if (vma->vm_flags & VM_SHARED)
625 mapping->i_mmap_writable++; 625 atomic_inc(&mapping->i_mmap_writable);
626 626
627 flush_dcache_mmap_lock(mapping); 627 flush_dcache_mmap_lock(mapping);
628 if (unlikely(vma->vm_flags & VM_NONLINEAR)) 628 if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -1577,6 +1577,17 @@ munmap_back:
1577 if (error) 1577 if (error)
1578 goto free_vma; 1578 goto free_vma;
1579 } 1579 }
1580 if (vm_flags & VM_SHARED) {
1581 error = mapping_map_writable(file->f_mapping);
1582 if (error)
1583 goto allow_write_and_free_vma;
1584 }
1585
1586 /* ->mmap() can change vma->vm_file, but must guarantee that
1587 * vma_link() below can deny write-access if VM_DENYWRITE is set
1588 * and map writably if VM_SHARED is set. This usually means the
1589 * new file must not have been exposed to user-space, yet.
1590 */
1580 vma->vm_file = get_file(file); 1591 vma->vm_file = get_file(file);
1581 error = file->f_op->mmap(file, vma); 1592 error = file->f_op->mmap(file, vma);
1582 if (error) 1593 if (error)
@@ -1616,8 +1627,12 @@ munmap_back:
1616 1627
1617 vma_link(mm, vma, prev, rb_link, rb_parent); 1628 vma_link(mm, vma, prev, rb_link, rb_parent);
1618 /* Once vma denies write, undo our temporary denial count */ 1629 /* Once vma denies write, undo our temporary denial count */
1619 if (vm_flags & VM_DENYWRITE) 1630 if (file) {
1620 allow_write_access(file); 1631 if (vm_flags & VM_SHARED)
1632 mapping_unmap_writable(file->f_mapping);
1633 if (vm_flags & VM_DENYWRITE)
1634 allow_write_access(file);
1635 }
1621 file = vma->vm_file; 1636 file = vma->vm_file;
1622out: 1637out:
1623 perf_event_mmap(vma); 1638 perf_event_mmap(vma);
@@ -1646,14 +1661,17 @@ out:
1646 return addr; 1661 return addr;
1647 1662
1648unmap_and_free_vma: 1663unmap_and_free_vma:
1649 if (vm_flags & VM_DENYWRITE)
1650 allow_write_access(file);
1651 vma->vm_file = NULL; 1664 vma->vm_file = NULL;
1652 fput(file); 1665 fput(file);
1653 1666
1654 /* Undo any partial mapping done by a device driver. */ 1667 /* Undo any partial mapping done by a device driver. */
1655 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); 1668 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1656 charged = 0; 1669 charged = 0;
1670 if (vm_flags & VM_SHARED)
1671 mapping_unmap_writable(file->f_mapping);
1672allow_write_and_free_vma:
1673 if (vm_flags & VM_DENYWRITE)
1674 allow_write_access(file);
1657free_vma: 1675free_vma:
1658 kmem_cache_free(vm_area_cachep, vma); 1676 kmem_cache_free(vm_area_cachep, vma);
1659unacct_error: 1677unacct_error: