author     David Gibson <david@gibson.dropbear.id.au>      2012-03-21 19:34:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 20:54:59 -0400
commit     90481622d75715bfcb68501280a917dbfe516029
tree       63f7d9e4455366ab326ee74e6b39acf76b618fcf /include/linux/hugetlb.h
parent     a1d776ee3147cec2a54a645e92eb2e3e2f65a137
hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation in itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
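For context, a simplified sketch of what the removed hugetlb_get_quota()
callback does: it reaches from the page-handling code, through the page's
mapping, back into the hugetlbfs superblock to charge the free_blocks
counter. This is illustrative only; the real pre-patch helper in
fs/hugetlbfs/inode.c differs in detail:

int hugetlb_get_quota(struct address_space *mapping, long delta)
{
	int ret = 0;
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);

	/* A negative free_blocks is taken to mean "no limit set" here. */
	if (sbinfo->free_blocks > -1) {
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->free_blocks - delta >= 0)
			sbinfo->free_blocks -= delta;	/* charge this mount */
		else
			ret = -ENOMEM;			/* limit exceeded */
		spin_unlock(&sbinfo->stat_lock);
	}
	return ret;
}

If free_huge_page() ends up invoking this path after the filesystem has been
unmounted, sbinfo points into freed memory - the use-after-free described
above.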
Andrew Barry proposed a patch to fix this by having the hugepages store
pointers directly to the superblock, bumping its reference count as
appropriate to avoid it being freed, instead of storing a pointer to their
address_space and reaching the superblock from there.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
Subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
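A minimal sketch of the subpool lifetime rule described above, written
against only the declarations added to hugetlb.h in this patch (the actual
mm/hugetlb.c implementation may differ in detail):

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool = kmalloc(sizeof(*spool), GFP_KERNEL);

	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;		/* one reference, held by the superblock */
	spool->max_hpages = nr_blocks;	/* page limit for this fs instance */
	spool->used_hpages = 0;
	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	bool free;

	spin_lock(&spool->lock);
	spool->count--;
	/* Freed only when the last reference *and* the last page are gone. */
	free = (spool->count == 0 && spool->used_hpages == 0);
	spin_unlock(&spool->lock);

	if (free)
		kfree(spool);
}

A page being freed would drop its charge against used_hpages under the same
rule, so whichever of the final superblock reference or the final page goes
away last can safely free the pool.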
Previous discussion of this bug can be found in "Fix refcounting in hugetlbfs
quota handling". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <abarry@cray.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/hugetlb.h')
-rw-r--r--  include/linux/hugetlb.h | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7adc4923e7ac..cf0181738c9e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -14,6 +14,15 @@ struct user_struct;
 #include <linux/shm.h>
 #include <asm/tlbflush.h>
 
+struct hugepage_subpool {
+	spinlock_t lock;
+	long count;
+	long max_hpages, used_hpages;
+};
+
+struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+void hugepage_put_subpool(struct hugepage_subpool *spool);
+
 int PageHuge(struct page *page);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
@@ -129,12 +138,11 @@ enum {
 
 #ifdef CONFIG_HUGETLBFS
 struct hugetlbfs_sb_info {
-	long	max_blocks;	/* blocks allowed */
-	long	free_blocks;	/* blocks free */
 	long	max_inodes;	/* inodes allowed */
 	long	free_inodes;	/* inodes free */
 	spinlock_t	stat_lock;
 	struct hstate *hstate;
+	struct hugepage_subpool *spool;
 };
 
 static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
@@ -146,8 +154,6 @@ extern const struct file_operations hugetlbfs_file_operations;
 extern const struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
 				struct user_struct **user, int creat_flags);
-int hugetlb_get_quota(struct address_space *mapping, long delta);
-void hugetlb_put_quota(struct address_space *mapping, long delta);
 
 static inline int is_file_hugepages(struct file *file)
 {
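
For illustration, a charge against the subpool that replaces the old
free_blocks accounting could look roughly like this. The helper name is
illustrative, the NULL check implements the "no subpool limits in effect"
convention from the commit message, and the real accounting lives outside
this header:

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool, long delta)
{
	int ret = 0;

	if (!spool)		/* NULL subpool: no limit in effect */
		return 0;

	spin_lock(&spool->lock);
	if (spool->used_hpages + delta <= spool->max_hpages)
		spool->used_hpages += delta;	/* charge the pool */
	else
		ret = -ENOMEM;			/* over the mount's page limit */
	spin_unlock(&spool->lock);

	return ret;
}

hugetlbfs creates one such subpool per mount with a page limit set and stores
it in the new spool field above; each allocated hugepage then carries a
pointer to that subpool rather than to its address_space.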