diff options
Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig | 2 | ||||
-rw-r--r-- | mm/filemap.c | 4 | ||||
-rw-r--r-- | mm/memcontrol.c | 2 | ||||
-rw-r--r-- | mm/shmem.c | 27 | ||||
-rw-r--r-- | mm/util.c | 16 |
5 files changed, 42 insertions(+), 9 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig index b53427ad30a3..57971d2ab848 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -213,6 +213,8 @@ config UNEVICTABLE_LRU | |||
213 | will use one page flag and increase the code size a little, | 213 | will use one page flag and increase the code size a little, |
214 | say Y unless you know what you are doing. | 214 | say Y unless you know what you are doing. |
215 | 215 | ||
216 | See Documentation/vm/unevictable-lru.txt for more information. | ||
217 | |||
216 | config HAVE_MLOCK | 218 | config HAVE_MLOCK |
217 | bool | 219 | bool |
218 | default y if MMU=y | 220 | default y if MMU=y |
diff --git a/mm/filemap.c b/mm/filemap.c index 2e2d38ebda4b..8bd498040f32 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -567,8 +567,8 @@ EXPORT_SYMBOL(wait_on_page_bit); | |||
567 | 567 | ||
568 | /** | 568 | /** |
569 | * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue | 569 | * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue |
570 | * @page - Page defining the wait queue of interest | 570 | * @page: Page defining the wait queue of interest |
571 | * @waiter - Waiter to add to the queue | 571 | * @waiter: Waiter to add to the queue |
572 | * | 572 | * |
573 | * Add an arbitrary @waiter to the wait queue for the nominated @page. | 573 | * Add an arbitrary @waiter to the wait queue for the nominated @page. |
574 | */ | 574 | */ |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2fc6d6c48238..e44fb0fbb80e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -932,7 +932,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, | |||
932 | if (unlikely(!mem)) | 932 | if (unlikely(!mem)) |
933 | return 0; | 933 | return 0; |
934 | 934 | ||
935 | VM_BUG_ON(mem_cgroup_is_obsolete(mem)); | 935 | VM_BUG_ON(!mem || mem_cgroup_is_obsolete(mem)); |
936 | 936 | ||
937 | while (1) { | 937 | while (1) { |
938 | int ret; | 938 | int ret; |
diff --git a/mm/shmem.c b/mm/shmem.c index d94d2e9146bc..f9cb20ebb990 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/vfs.h> | 25 | #include <linux/vfs.h> |
26 | #include <linux/mount.h> | 26 | #include <linux/mount.h> |
27 | #include <linux/pagemap.h> | ||
27 | #include <linux/file.h> | 28 | #include <linux/file.h> |
28 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
@@ -43,7 +44,6 @@ static struct vfsmount *shm_mnt; | |||
43 | #include <linux/exportfs.h> | 44 | #include <linux/exportfs.h> |
44 | #include <linux/generic_acl.h> | 45 | #include <linux/generic_acl.h> |
45 | #include <linux/mman.h> | 46 | #include <linux/mman.h> |
46 | #include <linux/pagemap.h> | ||
47 | #include <linux/string.h> | 47 | #include <linux/string.h> |
48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
49 | #include <linux/backing-dev.h> | 49 | #include <linux/backing-dev.h> |
@@ -65,13 +65,28 @@ static struct vfsmount *shm_mnt; | |||
65 | #include <asm/div64.h> | 65 | #include <asm/div64.h> |
66 | #include <asm/pgtable.h> | 66 | #include <asm/pgtable.h> |
67 | 67 | ||
68 | /* | ||
69 | * The maximum size of a shmem/tmpfs file is limited by the maximum size of | ||
70 | * its triple-indirect swap vector - see illustration at shmem_swp_entry(). | ||
71 | * | ||
72 | * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel, | ||
73 | * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum | ||
74 | * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel, | ||
75 | * MAX_LFS_FILESIZE being then more restrictive than swap vector layout. | ||
76 | * | ||
77 | * We use / and * instead of shifts in the definitions below, so that the swap | ||
78 | * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE. | ||
79 | */ | ||
68 | #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) | 80 | #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) |
69 | #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) | 81 | #define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) |
70 | #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) | ||
71 | 82 | ||
72 | #define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) | 83 | #define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) |
73 | #define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT) | 84 | #define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT) |
74 | 85 | ||
86 | #define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE) | ||
87 | #define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT)) | ||
88 | |||
89 | #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) | ||
75 | #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) | 90 | #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) |
76 | 91 | ||
77 | /* info->flags needs VM_flags to handle pagein/truncate races efficiently */ | 92 | /* info->flags needs VM_flags to handle pagein/truncate races efficiently */ |
@@ -2581,7 +2596,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
2581 | #define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev) | 2596 | #define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev) |
2582 | #define shmem_acct_size(flags, size) 0 | 2597 | #define shmem_acct_size(flags, size) 0 |
2583 | #define shmem_unacct_size(flags, size) do {} while (0) | 2598 | #define shmem_unacct_size(flags, size) do {} while (0) |
2584 | #define SHMEM_MAX_BYTES LLONG_MAX | 2599 | #define SHMEM_MAX_BYTES MAX_LFS_FILESIZE |
2585 | 2600 | ||
2586 | #endif /* CONFIG_SHMEM */ | 2601 | #endif /* CONFIG_SHMEM */ |
2587 | 2602 | ||
diff --git a/mm/util.c b/mm/util.c --- a/mm/util.c +++ b/mm/util.c | |||
@@ -223,6 +223,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |||
223 | } | 223 | } |
224 | #endif | 224 | #endif |
225 | 225 | ||
226 | /** | ||
227 | * get_user_pages_fast() - pin user pages in memory | ||
228 | * @start: starting user address | ||
229 | * @nr_pages: number of pages from start to pin | ||
230 | * @write: whether pages will be written to | ||
231 | * @pages: array that receives pointers to the pages pinned. | ||
232 | * Should be at least nr_pages long. | ||
233 | * | ||
234 | * Attempt to pin user pages in memory without taking mm->mmap_sem. | ||
235 | * If not successful, it will fall back to taking the lock and | ||
236 | * calling get_user_pages(). | ||
237 | * | ||
238 | * Returns number of pages pinned. This may be fewer than the number | ||
239 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | ||
240 | * were pinned, returns -errno. | ||
241 | */ | ||
226 | int __attribute__((weak)) get_user_pages_fast(unsigned long start, | 242 | int __attribute__((weak)) get_user_pages_fast(unsigned long start, |
227 | int nr_pages, int write, struct page **pages) | 243 | int nr_pages, int write, struct page **pages) |
228 | { | 244 | { |