Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 114 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 86 insertions(+), 28 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 29f02d8513f6..85854b867463 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <linux/capability.h>
 
 #ifdef __KERNEL__
 
@@ -13,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
+#include <linux/mutex.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -163,7 +165,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#define VM_INCOMPLETE	0x02000000	/* Strange partial PFN mapping marker */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 
 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
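
VM_INSERTPAGE is set by vm_insert_page(), the interface for drivers that want to map individual kernel pages into userspace rather than playing remap_pfn_range() games. A minimal sketch of the intended call site, assuming a driver that owns one kernel-allocated page (my_dev_mmap and my_page are hypothetical names):

	/* Hedged sketch, not a real driver: my_page would come from
	 * alloc_page(GFP_KERNEL) at device-init time. */
	static struct page *my_page;

	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		if (vma->vm_end - vma->vm_start < PAGE_SIZE)
			return -EINVAL;

		/* vm_insert_page() validates the page and is expected
		 * to mark the vma with VM_INSERTPAGE. */
		return vm_insert_page(vma, vma->vm_start, my_page);
	}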
@@ -223,24 +225,27 @@ struct page {
 					 * & limit reverse map searches.
 					 */
 	union {
-		unsigned long private;		/* Mapping-private opaque data:
-						 * usually used for buffer_heads
-						 * if PagePrivate set; used for
-						 * swp_entry_t if PageSwapCache
-						 * When page is free, this indicates
-						 * order in the buddy system.
-						 */
+		struct {
+			unsigned long private;	/* Mapping-private opaque data:
+						 * usually used for buffer_heads
+						 * if PagePrivate set; used for
+						 * swp_entry_t if PageSwapCache.
+						 * When page is free, this
+						 * indicates order in the buddy
+						 * system.
+						 */
+			struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		};
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-	} u;
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	};
 	pgoff_t index;			/* Our offset within mapping. */
 	struct list_head lru;		/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
@@ -261,8 +266,8 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
-#define page_private(page)		((page)->u.private)
-#define set_page_private(page, v)	((page)->u.private = (v))
+#define page_private(page)		((page)->private)
+#define set_page_private(page, v)	((page)->private = (v))
 
 /*
  * FIXME: take this include out, include page-flags.h in
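
With the named u member gone, these accessors address the field directly; the anonymous union/struct (a gcc extension, later standardized in C11) is what makes page->private, page->mapping and page->ptl all valid spellings. A standalone toy showing the layout trick with simplified types:

	#include <stdio.h>

	/* Simplified stand-in for struct page.  The anonymous
	 * union/struct lets callers write p->private and p->mapping
	 * with no "->u." hop, while still overlaying the pair with
	 * the page-table lock slot. */
	struct fake_page {
		union {
			struct {
				unsigned long private;
				void *mapping;
			};
			int ptl;	/* stand-in for spinlock_t */
		};
	};

	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))

	int main(void)
	{
		struct fake_page p;

		set_page_private(&p, 42UL);	/* (&p)->private = 42UL */
		p.mapping = NULL;
		printf("private = %lu\n", page_private(&p));
		return 0;
	}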
@@ -308,7 +313,7 @@ struct page {
  */
 #define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
 
-#define set_page_count(p,v) 	atomic_set(&(p)->_count, v - 1)
+#define set_page_count(p,v) 	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)
 
 extern void FASTCALL(__page_cache_release(struct page *));
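
The (v) fix is plain macro hygiene: without the parentheses, an argument that is itself an expression can bind wrongly against the trailing - 1. A standalone demonstration with a stand-in macro:

	#include <stdio.h>

	#define BAD(v)	(v - 1)		/* unparenthesized argument */
	#define GOOD(v)	((v) - 1)	/* fixed, as in the patch */

	int main(void)
	{
		int a = 2, b = 3;

		/* BAD(a ? b : 5) expands to (a ? b : 5 - 1): the "- 1"
		 * binds to 5, not to the whole conditional, so this
		 * prints 3 where 2 was intended. */
		printf("BAD  = %d\n", BAD(a ? b : 5));
		printf("GOOD = %d\n", GOOD(a ? b : 5));
		return 0;
	}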
@@ -507,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
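
Plain inline is only advisory, and gcc may decline it under -Os (CONFIG_CC_OPTIMIZE_FOR_SIZE); __always_inline makes the request binding, which matters for a trivial accessor like this on hot paths. For reference, the kernel's gcc definition is approximately:

	/* From include/linux/compiler-gcc.h (approximate): the
	 * attribute turns the inline hint into a requirement. */
	#define __always_inline	inline __attribute__((always_inline))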
@@ -634,14 +639,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 #define shmem_nopage filemap_nopage
-#define shmem_lock(a, b, c) ({0;})	/* always in memory, no need to lock */
-#define shmem_set_policy(a, b) (0)
-#define shmem_get_policy(a, b) (NULL)
+
+static inline int shmem_lock(struct file *file, int lock,
+			     struct user_struct *user)
+{
+	return 0;
+}
+
+static inline int shmem_set_policy(struct vm_area_struct *vma,
+				   struct mempolicy *new)
+{
+	return 0;
+}
+
+static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+						 unsigned long addr)
+{
+	return NULL;
+}
 #endif
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
 
 int shmem_zero_setup(struct vm_area_struct *);
 
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+					     unsigned long addr,
+					     unsigned long len,
+					     unsigned long pgoff,
+					     unsigned long flags);
+#endif
+
 static inline int can_do_mlock(void)
 {
 	if (capable(CAP_IPC_LOCK))
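
Swapping the macro stubs for static inlines buys type checking of the arguments and kills bogus unused-variable warnings at call sites, at zero cost since the empty bodies compile away. A standalone contrast of the two styles (toy names):

	/* The macro version accepts any arguments at all: */
	#define stub_shmem_lock(a, b, c)	(0)

	struct file;
	struct user_struct;

	/* The static inline version type-checks every argument and
	 * still compiles to nothing: */
	static inline int real_shmem_lock(struct file *file, int lock,
					  struct user_struct *user)
	{
		return 0;	/* shmem pages are always resident */
	}

	/* stub_shmem_lock("junk", 1.5, -1) compiles silently;
	 * real_shmem_lock("junk", 1.5, -1) draws type warnings. */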
@@ -690,14 +719,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
 
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+			unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			int write_access)
 {
-	return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+	return __handle_mm_fault(mm, vma, address, write_access) &
+				(~VM_FAULT_WRITE);
 }
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			int write_access)
+{
+	/* should never happen if there's no MMU */
+	BUG();
+	return VM_FAULT_SIGBUS;
+}
+#endif
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
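
The CONFIG_MMU split gives nommu kernels a handle_mm_fault() that BUGs instead of an unresolved symbol, and the MMU wrapper keeps masking VM_FAULT_WRITE, which __handle_mm_fault() may OR into its return purely for internal bookkeeping. A hedged sketch of the usual caller pattern, modeled on the get_user_pages()-style loops of this era:

	/* Fragment only: tsk, mm, vma, address and write come from the
	 * surrounding loop. */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:		/* resident, no I/O needed */
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:		/* had to read the page in */
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		return -EFAULT;
	case VM_FAULT_OOM:
		return -ENOMEM;
	default:
		BUG();
	}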
@@ -774,7 +820,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
  * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
  * When freeing, reset page->mapping so free_pages_check won't complain.
  */
-#define __pte_lockptr(page)	&((page)->u.ptl)
+#define __pte_lockptr(page)	&((page)->ptl)
 #define pte_lock_init(_page)	do {					\
 	spin_lock_init(__pte_lockptr(_page));				\
 } while (0)
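
After the union flattening, the split page-table lock is page->ptl rather than page->u.ptl. Callers still never touch __pte_lockptr() directly; they go through the pte_offset_map_lock() helpers built on it. A sketch of the canonical pattern inside a page-table walk:

	/* Fragment: mm, pmd and address come from the walk. */
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (pte_present(*pte)) {
		/* ... inspect or modify the entry under the lock ... */
	}
	pte_unmap_unlock(pte, ptl);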
@@ -896,6 +942,8 @@ extern unsigned long do_brk(unsigned long, unsigned long);
 /* filemap.c */
 extern unsigned long page_unuse(struct page *);
 extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+				       loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
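
The range variant generalizes truncate_inode_pages(), which becomes the degenerate to-end-of-file case. A sketch of the expected relationship, with (loff_t)-1 standing for "no upper bound" as in mm/truncate.c:

	/* Truncate everything from lstart onward. */
	void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
	{
		truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
	}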
@@ -978,6 +1026,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (!PageHighMem(page) && !enable)
+		mutex_debug_check_no_locks_freed(page_address(page),
+						numpages * PAGE_SIZE);
 }
 #endif
 
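
This is why linux/mutex.h is now included above: when pages are being unmapped on the free path (enable == 0, lowmem only), the mutex debugging code scans the region for mutexes that are still live. A toy of the bug class it is meant to catch, with a hypothetical my_ctx structure:

	/* Buggy pattern the check flags; do not copy. */
	struct my_ctx {
		struct mutex lock;
	};

	struct my_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	mutex_init(&ctx->lock);
	mutex_lock(&ctx->lock);
	kfree(ctx);	/* memory freed with the mutex still held; the
			 * debug check fires when the page is later
			 * unmapped */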
@@ -993,5 +1044,12 @@ int in_gate_area_no_task(unsigned long addr);
 /* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
 #define OOM_DISABLE -17
 
+int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+			unsigned long lru_pages);
+void drop_pagecache(void);
+void drop_slab(void);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
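
These declarations back the /proc/sys/vm/drop_caches knob: writing 1 drops clean pagecache, 2 drops unused slab, 3 both. A hedged sketch of how the handler plausibly dispatches, assuming a sysctl_drop_caches backing integer as in fs/drop_caches.c:

	int drop_caches_sysctl_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
	{
		proc_dointvec_minmax(table, write, file, buffer, length, ppos);
		if (write) {
			if (sysctl_drop_caches & 1)	/* bit 0: pagecache */
				drop_pagecache();
			if (sysctl_drop_caches & 2)	/* bit 1: slab */
				drop_slab();
		}
		return 0;
	}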