author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
commit		bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree		216fdc1cbef450ca688135c5b8969169482d9a48 /include/linux/rmap.h
parent		3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent		657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--	include/linux/rmap.h | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b35bc0e19cd9..bf116d0dbf23 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+			struct mem_cgroup *cnt, unsigned long *vm_flags);
 int try_to_unmap(struct page *, int ignore_refs);
 
 /*
@@ -105,18 +106,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int page_mkclean(struct page *);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * called in munlock()/munmap() path to check for other vmas holding
  * the page mlocked.
  */
 int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
-	return 0;	/* a.k.a. SWAP_SUCCESS */
-}
-#endif
 
 #else	/* !CONFIG_MMU */
 
@@ -124,7 +118,14 @@ static inline int try_to_munlock(struct page *page)
 #define anon_vma_prepare(vma)	(0)
 #define anon_vma_link(vma)	do {} while (0)
 
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+static inline int page_referenced(struct page *page, int is_locked,
+				  struct mem_cgroup *cnt,
+				  unsigned long *vm_flags)
+{
+	*vm_flags = 0;
+	return TestClearPageReferenced(page);
+}
+
 #define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
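
Note on the API change in this diff: page_referenced() still returns whether the page was referenced, but it now also reports, through the new vm_flags out-parameter, the vm_flags bits of the VMAs in which references were found. The !CONFIG_MMU fallback is converted from a macro to a static inline so that *vm_flags is still initialized on that path. A minimal sketch of how a caller might use the new signature follows; should_reclaim_page() and its VM_EXEC heuristic are illustrative assumptions, not code from this commit:

/*
 * Illustrative sketch only -- should_reclaim_page() is a hypothetical
 * helper, not part of this commit or of mm/vmscan.c.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static int should_reclaim_page(struct page *page, struct mem_cgroup *memcg)
{
	unsigned long vm_flags = 0;
	int referenced;

	/* Callers must now pass &vm_flags; the old three-argument
	 * form no longer compiles. */
	referenced = page_referenced(page, 0, memcg, &vm_flags);

	/* Assumed heuristic: keep recently referenced pages that are
	 * mapped executable, on the theory that they are hot program
	 * text. */
	if (referenced && (vm_flags & VM_EXEC))
		return 0;

	return !referenced;
}

Returning the flags through an out-parameter lets a caller learn what kind of mappings referenced the page in the same rmap walk that computes the referenced count, rather than walking the reverse mappings a second time.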