author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:40 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:42 -0400
commit	4c21e2f2441dc5fbb957b030333f5a3f2d02dea7 (patch)
tree	1f76d33bb1d76221c6424bc5fed080a4f91349a6
parent	b38c6845b695141259019e2b7c0fe6c32a6e720d (diff)
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with a many-threaded application which concurrently initializes different parts of a large anonymous area.

This patch corrects that, by using a separate spinlock per page table page, to guard the page table entries in that page, instead of using the mm's single page_table_lock. (But even then, page_table_lock is still used to guard page table allocation, and anon_vma allocation.)

In this implementation, the spinlock is tucked inside the struct page of the page table page: with a BUILD_BUG_ON in case it overflows - which it would in the case of 32-bit PA-RISC with spinlock debugging enabled.

Splitting the lock is not quite for free: another cacheline access. Ideally, I suppose we would use split ptlock only for multi-threaded processes on multi-cpu machines; but deciding that dynamically would have its own costs. So for now enable it by config, at some number of cpus - since the Kconfig language doesn't support inequalities, let preprocessor compare that with NR_CPUS. But I don't think it's worth being user-configurable: for good testing of both split and unsplit configs, split now at 4 cpus, and perhaps change that to 8 later.

There is a benefit even for singly threaded processes: kswapd can be attacking one part of the mm while another part is busy faulting.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
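For illustration only (not part of the patch): a minimal sketch of how a caller takes the per-page-table lock after this change, using the pte_offset_map_lock()/pte_unmap_unlock() pattern that the mm/memory.c and mm/rmap.c hunks below are converted to. The function name touch_one_pte and its body are hypothetical.

	/* Hypothetical example: operate on one pte under the split ptlock.
	 * pte_offset_map_lock() hands back pte_lockptr(mm, pmd) in *ptl:
	 * the per-page spinlock when NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS,
	 * else the mm-wide page_table_lock as before.
	 */
	static void touch_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte_none(*pte)) {
			/* ... inspect or update *pte while the lock is held ... */
		}
		pte_unmap_unlock(pte, ptl);
	}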
-rw-r--r--	arch/arm/mm/mm-armv.c	1
-rw-r--r--	arch/frv/mm/pgalloc.c	4
-rw-r--r--	arch/i386/mm/pgtable.c	8
-rw-r--r--	arch/um/kernel/skas/mmu.c	1
-rw-r--r--	fs/afs/file.c	4
-rw-r--r--	fs/buffer.c	2
-rw-r--r--	fs/jfs/jfs_metapage.c	12
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	7
-rw-r--r--	include/linux/buffer_head.h	6
-rw-r--r--	include/linux/mm.h	46
-rw-r--r--	kernel/kexec.c	4
-rw-r--r--	mm/Kconfig	13
-rw-r--r--	mm/filemap.c	2
-rw-r--r--	mm/memory.c	24
-rw-r--r--	mm/mremap.c	11
-rw-r--r--	mm/page_alloc.c	16
-rw-r--r--	mm/page_io.c	6
-rw-r--r--	mm/rmap.c	4
-rw-r--r--	mm/shmem.c	22
-rw-r--r--	mm/swap.c	2
-rw-r--r--	mm/swap_state.c	8
-rw-r--r--	mm/swapfile.c	12
-rw-r--r--	mm/vmscan.c	2
23 files changed, 138 insertions, 79 deletions
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 60f3e039bac2..1221fdde1769 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -229,6 +229,7 @@ void free_pgd_slow(pgd_t *pgd)
 	pte = pmd_page(*pmd);
 	pmd_clear(pmd);
 	dec_page_state(nr_page_table_pages);
+	pte_lock_deinit(pte);
 	pte_free(pte);
 	pmd_free(pmd);
 free:
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 4eaec0f3525b..2c67dfe5a6b3 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -87,14 +87,14 @@ static inline void pgd_list_add(pgd_t *pgd)
 	if (pgd_list)
 		pgd_list->private = (unsigned long) &page->index;
 	pgd_list = page;
-	page->private = (unsigned long) &pgd_list;
+	set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *) page->index;
-	pprev = (struct page **) page->private;
+	pprev = (struct page **)page_private(page);
 	*pprev = next;
 	if (next)
 		next->private = (unsigned long) pprev;
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dcdce2c6c532..39c099f15b5f 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -188,19 +188,19 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 	page->index = (unsigned long)pgd_list;
 	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
+		set_page_private(pgd_list, (unsigned long)&page->index);
 	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
+	pprev = (struct page **)page_private(page);
 	*pprev = next;
 	if (next)
-		next->private = (unsigned long)pprev;
+		set_page_private(next, (unsigned long)pprev);
 }
 
 void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 02cf36e0331a..9e5e39cea821 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -144,6 +144,7 @@ void destroy_context_skas(struct mm_struct *mm)
 
 	if(!proc_mm || !ptrace_faultinfo){
 		free_page(mmu->id.stack);
+		pte_lock_deinit(virt_to_page(mmu->last_page_table));
 		pte_free_kernel((pte_t *) mmu->last_page_table);
 		dec_page_state(nr_page_table_pages);
 #ifdef CONFIG_3_LEVEL_PGTABLES
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 0d576987ec67..4975c9c193dd 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -291,8 +291,8 @@ static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
 	cachefs_uncache_page(vnode->cache, page);
 #endif
 
-	pageio = (struct cachefs_page *) page->private;
-	page->private = 0;
+	pageio = (struct cachefs_page *) page_private(page);
+	set_page_private(page, 0);
 	ClearPagePrivate(page);
 
 	if (pageio)
diff --git a/fs/buffer.c b/fs/buffer.c
index b1667986442f..2066e4cb700c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -96,7 +96,7 @@ static void
 __clear_page_buffers(struct page *page)
 {
 	ClearPagePrivate(page);
-	page->private = 0;
+	set_page_private(page, 0);
 	page_cache_release(page);
 }
 
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 26091a5f88d4..8a53981f9f27 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -86,7 +86,7 @@ struct meta_anchor {
 	atomic_t io_count;
 	struct metapage *mp[MPS_PER_PAGE];
 };
-#define mp_anchor(page) ((struct meta_anchor *)page->private)
+#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
 
 static inline struct metapage *page_to_mp(struct page *page, uint offset)
 {
@@ -108,7 +108,7 @@ static inline int insert_metapage(struct page *page, struct metapage *mp)
 		if (!a)
 			return -ENOMEM;
 		memset(a, 0, sizeof(struct meta_anchor));
-		page->private = (unsigned long)a;
+		set_page_private(page, (unsigned long)a);
 		SetPagePrivate(page);
 		kmap(page);
 	}
@@ -136,7 +136,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
 	a->mp[index] = NULL;
 	if (--a->mp_count == 0) {
 		kfree(a);
-		page->private = 0;
+		set_page_private(page, 0);
 		ClearPagePrivate(page);
 		kunmap(page);
 	}
@@ -156,13 +156,13 @@ static inline void dec_io(struct page *page, void (*handler) (struct page *))
 #else
 static inline struct metapage *page_to_mp(struct page *page, uint offset)
 {
-	return PagePrivate(page) ? (struct metapage *)page->private : NULL;
+	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
 }
 
 static inline int insert_metapage(struct page *page, struct metapage *mp)
 {
 	if (mp) {
-		page->private = (unsigned long)mp;
+		set_page_private(page, (unsigned long)mp);
 		SetPagePrivate(page);
 		kmap(page);
 	}
@@ -171,7 +171,7 @@ static inline int insert_metapage(struct page *page, struct metapage *mp)
 
 static inline void remove_metapage(struct page *page, struct metapage *mp)
 {
-	page->private = 0;
+	set_page_private(page, 0);
 	ClearPagePrivate(page);
 	kunmap(page);
 }
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ba4767c04adf..4cd46abe8434 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -181,8 +181,9 @@ set_page_region(
 	size_t		offset,
 	size_t		length)
 {
-	page->private |= page_region_mask(offset, length);
-	if (page->private == ~0UL)
+	set_page_private(page,
+		page_private(page) | page_region_mask(offset, length));
+	if (page_private(page) == ~0UL)
 		SetPageUptodate(page);
 }
 
@@ -194,7 +195,7 @@ test_page_region(
 {
 	unsigned long	mask = page_region_mask(offset, length);
 
-	return (mask && (page->private & mask) == mask);
+	return (mask && (page_private(page) & mask) == mask);
 }
 
 /*
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 88af42f5e04a..c937d6e65502 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -126,8 +126,8 @@ BUFFER_FNS(Eopnotsupp, eopnotsupp)
 /* If we *know* page->private refers to buffer_heads */
 #define page_buffers(page)					\
 	({							\
 		BUG_ON(!PagePrivate(page));			\
-		((struct buffer_head *)(page)->private);	\
+		((struct buffer_head *)page_private(page));	\
 	})
 #define page_has_buffers(page)	PagePrivate(page)
 
@@ -219,7 +219,7 @@ static inline void attach_page_buffers(struct page *page,
 {
 	page_cache_get(page);
 	SetPagePrivate(page);
-	page->private = (unsigned long)head;
+	set_page_private(page, (unsigned long)head);
 }
 
 static inline void get_bh(struct buffer_head *bh)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e8d1424153bb..8a514eca40d5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -226,13 +226,18 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-	unsigned long private;		/* Mapping-private opaque data:
+	union {
+		unsigned long private;	/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
 					 * swp_entry_t if PageSwapCache
 					 * When page is free, this indicates
 					 * order in the buddy system.
 					 */
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+		spinlock_t ptl;
+#endif
+	} u;
 	struct address_space *mapping;	/* If low bit clear, points to
 					 * inode address_space, or NULL.
 					 * If page mapped as anonymous
@@ -260,6 +265,9 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
+#define page_private(page)		((page)->u.private)
+#define set_page_private(page, v)	((page)->u.private = (v))
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
@@ -311,17 +319,17 @@ extern void FASTCALL(__page_cache_release(struct page *));
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-static inline int page_count(struct page *p)
+static inline int page_count(struct page *page)
 {
-	if (PageCompound(p))
-		p = (struct page *)p->private;
-	return atomic_read(&(p)->_count) + 1;
+	if (PageCompound(page))
+		page = (struct page *)page_private(page);
+	return atomic_read(&page->_count) + 1;
 }
 
 static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
-		page = (struct page *)page->private;
+		page = (struct page *)page_private(page);
 	atomic_inc(&page->_count);
 }
 
@@ -587,7 +595,7 @@ static inline int PageAnon(struct page *page)
 static inline pgoff_t page_index(struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
-		return page->private;
+		return page_private(page);
 	return page->index;
 }
 
@@ -779,9 +787,31 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+/*
+ * We tuck a spinlock to guard each pagetable page into its struct page,
+ * at page->private, with BUILD_BUG_ON to make sure that this will not
+ * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+ * When freeing, reset page->mapping so free_pages_check won't complain.
+ */
+#define __pte_lockptr(page)	&((page)->u.ptl)
+#define pte_lock_init(_page)	do {					\
+	spin_lock_init(__pte_lockptr(_page));				\
+} while (0)
+#define pte_lock_deinit(page)	((page)->mapping = NULL)
+#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+#else
+/*
+ * We use mm->page_table_lock to guard all pagetable pages of the mm.
+ */
+#define pte_lock_init(page)	do {} while (0)
+#define pte_lock_deinit(page)	do {} while (0)
+#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
 #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
 ({							\
-	spinlock_t *__ptl = &(mm)->page_table_lock;	\
+	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
 	pte_t *__pte = pte_offset_map(pmd, address);	\
 	*(ptlp) = __ptl;				\
 	spin_lock(__ptl);				\
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 36c5d9cd4cc1..2c95848fbce8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -334,7 +334,7 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 	if (pages) {
 		unsigned int count, i;
 		pages->mapping = NULL;
-		pages->private = order;
+		set_page_private(pages, order);
 		count = 1 << order;
 		for (i = 0; i < count; i++)
 			SetPageReserved(pages + i);
@@ -347,7 +347,7 @@ static void kimage_free_pages(struct page *page)
 {
 	unsigned int order, count, i;
 
-	order = page->private;
+	order = page_private(page);
 	count = 1 << order;
 	for (i = 0; i < count; i++)
 		ClearPageReserved(page + i);
diff --git a/mm/Kconfig b/mm/Kconfig
index 391ffc54d136..f35a550ba4b9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -111,3 +111,16 @@ config SPARSEMEM_STATIC
 config SPARSEMEM_EXTREME
 	def_bool y
 	depends on SPARSEMEM && !SPARSEMEM_STATIC
+
+# Heavily threaded applications may benefit from splitting the mm-wide
+# page_table_lock, so that faults on different parts of the user address
+# space can be handled with less contention: split it at this NR_CPUS.
+# Default to 4 for wider testing, though 8 might be more appropriate.
+# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
+# PA-RISC's debug spinlock_t is too large for the 32-bit struct page.
+#
+config SPLIT_PTLOCK_CPUS
+	int
+	default "4096" if ARM && !CPU_CACHE_VIPT
+	default "4096" if PARISC && DEBUG_SPINLOCK && !64BIT
+	default "4"
diff --git a/mm/filemap.c b/mm/filemap.c
index 8aa344e88489..f560b41c8f61 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -152,7 +152,7 @@ static int sync_page(void *word)
  * in the ->sync_page() methods make essential use of the
  * page_mapping(), merely passing the page down to the backing
  * device's unplug functions when it's non-NULL, which in turn
- * ignore it for all cases but swap, where only page->private is
+ * ignore it for all cases but swap, where only page_private(page) is
  * of interest. When page_mapping() does go NULL, the entire
  * call stack gracefully ignores the page and returns.
  * -- wli
diff --git a/mm/memory.c b/mm/memory.c
index 8461e2dd91d7..e9ef599498b5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -114,6 +114,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	struct page *page = pmd_page(*pmd);
 	pmd_clear(pmd);
+	pte_lock_deinit(page);
 	pte_free_tlb(tlb, page);
 	dec_page_state(nr_page_table_pages);
 	tlb->mm->nr_ptes--;
@@ -294,10 +295,12 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	if (!new)
 		return -ENOMEM;
 
+	pte_lock_init(new);
 	spin_lock(&mm->page_table_lock);
-	if (pmd_present(*pmd))		/* Another has populated it */
+	if (pmd_present(*pmd)) {	/* Another has populated it */
+		pte_lock_deinit(new);
 		pte_free(new);
-	else {
+	} else {
 		mm->nr_ptes++;
 		inc_page_state(nr_page_table_pages);
 		pmd_populate(mm, pmd, new);
@@ -432,7 +435,7 @@ again:
 	if (!dst_pte)
 		return -ENOMEM;
 	src_pte = pte_offset_map_nested(src_pmd, addr);
-	src_ptl = &src_mm->page_table_lock;
+	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock(src_ptl);
 
 	do {
@@ -1194,15 +1197,16 @@ EXPORT_SYMBOL(remap_pfn_range);
  * (but do_wp_page is only called after already making such a check;
  * and do_anonymous_page and do_no_page can safely check later on).
  */
-static inline int pte_unmap_same(struct mm_struct *mm,
+static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
 				pte_t *page_table, pte_t orig_pte)
 {
 	int same = 1;
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
 	if (sizeof(pte_t) > sizeof(unsigned long)) {
-		spin_lock(&mm->page_table_lock);
+		spinlock_t *ptl = pte_lockptr(mm, pmd);
+		spin_lock(ptl);
 		same = pte_same(*page_table, orig_pte);
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 	}
 #endif
 	pte_unmap(page_table);
@@ -1655,7 +1659,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte;
 	int ret = VM_FAULT_MINOR;
 
-	if (!pte_unmap_same(mm, page_table, orig_pte))
+	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		goto out;
 
 	entry = pte_to_swp_entry(orig_pte);
@@ -1773,7 +1777,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_get(page);
 		entry = mk_pte(page, vma->vm_page_prot);
 
-		ptl = &mm->page_table_lock;
+		ptl = pte_lockptr(mm, pmd);
 		spin_lock(ptl);
 		if (!pte_none(*page_table))
 			goto release;
@@ -1934,7 +1938,7 @@ static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pgoff_t pgoff;
 	int err;
 
-	if (!pte_unmap_same(mm, page_table, orig_pte))
+	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		return VM_FAULT_MINOR;
 
 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
@@ -1992,7 +1996,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 					pte, pmd, write_access, entry);
 	}
 
-	ptl = &mm->page_table_lock;
+	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
diff --git a/mm/mremap.c b/mm/mremap.c
index 8de77b632a20..b535438c363c 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -72,7 +72,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	struct address_space *mapping = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
-	spinlock_t *old_ptl;
+	spinlock_t *old_ptl, *new_ptl;
 
 	if (vma->vm_file) {
 		/*
@@ -88,8 +88,15 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		new_vma->vm_truncate_count = 0;
 	}
 
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * pte locks because exclusive mmap_sem prevents deadlock.
+	 */
 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
 	new_pte = pte_offset_map_nested(new_pmd, new_addr);
+	new_ptl = pte_lockptr(mm, new_pmd);
+	if (new_ptl != old_ptl)
+		spin_lock(new_ptl);
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
 				   new_pte++, new_addr += PAGE_SIZE) {
@@ -101,6 +108,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		set_pte_at(mm, new_addr, new_pte, pte);
 	}
 
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
 	pte_unmap_nested(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0541288ebf4b..a2995a5d012c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -154,7 +154,7 @@ static void prep_compound_page(struct page *page, unsigned long order)
 		struct page *p = page + i;
 
 		SetPageCompound(p);
-		p->private = (unsigned long)page;
+		set_page_private(p, (unsigned long)page);
 	}
 }
160 160
@@ -174,7 +174,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 
 		if (!PageCompound(p))
 			bad_page(__FUNCTION__, page);
-		if (p->private != (unsigned long)page)
+		if (page_private(p) != (unsigned long)page)
 			bad_page(__FUNCTION__, page);
 		ClearPageCompound(p);
 	}
@@ -187,18 +187,18 @@ static void destroy_compound_page(struct page *page, unsigned long order)
  * So, we don't need atomic page->flags operations here.
  */
 static inline unsigned long page_order(struct page *page) {
-	return page->private;
+	return page_private(page);
 }
 
 static inline void set_page_order(struct page *page, int order) {
-	page->private = order;
+	set_page_private(page, order);
 	__SetPagePrivate(page);
 }
 
 static inline void rmv_page_order(struct page *page)
 {
 	__ClearPagePrivate(page);
-	page->private = 0;
+	set_page_private(page, 0);
 }
 
 /*
@@ -238,7 +238,7 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
  * (a) the buddy is free &&
  * (b) the buddy is on the buddy system &&
  * (c) a page and its buddy have the same order.
- * for recording page's order, we use page->private and PG_private.
+ * for recording page's order, we use page_private(page) and PG_private.
  *
  */
 static inline int page_is_buddy(struct page *page, int order)
@@ -264,7 +264,7 @@ static inline int page_is_buddy(struct page *page, int order)
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
  * free pages of length of (1 << order) and marked with PG_Private.Page's
- * order is recorded in page->private field.
+ * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other. That is, if we allocate a small block, and both were
  * free, the remainder of the region must be split into blocks.
@@ -463,7 +463,7 @@ static void prep_new_page(struct page *page, int order)
 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_checked | 1 << PG_mappedtodisk);
-	page->private = 0;
+	set_page_private(page, 0);
 	set_page_refs(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 }
diff --git a/mm/page_io.c b/mm/page_io.c
index 330e00d6db00..bb2b0d53889c 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -91,7 +91,8 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		unlock_page(page);
 		goto out;
 	}
-	bio = get_swap_bio(GFP_NOIO, page->private, page, end_swap_bio_write);
+	bio = get_swap_bio(GFP_NOIO, page_private(page), page,
+				end_swap_bio_write);
 	if (bio == NULL) {
 		set_page_dirty(page);
 		unlock_page(page);
@@ -115,7 +116,8 @@ int swap_readpage(struct file *file, struct page *page)
 
 	BUG_ON(!PageLocked(page));
 	ClearPageUptodate(page);
-	bio = get_swap_bio(GFP_KERNEL, page->private, page, end_swap_bio_read);
+	bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
+				end_swap_bio_read);
 	if (bio == NULL) {
 		unlock_page(page);
 		ret = -ENOMEM;
diff --git a/mm/rmap.c b/mm/rmap.c
index a84bdfe582c0..a33e779d1bd8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,7 +274,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 		return NULL;
 	}
 
-	ptl = &mm->page_table_lock;
+	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 		*ptlp = ptl;
@@ -550,7 +550,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 	update_hiwater_rss(mm);
 
 	if (PageAnon(page)) {
-		swp_entry_t entry = { .val = page->private };
+		swp_entry_t entry = { .val = page_private(page) };
 		/*
 		 * Store the swap location in the pte.
 		 * See handle_pte_fault() ...
diff --git a/mm/shmem.c b/mm/shmem.c
index 37777f4c11f8..dc25565a61e9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -71,9 +71,6 @@
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
 
-/* Keep swapped page count in private field of indirect struct page */
-#define nr_swapped		private
-
 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
 enum sgp_type {
 	SGP_QUICK,	/* don't try more than file page cache lookup */
@@ -324,8 +321,10 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns
 
 	entry->val = value;
 	info->swapped += incdec;
-	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
-		kmap_atomic_to_page(entry)->nr_swapped += incdec;
+	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
+		struct page *page = kmap_atomic_to_page(entry);
+		set_page_private(page, page_private(page) + incdec);
+	}
 }
 
 /*
@@ -368,9 +367,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 
 	spin_unlock(&info->lock);
 	page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
-	if (page) {
-		page->nr_swapped = 0;
-	}
+	if (page)
+		set_page_private(page, 0);
 	spin_lock(&info->lock);
 
 	if (!page) {
@@ -561,7 +559,7 @@ static void shmem_truncate(struct inode *inode)
 			diroff = 0;
 		}
 		subdir = dir[diroff];
-		if (subdir && subdir->nr_swapped) {
+		if (subdir && page_private(subdir)) {
 			size = limit - idx;
 			if (size > ENTRIES_PER_PAGE)
 				size = ENTRIES_PER_PAGE;
@@ -572,10 +570,10 @@ static void shmem_truncate(struct inode *inode)
 			nr_swaps_freed += freed;
 			if (offset)
 				spin_lock(&info->lock);
-			subdir->nr_swapped -= freed;
+			set_page_private(subdir, page_private(subdir) - freed);
 			if (offset)
 				spin_unlock(&info->lock);
-			BUG_ON(subdir->nr_swapped > offset);
+			BUG_ON(page_private(subdir) > offset);
 		}
 		if (offset)
 			offset = 0;
@@ -743,7 +741,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 			dir = shmem_dir_map(subdir);
 		}
 		subdir = *dir;
-		if (subdir && subdir->nr_swapped) {
+		if (subdir && page_private(subdir)) {
 			ptr = shmem_swp_map(subdir);
 			size = limit - idx;
 			if (size > ENTRIES_PER_PAGE)
diff --git a/mm/swap.c b/mm/swap.c
index 21d15f99805c..b89512877ec2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -39,7 +39,7 @@ int page_cluster;
 void put_page(struct page *page)
 {
 	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page->private;
+		page = (struct page *)page_private(page);
 		if (put_page_testzero(page)) {
 			void (*dtor)(struct page *page);
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 132164f7d0a7..cafc1edcbeba 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -83,7 +83,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 		page_cache_get(page);
 		SetPageLocked(page);
 		SetPageSwapCache(page);
-		page->private = entry.val;
+		set_page_private(page, entry.val);
 		total_swapcache_pages++;
 		pagecache_acct(1);
 	}
@@ -126,8 +126,8 @@ void __delete_from_swap_cache(struct page *page)
 	BUG_ON(PageWriteback(page));
 	BUG_ON(PagePrivate(page));
 
-	radix_tree_delete(&swapper_space.page_tree, page->private);
-	page->private = 0;
+	radix_tree_delete(&swapper_space.page_tree, page_private(page));
+	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
 	pagecache_acct(-1);
@@ -197,7 +197,7 @@ void delete_from_swap_cache(struct page *page)
 {
 	swp_entry_t entry;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 
 	write_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 510f0039b000..8970c0b74194 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -61,7 +61,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 	swp_entry_t entry;
 
 	down_read(&swap_unplug_sem);
-	entry.val = page->private;
+	entry.val = page_private(page);
 	if (PageSwapCache(page)) {
 		struct block_device *bdev = swap_info[swp_type(entry)].bdev;
 		struct backing_dev_info *bdi;
@@ -69,8 +69,8 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 		/*
 		 * If the page is removed from swapcache from under us (with a
 		 * racy try_to_unuse/swapoff) we need an additional reference
-		 * count to avoid reading garbage from page->private above. If
-		 * the WARN_ON triggers during a swapoff it maybe the race
+		 * count to avoid reading garbage from page_private(page) above.
+		 * If the WARN_ON triggers during a swapoff it maybe the race
 		 * condition and it's harmless. However if it triggers without
 		 * swapoff it signals a problem.
 		 */
@@ -294,7 +294,7 @@ static inline int page_swapcount(struct page *page)
 	struct swap_info_struct *p;
 	swp_entry_t entry;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 	p = swap_info_get(entry);
 	if (p) {
 		/* Subtract the 1 for the swap cache itself */
@@ -339,7 +339,7 @@ int remove_exclusive_swap_page(struct page *page)
 	if (page_count(page) != 2) /* 2: us + cache */
 		return 0;
 
-	entry.val = page->private;
+	entry.val = page_private(page);
 	p = swap_info_get(entry);
 	if (!p)
 		return 0;
@@ -1042,7 +1042,7 @@ int page_queue_congested(struct page *page)
 	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */
 
 	if (PageSwapCache(page)) {
-		swp_entry_t entry = { .val = page->private };
+		swp_entry_t entry = { .val = page_private(page) };
 		struct swap_info_struct *sis;
 
 		sis = get_swap_info_struct(swp_type(entry));
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 41d1064aabfb..135bf8ca96ee 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -521,7 +521,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 
 #ifdef CONFIG_SWAP
 	if (PageSwapCache(page)) {
-		swp_entry_t swap = { .val = page->private };
+		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
 		write_unlock_irq(&mapping->tree_lock);
 		swap_free(swap);