Diffstat (limited to 'mm')
-rw-r--r--   mm/compaction.c    15
-rw-r--r--   mm/filemap.c        4
-rw-r--r--   mm/huge_memory.c   10
-rw-r--r--   mm/memcontrol.c    14
-rw-r--r--   mm/mlock.c         14
-rw-r--r--   mm/page_idle.c      8
-rw-r--r--   mm/rmap.c           2
-rw-r--r--   mm/swap.c          16
-rw-r--r--   mm/vmscan.c        16
9 files changed, 50 insertions(+), 49 deletions(-)
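
Every hunk below applies the same mechanical conversion: callers stop going through the zone_lru_lock() wrapper and take the owning node's LRU lock directly, looked up with page_pgdat(). For orientation only (not part of the patch), here is a minimal sketch of the converted pattern, modelled on the lock_page_lru() hunk in mm/memcontrol.c; the helper name example_isolate_from_lru() and its standalone form are illustrative assumptions, and the calls assume the mm APIs as they existed at the time of this change:

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memcontrol.h>
#include <linux/mmzone.h>

/* Illustrative helper, not part of the patch: detach one page from its LRU. */
static void example_isolate_from_lru(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);	/* node that owns this page's LRU */
	struct lruvec *lruvec;

	/* Old form: spin_lock_irq(zone_lru_lock(page_zone(page))); */
	spin_lock_irq(&pgdat->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	if (PageLRU(page)) {
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&pgdat->lru_lock);
}
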
diff --git a/mm/compaction.c b/mm/compaction.c
index 1cc871da3fda..e054276cf397 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -775,6 +775,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			unsigned long end_pfn, isolate_mode_t isolate_mode)
 {
 	struct zone *zone = cc->zone;
+	pg_data_t *pgdat = zone->zone_pgdat;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
@@ -839,8 +840,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * if contended.
 		 */
 		if (!(low_pfn % SWAP_CLUSTER_MAX)
-		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
-							&locked, cc))
+		    && compact_unlock_should_abort(&pgdat->lru_lock,
+							flags, &locked, cc))
 			break;
 
 		if (!pfn_valid_within(low_pfn))
@@ -910,7 +911,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (unlikely(__PageMovable(page)) &&
 				!PageIsolated(page)) {
 			if (locked) {
-				spin_unlock_irqrestore(zone_lru_lock(zone),
+				spin_unlock_irqrestore(&pgdat->lru_lock,
 							flags);
 				locked = false;
 			}
@@ -940,7 +941,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
-			locked = compact_lock_irqsave(zone_lru_lock(zone),
+			locked = compact_lock_irqsave(&pgdat->lru_lock,
 							&flags, cc);
 
 			/* Try get exclusive access under lock */
@@ -965,7 +966,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			}
 		}
 
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, isolate_mode) != 0)
@@ -1007,7 +1008,7 @@ isolate_fail:
 		 */
 		if (nr_isolated) {
 			if (locked) {
-				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 				locked = false;
 			}
 			putback_movable_pages(&cc->migratepages);
@@ -1034,7 +1035,7 @@ isolate_fail:
 
 isolate_abort:
 	if (locked)
-		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 
 	/*
 	 * Updated the cached scanner pfn once the pageblock has been scanned
diff --git a/mm/filemap.c b/mm/filemap.c
index a41e01c472f3..a3b4021c448f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -98,8 +98,8 @@
  *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->i_pages lock		(try_to_unmap_one)
- *    ->zone_lru_lock(zone)	(follow_page->mark_page_accessed)
- *    ->zone_lru_lock(zone)	(check_pte_range->isolate_lru_page)
+ *    ->pgdat->lru_lock	(follow_page->mark_page_accessed)
+ *    ->pgdat->lru_lock	(check_pte_range->isolate_lru_page)
  *    ->private_lock		(page_remove_rmap->set_page_dirty)
  *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d4847026d4b1..fcf657886b4b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2440,11 +2440,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		pgoff_t end, unsigned long flags)
 {
 	struct page *head = compound_head(page);
-	struct zone *zone = page_zone(head);
+	pg_data_t *pgdat = page_pgdat(head);
 	struct lruvec *lruvec;
 	int i;
 
-	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
+	lruvec = mem_cgroup_page_lruvec(head, pgdat);
 
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(head);
@@ -2475,7 +2475,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		xa_unlock(&head->mapping->i_pages);
 	}
 
-	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
+	spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 
 	remap_page(head);
 
@@ -2686,7 +2686,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	lru_add_drain();
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
-	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
+	spin_lock_irqsave(&pgdata->lru_lock, flags);
 
 	if (mapping) {
 		XA_STATE(xas, &mapping->i_pages, page_index(head));
@@ -2731,7 +2731,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		spin_unlock(&pgdata->split_queue_lock);
 fail:		if (mapping)
 			xa_unlock(&mapping->i_pages);
-		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
+		spin_unlock_irqrestore(&pgdata->lru_lock, flags);
 		remap_page(head);
 		ret = -EBUSY;
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 45cd1f84268a..7160cfab8107 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2362,13 +2362,13 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 static void lock_page_lru(struct page *page, int *isolated)
 {
-	struct zone *zone = page_zone(page);
+	pg_data_t *pgdat = page_pgdat(page);
 
-	spin_lock_irq(zone_lru_lock(zone));
+	spin_lock_irq(&pgdat->lru_lock);
 	if (PageLRU(page)) {
 		struct lruvec *lruvec;
 
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		*isolated = 1;
@@ -2378,17 +2378,17 @@ static void lock_page_lru(struct page *page, int *isolated)
 
 static void unlock_page_lru(struct page *page, int isolated)
 {
-	struct zone *zone = page_zone(page);
+	pg_data_t *pgdat = page_pgdat(page);
 
 	if (isolated) {
 		struct lruvec *lruvec;
 
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		SetPageLRU(page);
 		add_page_to_lru_list(page, lruvec, page_lru(page));
 	}
-	spin_unlock_irq(zone_lru_lock(zone));
+	spin_unlock_irq(&pgdat->lru_lock);
 }
 
 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -2674,7 +2674,7 @@ void __memcg_kmem_uncharge(struct page *page, int order)
 
 /*
  * Because tail pages are not marked as "used", set it. We're under
- * zone_lru_lock and migration entries setup in all page mappings.
+ * pgdat->lru_lock and migration entries setup in all page mappings.
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
diff --git a/mm/mlock.c b/mm/mlock.c
index 41cc47e28ad6..080f3b36415b 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -182,7 +182,7 @@ static void __munlock_isolation_failed(struct page *page)
 unsigned int munlock_vma_page(struct page *page)
 {
 	int nr_pages;
-	struct zone *zone = page_zone(page);
+	pg_data_t *pgdat = page_pgdat(page);
 
 	/* For try_to_munlock() and to serialize with page migration */
 	BUG_ON(!PageLocked(page));
@@ -194,7 +194,7 @@ unsigned int munlock_vma_page(struct page *page)
 	 * might otherwise copy PageMlocked to part of the tail pages before
 	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
 	 */
-	spin_lock_irq(zone_lru_lock(zone));
+	spin_lock_irq(&pgdat->lru_lock);
 
 	if (!TestClearPageMlocked(page)) {
 		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
@@ -203,17 +203,17 @@ unsigned int munlock_vma_page(struct page *page)
 	}
 
 	nr_pages = hpage_nr_pages(page);
-	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
+	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
 	if (__munlock_isolate_lru_page(page, true)) {
-		spin_unlock_irq(zone_lru_lock(zone));
+		spin_unlock_irq(&pgdat->lru_lock);
 		__munlock_isolated_page(page);
 		goto out;
 	}
 	__munlock_isolation_failed(page);
 
 unlock_out:
-	spin_unlock_irq(zone_lru_lock(zone));
+	spin_unlock_irq(&pgdat->lru_lock);
 
 out:
 	return nr_pages - 1;
@@ -298,7 +298,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	pagevec_init(&pvec_putback);
 
 	/* Phase 1: page isolation */
-	spin_lock_irq(zone_lru_lock(zone));
+	spin_lock_irq(&zone->zone_pgdat->lru_lock);
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
 
@@ -325,7 +325,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 		pvec->pages[i] = NULL;
 	}
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
-	spin_unlock_irq(zone_lru_lock(zone));
+	spin_unlock_irq(&zone->zone_pgdat->lru_lock);
 
 	/* Now we can release pins of pages that we are not munlocking */
 	pagevec_release(&pvec_putback);
diff --git a/mm/page_idle.c b/mm/page_idle.c
index b9e4b42b33ab..0b39ec0c945c 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -31,7 +31,7 @@
 static struct page *page_idle_get_page(unsigned long pfn)
 {
 	struct page *page;
-	struct zone *zone;
+	pg_data_t *pgdat;
 
 	if (!pfn_valid(pfn))
 		return NULL;
@@ -41,13 +41,13 @@ static struct page *page_idle_get_page(unsigned long pfn)
 	    !get_page_unless_zero(page))
 		return NULL;
 
-	zone = page_zone(page);
-	spin_lock_irq(zone_lru_lock(zone));
+	pgdat = page_pgdat(page);
+	spin_lock_irq(&pgdat->lru_lock);
 	if (unlikely(!PageLRU(page))) {
 		put_page(page);
 		page = NULL;
 	}
-	spin_unlock_irq(zone_lru_lock(zone));
+	spin_unlock_irq(&pgdat->lru_lock);
 	return page;
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 0454ecc29537..b30c7c71d1d9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -27,7 +27,7 @@
  *         mapping->i_mmap_rwsem
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
- *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
  *               swap_lock (in swap_duplicate, swap_info_get)
  *                 mmlist_lock (in mmput, drain_mmlist and others)
  *                 mapping->private_lock (in __set_page_dirty_buffers)
diff --git a/mm/swap.c b/mm/swap.c
index 4d7d37eb3c40..301ed4e04320 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -58,16 +58,16 @@ static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
-		struct zone *zone = page_zone(page);
+		pg_data_t *pgdat = page_pgdat(page);
 		struct lruvec *lruvec;
 		unsigned long flags;
 
-		spin_lock_irqsave(zone_lru_lock(zone), flags);
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		spin_lock_irqsave(&pgdat->lru_lock, flags);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
 		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_off_lru(page));
-		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
 	}
 	__ClearPageWaiters(page);
 	mem_cgroup_uncharge(page);
@@ -322,12 +322,12 @@ static inline void activate_page_drain(int cpu)
 
 void activate_page(struct page *page)
 {
-	struct zone *zone = page_zone(page);
+	pg_data_t *pgdat = page_pgdat(page);
 
 	page = compound_head(page);
-	spin_lock_irq(zone_lru_lock(zone));
-	__activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
-	spin_unlock_irq(zone_lru_lock(zone));
+	spin_lock_irq(&pgdat->lru_lock);
+	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
+	spin_unlock_irq(&pgdat->lru_lock);
 }
 #endif
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dda6b80d045f..a5ad0b35ab8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1614,8 +1614,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 
 }
 
-/*
- * zone_lru_lock is heavily contended. Some of the functions that
+/**
+ * pgdat->lru_lock is heavily contended. Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
  *
@@ -1750,11 +1750,11 @@ int isolate_lru_page(struct page *page)
 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
 	if (PageLRU(page)) {
-		struct zone *zone = page_zone(page);
+		pg_data_t *pgdat = page_pgdat(page);
 		struct lruvec *lruvec;
 
-		spin_lock_irq(zone_lru_lock(zone));
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		spin_lock_irq(&pgdat->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
 			get_page(page);
@@ -1762,7 +1762,7 @@ int isolate_lru_page(struct page *page)
 			del_page_from_lru_list(page, lruvec, lru);
 			ret = 0;
 		}
-		spin_unlock_irq(zone_lru_lock(zone));
+		spin_unlock_irq(&pgdat->lru_lock);
 	}
 	return ret;
 }
@@ -1990,9 +1990,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * processes, from rmap.
 	 *
 	 * If the pages are mostly unmapped, the processing is fast and it is
-	 * appropriate to hold zone_lru_lock across the whole operation. But if
+	 * appropriate to hold pgdat->lru_lock across the whole operation. But if
 	 * the pages are mapped, the processing is slow (page_referenced()) so we
-	 * should drop zone_lru_lock around each page. It's impossible to balance
+	 * should drop pgdat->lru_lock around each page. It's impossible to balance
 	 * this, so instead we remove the pages from the LRU while processing them.
 	 * It is safe to rely on PG_active against the non-LRU pages in here because
 	 * nobody will play with that bit on a non-LRU page.