author     James Morris <jmorris@namei.org>  2008-12-04 01:16:36 -0500
committer  James Morris <jmorris@namei.org>  2008-12-04 01:16:36 -0500
commit     ec98ce480ada787f2cfbd696980ff3564415505b (patch)
tree       1a4d644b38f9f1e4b4e086fde0b195df4a92cf84 /mm
parent     3496f92beb9aa99ef21fccc154a36c7698e9c538 (diff)
parent     feaf3848a813a106f163013af6fcf6c4bfec92d9 (diff)
Merge branch 'master' into next
Conflicts:
	fs/nfsd/nfs4recover.c

Manually fixed above to use new creds API functions, e.g. nfs4_save_creds().

Signed-off-by: James Morris <jmorris@namei.org>
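For context on the conflict resolution named above: the new creds API replaces direct fsuid/fsgid twiddling with a prepare/override/revert sequence. Below is a minimal sketch of that pattern, in the spirit of nfs4_save_creds(); the function name and field choices are assumptions for the example, and only prepare_creds(), override_creds(), revert_creds() and put_cred() are real API here.

/*
 * Illustrative sketch only, not code from this merge: the
 * prepare/override/revert pattern from the new creds API.
 */
#include <linux/cred.h>
#include <linux/errno.h>

static int example_do_recovery_io(void)
{
	struct cred *new;
	const struct cred *old;

	new = prepare_creds();		/* private, modifiable copy of current creds */
	if (!new)
		return -ENOMEM;

	new->fsuid = 0;			/* act as root while touching on-disk state */
	new->fsgid = 0;

	old = override_creds(new);	/* install the new creds, remember the old */

	/* ... recovery-directory I/O would go here ... */

	revert_creds(old);		/* restore the caller's credentials */
	put_cred(new);			/* drop our reference to the override set */
	return 0;
}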
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c      3
-rw-r--r--  mm/memory_hotplug.c  12
-rw-r--r--  mm/migrate.c          5
-rw-r--r--  mm/page_cgroup.c     56
-rw-r--r--  mm/slub.c             6
-rw-r--r--  mm/sparse.c           2
-rw-r--r--  mm/swap.c             7
-rw-r--r--  mm/vmalloc.c         41
-rw-r--r--  mm/vmscan.c          11
9 files changed, 95 insertions(+), 48 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f2e574dbc300..2a56124dbc28 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -176,6 +176,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	int ret = 0;
 	struct device *dev;
 
+	if (WARN_ON(bdi->dev))
+		goto exit;
+
 	va_start(args, fmt);
 	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 	va_end(args);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6837a1014372..b17371185468 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,7 +22,6 @@
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/ioport.h>
-#include <linux/cpuset.h>
 #include <linux/delay.h>
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
@@ -190,7 +189,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 					pgdat->node_start_pfn;
 }
 
-static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
@@ -217,7 +216,7 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	return 0;
 }
 
-static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
 	int nr_pages = PAGES_PER_SECTION;
 	int ret;
@@ -274,7 +273,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
+int __ref __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 			unsigned long nr_pages)
 {
 	unsigned long i;
@@ -471,7 +470,8 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 
-int add_memory(int nid, u64 start, u64 size)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+int __ref add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat = NULL;
 	int new_pgdat = 0;
@@ -498,8 +498,6 @@ int add_memory(int nid, u64 start, u64 size)
 	/* we online node here. we can't roll back from here. */
 	node_set_online(nid);
 
-	cpuset_track_online_nodes();
-
 	if (new_pgdat) {
 		ret = register_one_node(nid);
 		/*
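A note on the section annotations this file gains (__meminit on the helpers, __ref plus an explanatory comment on add_memory()): __meminit code can be discarded after boot unless CONFIG_MEMORY_HOTPLUG keeps it, so a caller that stays resident either becomes __meminit itself or is marked __ref to document a reference it knows is safe. A small sketch of that pattern; the names below are made up, only the annotations themselves come from <linux/init.h>.

#include <linux/init.h>

/* Hypothetical __meminit helper standing in for __add_zone()/__add_section(). */
static int __meminit example_add_section(unsigned long phys_start_pfn)
{
	return 0;		/* real section setup elided */
}

/* We are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG. */
static int __ref example_add_memory(int nid, unsigned long start_pfn)
{
	return example_add_section(start_pfn);
}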
diff --git a/mm/migrate.c b/mm/migrate.c
index 9dd10da1cc23..0461fc6c961c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -522,15 +522,12 @@ static int writeout(struct address_space *mapping, struct page *page)
 	remove_migration_ptes(page, page);
 
 	rc = mapping->a_ops->writepage(page, &wbc);
-	if (rc < 0)
-		/* I/O Error writing */
-		return -EIO;
 
 	if (rc != AOP_WRITEPAGE_ACTIVATE)
 		/* unlocked. Relock */
 		lock_page(page);
 
-	return -EAGAIN;
+	return (rc < 0) ? -EIO : -EAGAIN;
 }
 
 /*
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 1223d927904d..0b3cbf090a67 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -21,7 +21,7 @@ static unsigned long total_usage;
 #if !defined(CONFIG_SPARSEMEM)
 
 
-void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 {
 	pgdat->node_page_cgroup = NULL;
 }
@@ -97,7 +97,8 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 	return section->page_cgroup + pfn;
 }
 
-int __meminit init_section_page_cgroup(unsigned long pfn)
+/* __alloc_bootmem...() is protected by !slab_available() */
+int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
 	struct mem_section *section;
 	struct page_cgroup *base, *pc;
@@ -106,19 +107,29 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
 
 	section = __pfn_to_section(pfn);
 
-	if (section->page_cgroup)
-		return 0;
-
-	nid = page_to_nid(pfn_to_page(pfn));
-
-	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-	if (slab_is_available()) {
-		base = kmalloc_node(table_size, GFP_KERNEL, nid);
-		if (!base)
-			base = vmalloc_node(table_size, nid);
-	} else {
-		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+	if (!section->page_cgroup) {
+		nid = page_to_nid(pfn_to_page(pfn));
+		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+		if (slab_is_available()) {
+			base = kmalloc_node(table_size, GFP_KERNEL, nid);
+			if (!base)
+				base = vmalloc_node(table_size, nid);
+		} else {
+			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+				table_size,
 				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+		}
+	} else {
+		/*
+		 * We don't have to allocate page_cgroup again, but
+		 * address of memmap may be changed. So, we have to initialize
+		 * again.
+		 */
+		base = section->page_cgroup + pfn;
+		table_size = 0;
+		/* check address of memmap is changed or not. */
+		if (base->page == pfn_to_page(pfn))
+			return 0;
 	}
 
 	if (!base) {
@@ -158,7 +169,7 @@ void __free_page_cgroup(unsigned long pfn)
 	}
 }
 
-int online_page_cgroup(unsigned long start_pfn,
+int __meminit online_page_cgroup(unsigned long start_pfn,
 			unsigned long nr_pages,
 			int nid)
 {
@@ -183,7 +194,7 @@ int online_page_cgroup(unsigned long start_pfn,
 	return -ENOMEM;
 }
 
-int offline_page_cgroup(unsigned long start_pfn,
+int __meminit offline_page_cgroup(unsigned long start_pfn,
 		unsigned long nr_pages, int nid)
 {
 	unsigned long start, end, pfn;
@@ -197,7 +208,7 @@ int offline_page_cgroup(unsigned long start_pfn,
 
 }
 
-static int page_cgroup_callback(struct notifier_block *self,
+static int __meminit page_cgroup_callback(struct notifier_block *self,
 			       unsigned long action, void *arg)
 {
 	struct memory_notify *mn = arg;
@@ -207,18 +218,23 @@ static int page_cgroup_callback(struct notifier_block *self,
 		ret = online_page_cgroup(mn->start_pfn,
 				mn->nr_pages, mn->status_change_nid);
 		break;
-	case MEM_CANCEL_ONLINE:
 	case MEM_OFFLINE:
 		offline_page_cgroup(mn->start_pfn,
 				mn->nr_pages, mn->status_change_nid);
 		break;
+	case MEM_CANCEL_ONLINE:
 	case MEM_GOING_OFFLINE:
 		break;
 	case MEM_ONLINE:
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-	ret = notifier_from_errno(ret);
+
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
+
 	return ret;
 }
 
@@ -248,7 +264,7 @@ void __init page_cgroup_init(void)
248 " want\n"); 264 " want\n");
249} 265}
250 266
251void __init pgdat_page_cgroup_init(struct pglist_data *pgdat) 267void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
252{ 268{
253 return; 269 return;
254} 270}
diff --git a/mm/slub.c b/mm/slub.c
index 7ad489af9561..749588a50a5a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2931,8 +2931,10 @@ static int slab_memory_callback(struct notifier_block *self,
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-
-	ret = notifier_from_errno(ret);
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
 	return ret;
 }
 
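The page_cgroup.c and slub.c hunks above make the same correction: a memory-hotplug notifier callback now reports success explicitly as NOTIFY_OK and reserves notifier_from_errno() for real errors, instead of feeding a zero result through it. A condensed sketch of the fixed callback tail; the callback name and the failure stand-in are made up, while notifier_from_errno(), NOTIFY_OK and the MEM_* actions are the real interfaces used above.

#include <linux/errno.h>
#include <linux/memory.h>
#include <linux/notifier.h>

static int example_mem_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = -ENOMEM;		/* stand-in for a real failure path */
		break;
	default:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);	/* failure: report the errno */
	else
		ret = NOTIFY_OK;		/* success: never via notifier_from_errno() */
	return ret;
}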
diff --git a/mm/sparse.c b/mm/sparse.c
index 39db301b920d..083f5b63e7a8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -570,7 +570,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 			   int nr_pages)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
diff --git a/mm/swap.c b/mm/swap.c
index 2152e48a7b8f..2881987603eb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -445,6 +445,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
+		int file;
 
 		if (pagezone != zone) {
 			if (zone)
@@ -456,8 +457,12 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		if (is_active_lru(lru))
+		file = is_file_lru(lru);
+		zone->recent_scanned[file]++;
+		if (is_active_lru(lru)) {
 			SetPageActive(page);
+			zone->recent_rotated[file]++;
+		}
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fac..f3f6e0758562 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	flush_cache_vunmap(addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -324,14 +323,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
@@ -362,7 +361,7 @@ retry:
 			goto found;
 	}
 
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 		n = rb_next(&first->rb_node);
@@ -522,24 +521,45 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
+}
+
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+	flush_cache_vunmap(va->va_start, va->va_end);
+	free_unmap_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -723,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area(vb->va);
+	free_unmap_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -785,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
 	order = get_order(size);
 
 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c141b3e78071..62e7f62fb559 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
 
@@ -1245,6 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as
 	 * rotated, even though they are moved to the inactive list.
@@ -1260,7 +1264,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
-	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	file_prio = 200 - sc->swappiness;
 
 	/*
-	 *                  anon        recent_rotated[0]
-	 * %anon = 100 * ----------- / ----------------- * IO cost
-	 *               anon + file       rotate_sum
+	 * The amount of pressure on anon vs file pages is inversely
+	 * proportional to the fraction of recently scanned pages on
+	 * each list that were recently referenced and in active use.
 	 */
 	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
 	ap /= zone->recent_rotated[0] + 1;
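A rough worked example of the anon-side pressure calculation kept in the last two lines above. The numbers are invented; anon_prio is presumably sc->swappiness, given file_prio = 200 - sc->swappiness in the context, and the file-side counterpart (fp) is computed symmetrically outside this hunk.

/*
 * Invented numbers, swappiness = 60, so anon_prio = 60:
 *
 *	recent_scanned[0] = 1000, recent_rotated[0] = 400
 *	ap = (60 + 1) * (1000 + 1) / (400 + 1)  which is roughly 152
 *
 * If most scanned anon pages had been rotated (recently referenced),
 * recent_rotated[0] grows, the denominator grows, and ap shrinks,
 * i.e. less reclaim pressure on the anon list, matching the reworded
 * comment above.
 */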