author	Ingo Molnar <mingo@elte.hu>	2008-12-25 07:51:46 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-25 07:51:46 -0500
commit	0b271ef4521756010675b1611bef20fd3096790d (patch)
tree	2c9d22a2c74122a9904e533df27f41d63ffef394 /mm
parent	b19b3c74c7bbec45a848631b8f970ac110665a01 (diff)
parent	4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
Merge commit 'v2.6.28' into core/core
Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c	3
-rw-r--r--	mm/memory_hotplug.c	9
-rw-r--r--	mm/migrate.c	59
-rw-r--r--	mm/page_cgroup.c	59
-rw-r--r--	mm/slob.c	2
-rw-r--r--	mm/slub.c	8
-rw-r--r--	mm/sparse.c	2
-rw-r--r--	mm/swap.c	20
-rw-r--r--	mm/vmalloc.c	22
-rw-r--r--	mm/vmscan.c	2
10 files changed, 124 insertions, 62 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f2e574dbc300..801c08b046e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -176,6 +176,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	int ret = 0;
 	struct device *dev;
 
+	if (bdi->dev)	/* The driver needs to use separate queues per device */
+		goto exit;
+
 	va_start(args, fmt);
 	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 	va_end(args);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b5b2b15085a8..b17371185468 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -189,7 +189,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 					pgdat->node_start_pfn;
 }
 
-static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
@@ -216,7 +216,7 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	return 0;
 }
 
-static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
 	int nr_pages = PAGES_PER_SECTION;
 	int ret;
@@ -273,7 +273,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
+int __ref __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 			unsigned long nr_pages)
 {
 	unsigned long i;
@@ -470,7 +470,8 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 
-int add_memory(int nid, u64 start, u64 size)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+int __ref add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat = NULL;
 	int new_pgdat = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index 1e0d6b237f44..037b0967c1e3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -987,25 +987,18 @@ out:
 /*
  * Determine the nodes of an array of pages and store it in an array of status.
  */
-static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
-			 const void __user * __user *pages,
-			 int __user *status)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+				const void __user **pages, int *status)
 {
 	unsigned long i;
-	int err;
 
 	down_read(&mm->mmap_sem);
 
 	for (i = 0; i < nr_pages; i++) {
-		const void __user *p;
-		unsigned long addr;
+		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
 		struct page *page;
-
-		err = -EFAULT;
-		if (get_user(p, pages+i))
-			goto out;
-		addr = (unsigned long) p;
+		int err = -EFAULT;
 
 		vma = find_vma(mm, addr);
 		if (!vma)
@@ -1024,12 +1017,52 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 
 		err = page_to_nid(page);
 set_status:
-		put_user(err, status+i);
+		*status = err;
+
+		pages++;
+		status++;
+	}
+
+	up_read(&mm->mmap_sem);
+}
+
+/*
+ * Determine the nodes of a user array of pages and store it in
+ * a user array of status.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+			 const void __user * __user *pages,
+			 int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+	int err;
+
+	for (i = 0; i < nr_pages; i += chunk_nr) {
+		if (chunk_nr + i > nr_pages)
+			chunk_nr = nr_pages - i;
+
+		err = copy_from_user(chunk_pages, &pages[i],
+				     chunk_nr * sizeof(*chunk_pages));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+		err = copy_to_user(&status[i], chunk_status,
+				   chunk_nr * sizeof(*chunk_status));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
 	}
 	err = 0;
 
 out:
-	up_read(&mm->mmap_sem);
 	return err;
 }
 
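The rework above splits the old do_pages_stat() in two: do_pages_stat_array() resolves an in-kernel array of page pointers while mmap_sem is held, and the new do_pages_stat() copies the user-supplied pointer array in chunks of DO_PAGES_STAT_CHUNK_NR entries with copy_from_user()/copy_to_user(), instead of doing a get_user()/put_user() per page under the semaphore. For orientation only, a minimal userspace sketch of the status-query path that ends up in do_pages_stat(); it is not part of this patch and assumes libnuma's move_pages(2) wrapper (<numaif.h>, link with -lnuma):

/*
 * Hypothetical userspace sketch (not from this patch): query page placement
 * via move_pages(2) with nodes == NULL, which is the path served by
 * do_pages_stat(). Build with: cc -o pgstat pgstat.c -lnuma
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	enum { NR_PAGES = 64 };
	void *pages[NR_PAGES];
	int status[NR_PAGES];
	char *buf;

	if (posix_memalign((void **)&buf, page_size, NR_PAGES * page_size))
		return 1;

	for (int i = 0; i < NR_PAGES; i++) {
		buf[i * page_size] = 1;		/* fault the page in */
		pages[i] = buf + i * page_size;
	}

	/* nodes == NULL: only report the node of each page in status[] */
	if (move_pages(0, NR_PAGES, pages, NULL, status, 0) < 0) {
		perror("move_pages");
		return 1;
	}

	for (int i = 0; i < NR_PAGES; i++)
		printf("page %d on node %d\n", i, status[i]);

	free(buf);
	return 0;
}

Passing nodes == NULL asks the kernel only to report each page's node in status[], so the whole request is served by do_pages_stat() rather than by the migration path.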
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 1223d927904d..ab27ff750519 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -21,7 +21,7 @@ static unsigned long total_usage;
 #if !defined(CONFIG_SPARSEMEM)
 
 
-void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 {
 	pgdat->node_page_cgroup = NULL;
 }
@@ -49,6 +49,9 @@ static int __init alloc_node_page_cgroup(int nid)
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
 
+	if (!nr_pages)
+		return 0;
+
 	table_size = sizeof(struct page_cgroup) * nr_pages;
 
 	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
@@ -97,7 +100,8 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 	return section->page_cgroup + pfn;
 }
 
-int __meminit init_section_page_cgroup(unsigned long pfn)
+/* __alloc_bootmem...() is protected by !slab_available() */
+int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
 	struct mem_section *section;
 	struct page_cgroup *base, *pc;
@@ -106,19 +110,29 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
 
 	section = __pfn_to_section(pfn);
 
-	if (section->page_cgroup)
-		return 0;
-
-	nid = page_to_nid(pfn_to_page(pfn));
-
-	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-	if (slab_is_available()) {
-		base = kmalloc_node(table_size, GFP_KERNEL, nid);
-		if (!base)
-			base = vmalloc_node(table_size, nid);
-	} else {
-		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+	if (!section->page_cgroup) {
+		nid = page_to_nid(pfn_to_page(pfn));
+		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+		if (slab_is_available()) {
+			base = kmalloc_node(table_size, GFP_KERNEL, nid);
+			if (!base)
+				base = vmalloc_node(table_size, nid);
+		} else {
+			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+				table_size,
 			PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+		}
+	} else {
+		/*
+		 * We don't have to allocate page_cgroup again, but
+		 * address of memmap may be changed. So, we have to initialize
+		 * again.
+		 */
+		base = section->page_cgroup + pfn;
+		table_size = 0;
+		/* check address of memmap is changed or not. */
+		if (base->page == pfn_to_page(pfn))
+			return 0;
 	}
 
 	if (!base) {
@@ -158,7 +172,7 @@ void __free_page_cgroup(unsigned long pfn)
 	}
 }
 
-int online_page_cgroup(unsigned long start_pfn,
+int __meminit online_page_cgroup(unsigned long start_pfn,
 			unsigned long nr_pages,
 			int nid)
 {
@@ -183,7 +197,7 @@ int online_page_cgroup(unsigned long start_pfn,
 	return -ENOMEM;
 }
 
-int offline_page_cgroup(unsigned long start_pfn,
+int __meminit offline_page_cgroup(unsigned long start_pfn,
 		unsigned long nr_pages, int nid)
 {
 	unsigned long start, end, pfn;
@@ -197,7 +211,7 @@ int offline_page_cgroup(unsigned long start_pfn,
 
 }
 
-static int page_cgroup_callback(struct notifier_block *self,
+static int __meminit page_cgroup_callback(struct notifier_block *self,
 			       unsigned long action, void *arg)
 {
 	struct memory_notify *mn = arg;
@@ -207,18 +221,23 @@ static int page_cgroup_callback(struct notifier_block *self,
 		ret = online_page_cgroup(mn->start_pfn,
 				   mn->nr_pages, mn->status_change_nid);
 		break;
-	case MEM_CANCEL_ONLINE:
 	case MEM_OFFLINE:
 		offline_page_cgroup(mn->start_pfn,
 				mn->nr_pages, mn->status_change_nid);
 		break;
+	case MEM_CANCEL_ONLINE:
 	case MEM_GOING_OFFLINE:
 		break;
 	case MEM_ONLINE:
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-	ret = notifier_from_errno(ret);
+
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
+
 	return ret;
 }
 
@@ -248,7 +267,7 @@ void __init page_cgroup_init(void)
 	" want\n");
 }
 
-void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 {
 	return;
 }
diff --git a/mm/slob.c b/mm/slob.c
index cb675d126791..bf7e8fc3aed8 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		flags, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
diff --git a/mm/slub.c b/mm/slub.c
index 7ad489af9561..a2cd47d89e0a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2931,8 +2931,10 @@ static int slab_memory_callback(struct notifier_block *self,
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-
-	ret = notifier_from_errno(ret);
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
 	return ret;
 }
 
@@ -3595,7 +3597,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
 			break;
 		len += sprintf(buf + len, "%7ld ", l->count);
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 39db301b920d..083f5b63e7a8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -570,7 +570,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 			   int nr_pages)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
diff --git a/mm/swap.c b/mm/swap.c
index 2152e48a7b8f..b135ec90cdeb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -299,7 +299,6 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
-#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
@@ -313,18 +312,6 @@ int lru_add_drain_all(void)
 	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
-	lru_add_drain();
-	return 0;
-}
-#endif
-
 /*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages. If it fell to zero then remove the page from the LRU and
@@ -445,6 +432,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
+		int file;
 
 		if (pagezone != zone) {
 			if (zone)
@@ -456,8 +444,12 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		if (is_active_lru(lru))
+		file = is_file_lru(lru);
+		zone->recent_scanned[file]++;
+		if (is_active_lru(lru)) {
 			SetPageActive(page);
+			zone->recent_rotated[file]++;
+		}
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 30f826d484f0..1ddb77ba3995 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	flush_cache_vunmap(addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -543,9 +542,10 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -553,6 +553,15 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	try_purge_vmap_area_lazy();
 }
 
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+	flush_cache_vunmap(va->va_start, va->va_end);
+	free_unmap_vmap_area_noflush(va);
+}
+
 static struct vmap_area *find_vmap_area(unsigned long addr)
 {
 	struct vmap_area *va;
@@ -734,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area(vb->va);
+	free_unmap_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -796,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
 	order = get_order(size);
 
 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
@@ -1705,7 +1717,7 @@ static int s_show(struct seq_file *m, void *p)
 		v->addr, v->addr + v->size, v->size);
 
 	if (v->caller) {
-		char buff[2 * KSYM_NAME_LEN];
+		char buff[KSYM_SYMBOL_LEN];
 
 		seq_putc(m, ' ');
 		sprint_symbol(buff, (unsigned long)v->caller);
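Taken together, the vmalloc.c hunks move flush_cache_vunmap() out of vunmap_page_range() and up into the outermost callers: free_unmap_vmap_area() flushes the whole area before delegating to the new free_unmap_vmap_area_noflush(), vb_free() flushes exactly the sub-range being released, and free_vmap_block() switches to the _noflush variant because its sub-ranges were already flushed as they were freed. As a hypothetical illustration (module and function names below are invented, not part of this patch), a test-module fragment using the vm_map_ram()/vm_unmap_ram() interface whose free path reaches vb_free():

/*
 * Hypothetical test-module fragment (not from this patch): exercises the
 * vm_map_ram()/vm_unmap_ram() interface whose free path reaches vb_free().
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/string.h>

#define NR_SCRATCH_PAGES 4	/* small enough to stay on the vmap block path */

static struct page *scratch_pages[NR_SCRATCH_PAGES];

static int __init vmap_demo_init(void)
{
	void *va;
	int i, ret = -ENOMEM;

	for (i = 0; i < NR_SCRATCH_PAGES; i++) {
		scratch_pages[i] = alloc_page(GFP_KERNEL);
		if (!scratch_pages[i])
			goto out_free;
	}

	va = vm_map_ram(scratch_pages, NR_SCRATCH_PAGES, -1, PAGE_KERNEL);
	if (!va)
		goto out_free;

	memset(va, 0, NR_SCRATCH_PAGES * PAGE_SIZE);

	/* vb_free() now does flush_cache_vunmap() on exactly this range */
	vm_unmap_ram(va, NR_SCRATCH_PAGES);
	ret = 0;

out_free:
	for (i = 0; i < NR_SCRATCH_PAGES; i++)
		if (scratch_pages[i])
			__free_page(scratch_pages[i]);
	return ret;
}

static void __exit vmap_demo_exit(void)
{
}

module_init(vmap_demo_init);
module_exit(vmap_demo_exit);
MODULE_LICENSE("GPL");

vm_map_ram() routes requests of at most VMAP_MAX_ALLOC pages through the per-CPU vmap block allocator, so the matching vm_unmap_ram() lands in vb_free() above; larger requests fall back to free_unmap_vmap_area().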
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ea1440b53db..62e7f62fb559 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1248,6 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as
 	 * rotated, even though they are moved to the inactive list.
@@ -1263,7 +1264,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
-	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);