path: root/mm
author	Mark Brown <broonie@opensource.wolfsonmicro.com>	2011-12-12 11:19:20 -0500
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2011-12-12 11:19:20 -0500
commit	68556ca1e03d6a35be3b315eba58df2f8176e3a0 (patch)
tree	36a390d29a0d03a59a90c0f223b0d98a80f0f6c3 /mm
parent	0604ca48f1689ad06144b81f5c08f297b6edd831 (diff)
parent	8ab30691826fc05efa47c4ffba19b80496bb3a2c (diff)
Merge branch 'mfd/wm8994' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/misc into for-3.3
Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c	8
-rw-r--r--	mm/hugetlb.c	2
-rw-r--r--	mm/nommu.c	2
-rw-r--r--	mm/oom_kill.c	5
-rw-r--r--	mm/page-writeback.c	23
-rw-r--r--	mm/vmalloc.c	27
6 files changed, 36 insertions, 31 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a0860640378d..71034f41a2ba 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -724,6 +724,14 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
 	bdi_unregister(bdi);
 
+	/*
+	 * If bdi_unregister() had already been called earlier, the
+	 * wakeup_timer could still be armed because bdi_prune_sb()
+	 * can race with the bdi_wakeup_thread_delayed() calls from
+	 * __mark_inode_dirty().
+	 */
+	del_timer_sync(&bdi->wb.wakeup_timer);
+
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
 		percpu_counter_destroy(&bdi->bdi_stat[i]);
 
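The added del_timer_sync() matters because it both deactivates the timer and waits for a handler already running on another CPU to finish, so nothing can fire after teardown. A minimal sketch of that pattern, with a hypothetical struct and destructor (not from mm/), might look like:

#include <linux/timer.h>
#include <linux/slab.h>

struct my_dev {				/* hypothetical object owning a timer */
	struct timer_list wakeup_timer;
};

static void my_dev_destroy(struct my_dev *dev)
{
	/*
	 * Plain del_timer() would be insufficient here: the handler
	 * could still be mid-flight on another CPU and touch freed
	 * memory. del_timer_sync() waits until it has completed.
	 */
	del_timer_sync(&dev->wakeup_timer);
	kfree(dev);
}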
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dae27ba3be2c..bb28a5f9db8d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2422,6 +2422,8 @@ retry_avoidcopy:
 	 * anon_vma prepared.
 	 */
 	if (unlikely(anon_vma_prepare(vma))) {
+		page_cache_release(new_page);
+		page_cache_release(old_page);
 		/* Caller expects lock to be held */
 		spin_lock(&mm->page_table_lock);
 		return VM_FAULT_OOM;
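This hunk plugs a page reference leak: every reference taken before a failure point must be dropped before the error return. A hedged sketch of the rule, with a hypothetical failure predicate standing in for anon_vma_prepare():

#include <linux/mm.h>
#include <linux/pagemap.h>

static int copy_with_cleanup(struct page *old_page, struct page *new_page)
{
	if (some_preparation_fails()) {		/* hypothetical failure point */
		page_cache_release(new_page);	/* drop ref taken at allocation */
		page_cache_release(old_page);	/* drop ref taken at lookup */
		return VM_FAULT_OOM;
	}
	return 0;
}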
diff --git a/mm/nommu.c b/mm/nommu.c
index 73419c55eda6..b982290fd962 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -454,7 +454,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
  * between processes, it syncs the pagetable across all
  * processes.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	BUG();
 	return NULL;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 471dedb463ab..76f2c5ae908e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -185,6 +185,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 	if (!p)
 		return 0;
 
+	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+		task_unlock(p);
+		return 0;
+	}
+
 	/*
 	 * The memory controller may have a limit of 0 bytes, so avoid a divide
 	 * by zero, if necessary.
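Note that the new early return must call task_unlock(p) first, because oom_badness() reaches this point with the task lock held (taken via find_lock_task_mm()). An illustrative sketch of keeping the lock balanced on every exit path, with hypothetical function names:

#include <linux/sched.h>
#include <linux/oom.h>

static unsigned int badness_sketch(struct task_struct *p)
{
	task_lock(p);	/* the real code takes this in find_lock_task_mm() */

	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);	/* unlock on the early return, too */
		return 0;	/* "never kill" tasks always score zero */
	}

	task_unlock(p);
	return 1;		/* placeholder for the real heuristic */
}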
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3278f005230..71252486bc6f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
 *
 */
static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
 
/*
 * couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
-	prop_change_shift(&vm_dirties, shift);
 
	writeback_set_ratelimit();
}
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-void task_dirty_inc(struct task_struct *tsk)
-{
-	prop_inc_single(&vm_dirties, &tsk->dirties);
-}
-
/*
 * Obtain an accurate fraction of the BDI's portion.
 */
@@ -1133,17 +1126,17 @@ pause:
 					  pages_dirtied,
 					  pause,
 					  start_time);
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
 		io_schedule_timeout(pause);
 
-		dirty_thresh = hard_dirty_limit(dirty_thresh);
 		/*
-		 * max-pause area. If dirty exceeded but still within this
-		 * area, no need to sleep for more than 200ms: (a) 8 pages per
-		 * 200ms is typically more than enough to curb heavy dirtiers;
-		 * (b) the pause time limit makes the dirtiers more responsive.
+		 * This is typically equal to (nr_dirty < dirty_thresh) and can
+		 * also keep "1000+ dd on a slow USB stick" under control.
 		 */
-		if (nr_dirty < dirty_thresh)
+		if (task_ratelimit)
+			break;
+
+		if (fatal_signal_pending(current))
 			break;
 	}
 
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
 
	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
-	prop_descriptor_init(&vm_dirties, shift);
}
 
/**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
-		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
 	}
 }
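The throttling hunk above switches the dirtier's sleep from TASK_UNINTERRUPTIBLE to TASK_KILLABLE and checks fatal_signal_pending(), so a task stuck in writeback throttling can still be SIGKILLed. A hedged sketch of that wait pattern, with hypothetical helpers in place of the real balance_dirty_pages() logic:

#include <linux/sched.h>

static void throttle_until_clean(void)
{
	for (;;) {
		long pause = HZ / 10;		/* illustrative 100ms pause */

		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		if (dirty_below_threshold())	/* hypothetical predicate */
			break;
		if (fatal_signal_pending(current))
			break;			/* SIGKILL gets through */
	}
}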
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b669aa6f6caf..3231bf332878 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2141,23 +2141,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
-	/* apply_to_page_range() does all the hard work. */
+	pte_t ***p = data;
+
+	if (p) {
+		*(*p) = pte;
+		(*p)++;
+	}
	return 0;
}
 
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
+ * @ptes:	returns the PTEs for the address space
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
- * are created.  If the kernel address space is not shared
- * between processes, it syncs the pagetable across all
- * processes.
+ * are created.
+ *
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
 */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;
 
@@ -2171,19 +2178,11 @@ struct vm_struct *alloc_vm_area(size_t size)
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-				area->size, f, NULL)) {
+				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}
 
-	/*
-	 * If the allocated address space is passed to a hypercall
-	 * before being used then we cannot rely on a page fault to
-	 * trigger an update of the page tables.  So sync all the page
-	 * tables here.
-	 */
-	vmalloc_sync_all();
-
	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
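With the new signature (mirrored by the nommu.c stub above), a caller can receive one init_mm PTE pointer per page and install mappings by writing those PTEs directly, instead of relying on the removed vmalloc_sync_all() call. A sketch of how a caller might use it; the ring names and page count are hypothetical:

#include <linux/vmalloc.h>
#include <linux/mm.h>

#define NR_RING_PAGES 4			/* hypothetical ring size */

static pte_t *ring_ptes[NR_RING_PAGES];
static struct vm_struct *ring_area;

static int map_ring(void)
{
	/* ring_ptes must have room for one entry per page */
	ring_area = alloc_vm_area(NR_RING_PAGES * PAGE_SIZE, ring_ptes);
	if (!ring_area)
		return -ENOMEM;

	/*
	 * ring_ptes[i] now points at the init_mm PTE backing the i-th
	 * page of ring_area->addr; the caller can set those PTEs
	 * directly, e.g. when mapping pages granted by a hypervisor.
	 */
	return 0;
}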