path: root/mm/backing-dev.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 14:29:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 14:29:09 -0500
commit	9360b53661a2c7754517b2925580055bacc8ec38 (patch)
tree	58a0c6f266524247baf87d21bf41ac3bc395cf8a /mm/backing-dev.c
parent	fa4c95bfdb85d568ae327d57aa33a4f55bab79c4 (diff)
Revert "bdi: add a user-tunable cpu_list for the bdi flusher threads"
This reverts commit 8fa72d234da9b6b473bbb1f74d533663e4996e6b.

People disagree about how this should be done, so let's revert this for
now so that nobody starts using the new tuning interface.  Tejun is
thinking about a more generic interface for thread pool affinity.

Requested-by: Tejun Heo <tj@kernel.org>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
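For context, the knob being reverted was a per-bdi sysfs attribute that
accepted a cpulist string (parsed in the kernel by cpulist_parse()) and
bound the flusher thread to those CPUs via set_cpus_allowed_ptr().  A
minimal userspace sketch of how it was driven is below; this is
illustrative only.  The bdi name "8:0" is a placeholder for a real
block device, the exact sysfs path is an assumption, and the attribute
no longer exists once this revert is applied.

	/* Illustrative only: write a cpulist string, e.g. "0-3", to the
	 * (now-removed) per-bdi cpu_list attribute.  The device name
	 * "8:0" and the path under /sys/class/bdi are placeholders. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/bdi/8:0/cpu_list", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* cpulist format, as accepted by cpulist_parse() */
		if (fputs("0-3\n", f) == EOF)
			perror("fputs");
		return fclose(f) ? 1 : 0;
	}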
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--	mm/backing-dev.c	84
1 file changed, 0 insertions(+), 84 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index bd6a6cabef71..d3ca2b3ee176 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/writeback.h>
 #include <linux/device.h>
-#include <linux/slab.h>
 #include <trace/events/writeback.h>
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
@@ -222,63 +221,12 @@ static ssize_t max_ratio_store(struct device *dev,
 }
 BDI_SHOW(max_ratio, bdi->max_ratio)
 
-static ssize_t cpu_list_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct backing_dev_info *bdi = dev_get_drvdata(dev);
-	struct bdi_writeback *wb = &bdi->wb;
-	cpumask_var_t newmask;
-	ssize_t ret;
-	struct task_struct *task;
-
-	if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
-		return -ENOMEM;
-
-	ret = cpulist_parse(buf, newmask);
-	if (!ret) {
-		spin_lock_bh(&bdi->wb_lock);
-		task = wb->task;
-		if (task)
-			get_task_struct(task);
-		spin_unlock_bh(&bdi->wb_lock);
-
-		mutex_lock(&bdi->flusher_cpumask_lock);
-		if (task) {
-			ret = set_cpus_allowed_ptr(task, newmask);
-			put_task_struct(task);
-		}
-		if (ret == 0) {
-			cpumask_copy(bdi->flusher_cpumask, newmask);
-			ret = count;
-		}
-		mutex_unlock(&bdi->flusher_cpumask_lock);
-
-	}
-	free_cpumask_var(newmask);
-
-	return ret;
-}
-
-static ssize_t cpu_list_show(struct device *dev,
-		struct device_attribute *attr, char *page)
-{
-	struct backing_dev_info *bdi = dev_get_drvdata(dev);
-	ssize_t ret;
-
-	mutex_lock(&bdi->flusher_cpumask_lock);
-	ret = cpulist_scnprintf(page, PAGE_SIZE-1, bdi->flusher_cpumask);
-	mutex_unlock(&bdi->flusher_cpumask_lock);
-
-	return ret;
-}
-
 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
 
 static struct device_attribute bdi_dev_attrs[] = {
 	__ATTR_RW(read_ahead_kb),
 	__ATTR_RW(min_ratio),
 	__ATTR_RW(max_ratio),
-	__ATTR_RW(cpu_list),
 	__ATTR_NULL,
 };
 
@@ -480,7 +428,6 @@ static int bdi_forker_thread(void *ptr)
 			writeback_inodes_wb(&bdi->wb, 1024,
 					    WB_REASON_FORKER_THREAD);
 		} else {
-			int ret;
 			/*
 			 * The spinlock makes sure we do not lose
 			 * wake-ups when racing with 'bdi_queue_work()'.
@@ -490,14 +437,6 @@ static int bdi_forker_thread(void *ptr)
 			spin_lock_bh(&bdi->wb_lock);
 			bdi->wb.task = task;
 			spin_unlock_bh(&bdi->wb_lock);
-			mutex_lock(&bdi->flusher_cpumask_lock);
-			ret = set_cpus_allowed_ptr(task,
-						   bdi->flusher_cpumask);
-			mutex_unlock(&bdi->flusher_cpumask_lock);
-			if (ret)
-				printk_once("%s: failed to bind flusher"
-					    " thread %s, error %d\n",
-					    __func__, task->comm, ret);
 			wake_up_process(task);
 		}
 		bdi_clear_pending(bdi);
@@ -570,17 +509,6 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 				       dev_name(dev));
 		if (IS_ERR(wb->task))
 			return PTR_ERR(wb->task);
-	} else {
-		int node;
-		/*
-		 * Set up a default cpumask for the flusher threads that
-		 * includes all cpus on the same numa node as the device.
-		 * The mask may be overridden via sysfs.
-		 */
-		node = dev_to_node(bdi->dev);
-		if (node != NUMA_NO_NODE)
-			cpumask_copy(bdi->flusher_cpumask,
-				     cpumask_of_node(node));
 	}
 
 	bdi_debug_register(bdi, dev_name(dev));
@@ -706,15 +634,6 @@ int bdi_init(struct backing_dev_info *bdi)
 
 	bdi_wb_init(&bdi->wb, bdi);
 
-	if (!bdi_cap_flush_forker(bdi)) {
-		bdi->flusher_cpumask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
-		if (!bdi->flusher_cpumask)
-			return -ENOMEM;
-		cpumask_setall(bdi->flusher_cpumask);
-		mutex_init(&bdi->flusher_cpumask_lock);
-	} else
-		bdi->flusher_cpumask = NULL;
-
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
 		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
 		if (err)
@@ -737,7 +656,6 @@ int bdi_init(struct backing_dev_info *bdi)
 err:
 		while (i--)
 			percpu_counter_destroy(&bdi->bdi_stat[i]);
-		kfree(bdi->flusher_cpumask);
 	}
 
 	return err;
@@ -765,8 +683,6 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
 	bdi_unregister(bdi);
 
-	kfree(bdi->flusher_cpumask);
-
 	/*
 	 * If bdi_unregister() had already been called earlier, the
 	 * wakeup_timer could still be armed because bdi_prune_sb()