author    Jiri Kosina <jkosina@suse.cz>  2010-08-11 03:36:51 -0400
committer Jiri Kosina <jkosina@suse.cz>  2010-08-11 03:36:51 -0400
commit    6396fc3b3ff3f6b942992b653a62df11dcef9bea (patch)
tree      db3c7cbe833b43c653adc99f70941431c5ff7c4e /mm
parent    4785879e4d340e24e54f6de2ccfc42728b912808 (diff)
parent    3d30701b58970425e1d45994d6cb82f828924fdd (diff)

Merge branch 'master' into for-next

Conflicts:
	fs/exofs/inode.c
Diffstat (limited to 'mm')
 -rw-r--r--  mm/backing-dev.c     | 445
 -rw-r--r--  mm/kmemleak.c        | 100
 -rw-r--r--  mm/page-writeback.c  |   5
 -rw-r--r--  mm/page_io.c         |   2
 -rw-r--r--  mm/shmem.c           |  23
 -rw-r--r--  mm/truncate.c        |  38
 6 files changed, 333 insertions(+), 280 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f9fd3dd3916b..08d357522e78 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/writeback.h>
 #include <linux/device.h>
+#include <trace/events/writeback.h>
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
@@ -49,8 +50,6 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
-static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
-
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -65,28 +64,21 @@ static void bdi_debug_init(void)
 static int bdi_debug_stats_show(struct seq_file *m, void *v)
 {
 	struct backing_dev_info *bdi = m->private;
-	struct bdi_writeback *wb;
+	struct bdi_writeback *wb = &bdi->wb;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
 	struct inode *inode;
 
-	/*
-	 * inode lock is enough here, the bdi->wb_list is protected by
-	 * RCU on the reader side
-	 */
 	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
 	spin_lock(&inode_lock);
-	list_for_each_entry(wb, &bdi->wb_list, list) {
-		nr_wb++;
-		list_for_each_entry(inode, &wb->b_dirty, i_list)
-			nr_dirty++;
-		list_for_each_entry(inode, &wb->b_io, i_list)
-			nr_io++;
-		list_for_each_entry(inode, &wb->b_more_io, i_list)
-			nr_more_io++;
-	}
+	list_for_each_entry(inode, &wb->b_dirty, i_list)
+		nr_dirty++;
+	list_for_each_entry(inode, &wb->b_io, i_list)
+		nr_io++;
+	list_for_each_entry(inode, &wb->b_more_io, i_list)
+		nr_more_io++;
 	spin_unlock(&inode_lock);
 
 	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
@@ -98,19 +90,16 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiDirtyThresh: %8lu kB\n"
 		   "DirtyThresh: %8lu kB\n"
 		   "BackgroundThresh: %8lu kB\n"
-		   "WritebackThreads: %8lu\n"
 		   "b_dirty: %8lu\n"
 		   "b_io: %8lu\n"
 		   "b_more_io: %8lu\n"
 		   "bdi_list: %8u\n"
-		   "state: %8lx\n"
-		   "wb_list: %8u\n",
+		   "state: %8lx\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
 		   K(bdi_thresh), K(dirty_thresh),
-		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
-		   !list_empty(&bdi->bdi_list), bdi->state,
-		   !list_empty(&bdi->wb_list));
+		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
+		   !list_empty(&bdi->bdi_list), bdi->state);
 #undef K
 
 	return 0;
@@ -247,7 +236,6 @@ static int __init default_bdi_init(void)
 	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
 	BUG_ON(IS_ERR(sync_supers_tsk));
 
-	init_timer(&sync_supers_timer);
 	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
 	bdi_arm_supers_timer();
 
@@ -259,77 +247,6 @@ static int __init default_bdi_init(void)
 }
 subsys_initcall(default_bdi_init);
 
-static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
-{
-	memset(wb, 0, sizeof(*wb));
-
-	wb->bdi = bdi;
-	wb->last_old_flush = jiffies;
-	INIT_LIST_HEAD(&wb->b_dirty);
-	INIT_LIST_HEAD(&wb->b_io);
-	INIT_LIST_HEAD(&wb->b_more_io);
-}
-
-static void bdi_task_init(struct backing_dev_info *bdi,
-			  struct bdi_writeback *wb)
-{
-	struct task_struct *tsk = current;
-
-	spin_lock(&bdi->wb_lock);
-	list_add_tail_rcu(&wb->list, &bdi->wb_list);
-	spin_unlock(&bdi->wb_lock);
-
-	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
-	set_freezable();
-
-	/*
-	 * Our parent may run at a different priority, just set us to normal
-	 */
-	set_user_nice(tsk, 0);
-}
-
-static int bdi_start_fn(void *ptr)
-{
-	struct bdi_writeback *wb = ptr;
-	struct backing_dev_info *bdi = wb->bdi;
-	int ret;
-
-	/*
-	 * Add us to the active bdi_list
-	 */
-	spin_lock_bh(&bdi_lock);
-	list_add_rcu(&bdi->bdi_list, &bdi_list);
-	spin_unlock_bh(&bdi_lock);
-
-	bdi_task_init(bdi, wb);
-
-	/*
-	 * Clear pending bit and wakeup anybody waiting to tear us down
-	 */
-	clear_bit(BDI_pending, &bdi->state);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&bdi->state, BDI_pending);
-
-	ret = bdi_writeback_task(wb);
-
-	/*
-	 * Remove us from the list
-	 */
-	spin_lock(&bdi->wb_lock);
-	list_del_rcu(&wb->list);
-	spin_unlock(&bdi->wb_lock);
-
-	/*
-	 * Flush any work that raced with us exiting. No new work
-	 * will be added, since this bdi isn't discoverable anymore.
-	 */
-	if (!list_empty(&bdi->work_list))
-		wb_do_writeback(wb, 1);
-
-	wb->task = NULL;
-	return ret;
-}
-
 int bdi_has_dirty_io(struct backing_dev_info *bdi)
 {
 	return wb_has_dirty_io(&bdi->wb);
@@ -348,10 +265,10 @@ static void bdi_flush_io(struct backing_dev_info *bdi)
 }
 
 /*
- * kupdated() used to do this. We cannot do it from the bdi_forker_task()
+ * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
  * to implement sync_supers_bdi() or similar and simply do it from the
- * bdi writeback tasks individually.
+ * bdi writeback thread individually.
  */
 static int bdi_sync_supers(void *unused)
 {
@@ -387,144 +304,198 @@ static void sync_supers_timer_fn(unsigned long unused)
 	bdi_arm_supers_timer();
 }
 
-static int bdi_forker_task(void *ptr)
+static void wakeup_timer_fn(unsigned long data)
+{
+	struct backing_dev_info *bdi = (struct backing_dev_info *)data;
+
+	spin_lock_bh(&bdi->wb_lock);
+	if (bdi->wb.task) {
+		trace_writeback_wake_thread(bdi);
+		wake_up_process(bdi->wb.task);
+	} else {
+		/*
+		 * When bdi tasks are inactive for long time, they are killed.
+		 * In this case we have to wake-up the forker thread which
+		 * should create and run the bdi thread.
+		 */
+		trace_writeback_wake_forker_thread(bdi);
+		wake_up_process(default_backing_dev_info.wb.task);
+	}
+	spin_unlock_bh(&bdi->wb_lock);
+}
+
+/*
+ * This function is used when the first inode for this bdi is marked dirty. It
+ * wakes-up the corresponding bdi thread which should then take care of the
+ * periodic background write-out of dirty inodes. Since the write-out would
+ * starts only 'dirty_writeback_interval' centisecs from now anyway, we just
+ * set up a timer which wakes the bdi thread up later.
+ *
+ * Note, we wouldn't bother setting up the timer, but this function is on the
+ * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
+ * by delaying the wake-up.
+ */
+void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
+{
+	unsigned long timeout;
+
+	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
+	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
+}
+
+/*
+ * Calculate the longest interval (jiffies) bdi threads are allowed to be
+ * inactive.
+ */
+static unsigned long bdi_longest_inactive(void)
+{
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
+	return max(5UL * 60 * HZ, interval);
+}
+
+static int bdi_forker_thread(void *ptr)
 {
 	struct bdi_writeback *me = ptr;
 
-	bdi_task_init(me->bdi, me);
+	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+	set_freezable();
+
+	/*
+	 * Our parent may run at a different priority, just set us to normal
+	 */
+	set_user_nice(current, 0);
 
 	for (;;) {
-		struct backing_dev_info *bdi, *tmp;
-		struct bdi_writeback *wb;
+		struct task_struct *task = NULL;
+		struct backing_dev_info *bdi;
+		enum {
+			NO_ACTION,   /* Nothing to do */
+			FORK_THREAD, /* Fork bdi thread */
+			KILL_THREAD, /* Kill inactive bdi thread */
+		} action = NO_ACTION;
 
 		/*
 		 * Temporary measure, we want to make sure we don't see
 		 * dirty data on the default backing_dev_info
 		 */
-		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
+		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
+			del_timer(&me->wakeup_timer);
 			wb_do_writeback(me, 0);
+		}
 
 		spin_lock_bh(&bdi_lock);
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		/*
-		 * Check if any existing bdi's have dirty data without
-		 * a thread registered. If so, set that up.
-		 */
-		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
-			if (bdi->wb.task)
-				continue;
-			if (list_empty(&bdi->work_list) &&
-			    !bdi_has_dirty_io(bdi))
+		list_for_each_entry(bdi, &bdi_list, bdi_list) {
+			bool have_dirty_io;
+
+			if (!bdi_cap_writeback_dirty(bdi) ||
+			     bdi_cap_flush_forker(bdi))
 				continue;
 
-			bdi_add_default_flusher_task(bdi);
-		}
+			WARN(!test_bit(BDI_registered, &bdi->state),
+			     "bdi %p/%s is not registered!\n", bdi, bdi->name);
 
-		set_current_state(TASK_INTERRUPTIBLE);
+			have_dirty_io = !list_empty(&bdi->work_list) ||
+					wb_has_dirty_io(&bdi->wb);
 
-		if (list_empty(&bdi_pending_list)) {
-			unsigned long wait;
+			/*
+			 * If the bdi has work to do, but the thread does not
+			 * exist - create it.
+			 */
+			if (!bdi->wb.task && have_dirty_io) {
+				/*
+				 * Set the pending bit - if someone will try to
+				 * unregister this bdi - it'll wait on this bit.
+				 */
+				set_bit(BDI_pending, &bdi->state);
+				action = FORK_THREAD;
+				break;
+			}
+
+			spin_lock(&bdi->wb_lock);
+
+			/*
+			 * If there is no work to do and the bdi thread was
+			 * inactive long enough - kill it. The wb_lock is taken
+			 * to make sure no-one adds more work to this bdi and
+			 * wakes the bdi thread up.
+			 */
+			if (bdi->wb.task && !have_dirty_io &&
+			    time_after(jiffies, bdi->wb.last_active +
+						bdi_longest_inactive())) {
+				task = bdi->wb.task;
+				bdi->wb.task = NULL;
+				spin_unlock(&bdi->wb_lock);
+				set_bit(BDI_pending, &bdi->state);
+				action = KILL_THREAD;
+				break;
+			}
+			spin_unlock(&bdi->wb_lock);
+		}
+		spin_unlock_bh(&bdi_lock);
 
-			spin_unlock_bh(&bdi_lock);
-			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
-			if (wait)
-				schedule_timeout(wait);
+		/* Keep working if default bdi still has things to do */
+		if (!list_empty(&me->bdi->work_list))
+			__set_current_state(TASK_RUNNING);
+
+		switch (action) {
+		case FORK_THREAD:
+			__set_current_state(TASK_RUNNING);
+			task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
+					   dev_name(bdi->dev));
+			if (IS_ERR(task)) {
+				/*
+				 * If thread creation fails, force writeout of
+				 * the bdi from the thread.
+				 */
+				bdi_flush_io(bdi);
+			} else {
+				/*
+				 * The spinlock makes sure we do not lose
+				 * wake-ups when racing with 'bdi_queue_work()'.
+				 */
+				spin_lock_bh(&bdi->wb_lock);
+				bdi->wb.task = task;
+				spin_unlock_bh(&bdi->wb_lock);
+			}
+			break;
+
+		case KILL_THREAD:
+			__set_current_state(TASK_RUNNING);
+			kthread_stop(task);
+			break;
+
+		case NO_ACTION:
+			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
+				/*
+				 * There are no dirty data. The only thing we
+				 * should now care about is checking for
+				 * inactive bdi threads and killing them. Thus,
+				 * let's sleep for longer time, save energy and
+				 * be friendly for battery-driven devices.
+				 */
				schedule_timeout(bdi_longest_inactive());
 			else
-				schedule();
+				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
 			try_to_freeze();
+			/* Back to the main loop */
 			continue;
 		}
 
-		__set_current_state(TASK_RUNNING);
-
-		/*
-		 * This is our real job - check for pending entries in
-		 * bdi_pending_list, and create the tasks that got added
-		 */
-		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
-				 bdi_list);
-		list_del_init(&bdi->bdi_list);
-		spin_unlock_bh(&bdi_lock);
-
-		wb = &bdi->wb;
-		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
-					dev_name(bdi->dev));
 		/*
-		 * If task creation fails, then readd the bdi to
-		 * the pending list and force writeout of the bdi
-		 * from this forker thread. That will free some memory
-		 * and we can try again.
+		 * Clear pending bit and wakeup anybody waiting to tear us down.
 		 */
-		if (IS_ERR(wb->task)) {
-			wb->task = NULL;
-
-			/*
-			 * Add this 'bdi' to the back, so we get
-			 * a chance to flush other bdi's to free
-			 * memory.
-			 */
-			spin_lock_bh(&bdi_lock);
-			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-			spin_unlock_bh(&bdi_lock);
-
-			bdi_flush_io(bdi);
-		}
+		clear_bit(BDI_pending, &bdi->state);
+		smp_mb__after_clear_bit();
+		wake_up_bit(&bdi->state, BDI_pending);
 	}
 
 	return 0;
 }
 
-static void bdi_add_to_pending(struct rcu_head *head)
-{
-	struct backing_dev_info *bdi;
-
-	bdi = container_of(head, struct backing_dev_info, rcu_head);
-	INIT_LIST_HEAD(&bdi->bdi_list);
-
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-	spin_unlock(&bdi_lock);
-
-	/*
-	 * We are now on the pending list, wake up bdi_forker_task()
-	 * to finish the job and add us back to the active bdi_list
-	 */
-	wake_up_process(default_backing_dev_info.wb.task);
-}
-
-/*
- * Add the default flusher task that gets created for any bdi
- * that has dirty data pending writeout
- */
-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
-{
-	if (!bdi_cap_writeback_dirty(bdi))
-		return;
-
-	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
-		printk(KERN_ERR "bdi %p/%s is not registered!\n",
-							bdi, bdi->name);
-		return;
-	}
-
-	/*
-	 * Check with the helper whether to proceed adding a task. Will only
-	 * abort if we two or more simultanous calls to
-	 * bdi_add_default_flusher_task() occured, further additions will block
-	 * waiting for previous additions to finish.
-	 */
-	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_del_rcu(&bdi->bdi_list);
-
-		/*
-		 * We must wait for the current RCU period to end before
-		 * moving to the pending list. So schedule that operation
-		 * from an RCU callback.
-		 */
-		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
-	}
-}
-
 /*
  * Remove bdi from bdi_list, and ensure that it is no longer visible
  */
@@ -541,23 +512,16 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
 	va_list args;
-	int ret = 0;
 	struct device *dev;
 
 	if (bdi->dev)	/* The driver needs to use separate queues per device */
-		goto exit;
+		return 0;
 
 	va_start(args, fmt);
 	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 	va_end(args);
-	if (IS_ERR(dev)) {
-		ret = PTR_ERR(dev);
-		goto exit;
-	}
-
-	spin_lock_bh(&bdi_lock);
-	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
-	spin_unlock_bh(&bdi_lock);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
 
 	bdi->dev = dev;
 
@@ -569,21 +533,21 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	if (bdi_cap_flush_forker(bdi)) {
 		struct bdi_writeback *wb = &bdi->wb;
 
-		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
+		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
 						dev_name(dev));
-		if (IS_ERR(wb->task)) {
-			wb->task = NULL;
-			ret = -ENOMEM;
-
-			bdi_remove_from_list(bdi);
-			goto exit;
-		}
+		if (IS_ERR(wb->task))
+			return PTR_ERR(wb->task);
 	}
 
 	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(BDI_registered, &bdi->state);
-exit:
-	return ret;
+
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	trace_writeback_bdi_register(bdi);
+	return 0;
 }
 EXPORT_SYMBOL(bdi_register);
 
@@ -598,31 +562,29 @@ EXPORT_SYMBOL(bdi_register_dev);
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
-	struct bdi_writeback *wb;
-
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
 	/*
-	 * If setup is pending, wait for that to complete first
+	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
-			TASK_UNINTERRUPTIBLE);
+	bdi_remove_from_list(bdi);
 
 	/*
-	 * Make sure nobody finds us on the bdi_list anymore
+	 * If setup is pending, wait for that to complete first
 	 */
-	bdi_remove_from_list(bdi);
+	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+			TASK_UNINTERRUPTIBLE);
 
 	/*
-	 * Finally, kill the kernel threads. We don't need to be RCU
+	 * Finally, kill the kernel thread. We don't need to be RCU
 	 * safe anymore, since the bdi is gone from visibility. Force
 	 * unfreeze of the thread before calling kthread_stop(), otherwise
 	 * it would never exet if it is currently stuck in the refrigerator.
 	 */
-	list_for_each_entry(wb, &bdi->wb_list, list) {
-		thaw_process(wb->task);
-		kthread_stop(wb->task);
+	if (bdi->wb.task) {
+		thaw_process(bdi->wb.task);
+		kthread_stop(bdi->wb.task);
 	}
 }
 
@@ -644,7 +606,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 void bdi_unregister(struct backing_dev_info *bdi)
 {
 	if (bdi->dev) {
+		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
+		del_timer_sync(&bdi->wb.wakeup_timer);
 
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
@@ -655,6 +619,18 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_unregister);
 
+static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+{
+	memset(wb, 0, sizeof(*wb));
+
+	wb->bdi = bdi;
+	wb->last_old_flush = jiffies;
+	INIT_LIST_HEAD(&wb->b_dirty);
+	INIT_LIST_HEAD(&wb->b_io);
+	INIT_LIST_HEAD(&wb->b_more_io);
+	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
+}
+
 int bdi_init(struct backing_dev_info *bdi)
 {
 	int i, err;
@@ -666,7 +642,6 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
 	INIT_LIST_HEAD(&bdi->bdi_list);
-	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
 
 	bdi_wb_init(&bdi->wb, bdi);
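The backing-dev.c changes above replace the per-bdi flusher task list with a single per-bdi thread plus a wakeup timer. A minimal sketch, assuming a hypothetical caller on the inode-dirtying fast path, of how the new bdi_wakeup_thread_delayed() helper is meant to be used (the helper and dirty_writeback_interval come from the patch itself; the surrounding function is illustrative only):

/* Sketch, not part of the commit: defer waking the flusher thread. */
static void example_mark_inode_dirty(struct inode *inode)
{
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	/* ... queue the inode on bdi->wb.b_dirty under inode_lock ... */

	/*
	 * Arm the per-bdi wakeup timer instead of calling wake_up_process()
	 * directly; wakeup_timer_fn() later wakes the bdi thread, or the
	 * forker thread if the bdi thread was killed for inactivity.
	 */
	bdi_wakeup_thread_delayed(bdi);
}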
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2c0d032ac898..bd9bc214091b 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -211,6 +211,9 @@ static signed long jiffies_scan_wait;
 static int kmemleak_stack_scan = 1;
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
+/* setting kmemleak=on, will set this var, skipping the disable */
+static int kmemleak_skip_disable;
+
 
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
@@ -398,7 +401,9 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 		object = prio_tree_entry(node, struct kmemleak_object,
 					 tree_node);
 		if (!alias && object->pointer != ptr) {
-			kmemleak_warn("Found object by alias");
+			pr_warning("Found object by alias at 0x%08lx\n", ptr);
+			dump_stack();
+			dump_object_info(object);
 			object = NULL;
 		}
 	} else
@@ -695,7 +700,7 @@ static void paint_ptr(unsigned long ptr, int color)
 }
 
 /*
- * Make a object permanently as gray-colored so that it can no longer be
+ * Mark an object permanently as gray-colored so that it can no longer be
  * reported as a leak. This is used in general to mark a false positive.
  */
 static void make_gray_object(unsigned long ptr)
@@ -838,10 +843,19 @@ out:
 	rcu_read_unlock();
 }
 
-/*
- * Memory allocation function callback. This function is called from the
- * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
- * vmalloc etc.).
+/**
+ * kmemleak_alloc - register a newly allocated object
+ * @ptr:	pointer to beginning of the object
+ * @size:	size of the object
+ * @min_count:	minimum number of references to this object. If during memory
+ *		scanning a number of references less than @min_count is found,
+ *		the object is reported as a memory leak. If @min_count is 0,
+ *		the object is never reported as a leak. If @min_count is -1,
+ *		the object is ignored (not scanned and not reported as a leak)
+ * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
+ *
+ * This function is called from the kernel allocators when a new object
+ * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
  */
 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 			  gfp_t gfp)
@@ -855,9 +869,12 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
-/*
- * Memory freeing function callback. This function is called from the kernel
- * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
+/**
+ * kmemleak_free - unregister a previously registered object
+ * @ptr:	pointer to beginning of the object
+ *
+ * This function is called from the kernel allocators when an object (memory
+ * block) is freed (kmem_cache_free, kfree, vfree etc.).
  */
 void __ref kmemleak_free(const void *ptr)
 {
@@ -870,9 +887,14 @@ void __ref kmemleak_free(const void *ptr)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
-/*
- * Partial memory freeing function callback. This function is usually called
- * from bootmem allocator when (part of) a memory block is freed.
+/**
+ * kmemleak_free_part - partially unregister a previously registered object
+ * @ptr:	pointer to the beginning or inside the object. This also
+ *		represents the start of the range to be freed
+ * @size:	size to be unregistered
+ *
+ * This function is called when only a part of a memory block is freed
+ * (usually from the bootmem allocator).
  */
 void __ref kmemleak_free_part(const void *ptr, size_t size)
 {
@@ -885,9 +907,12 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
-/*
- * Mark an already allocated memory block as a false positive. This will cause
- * the block to no longer be reported as leak and always be scanned.
+/**
+ * kmemleak_not_leak - mark an allocated object as false positive
+ * @ptr:	pointer to beginning of the object
+ *
+ * Calling this function on an object will cause the memory block to no longer
+ * be reported as leak and always be scanned.
  */
 void __ref kmemleak_not_leak(const void *ptr)
 {
@@ -900,10 +925,14 @@ void __ref kmemleak_not_leak(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
-/*
- * Ignore a memory block. This is usually done when it is known that the
- * corresponding block is not a leak and does not contain any references to
- * other allocated memory blocks.
+/**
+ * kmemleak_ignore - ignore an allocated object
+ * @ptr:	pointer to beginning of the object
+ *
+ * Calling this function on an object will cause the memory block to be
+ * ignored (not scanned and not reported as a leak). This is usually done when
+ * it is known that the corresponding block is not a leak and does not contain
+ * any references to other allocated memory blocks.
  */
 void __ref kmemleak_ignore(const void *ptr)
 {
@@ -916,8 +945,16 @@ void __ref kmemleak_ignore(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
-/*
- * Limit the range to be scanned in an allocated memory block.
+/**
+ * kmemleak_scan_area - limit the range to be scanned in an allocated object
+ * @ptr:	pointer to beginning or inside the object. This also
+ *		represents the start of the scan area
+ * @size:	size of the scan area
+ * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
+ *
+ * This function is used when it is known that only certain parts of an object
+ * contain references to other objects. Kmemleak will only scan these areas
+ * reducing the number false negatives.
  */
 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
@@ -930,8 +967,14 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
-/*
- * Inform kmemleak not to scan the given memory block.
+/**
+ * kmemleak_no_scan - do not scan an allocated object
+ * @ptr:	pointer to beginning of the object
+ *
+ * This function notifies kmemleak not to scan the given memory block. Useful
+ * in situations where it is known that the given object does not contain any
+ * references to other objects. Kmemleak will not scan such objects reducing
+ * the number of false negatives.
  */
 void __ref kmemleak_no_scan(const void *ptr)
 {
@@ -1602,7 +1645,9 @@ static int kmemleak_boot_config(char *str)
 		return -EINVAL;
 	if (strcmp(str, "off") == 0)
 		kmemleak_disable();
-	else if (strcmp(str, "on") != 0)
+	else if (strcmp(str, "on") == 0)
+		kmemleak_skip_disable = 1;
+	else
 		return -EINVAL;
 	return 0;
 }
@@ -1616,6 +1661,13 @@ void __init kmemleak_init(void)
 	int i;
 	unsigned long flags;
 
+#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
+	if (!kmemleak_skip_disable) {
+		kmemleak_disable();
+		return;
+	}
+#endif
+
 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
 
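The kernel-doc added above documents the kmemleak annotation hooks. A short usage sketch based on those comments (the driver context my_ctx/my_setup() is hypothetical; kmalloc() registration and kmemleak_not_leak() behave as described in the new documentation):

#include <linux/slab.h>
#include <linux/kmemleak.h>

struct my_ctx {
	void *buf;
};

static int my_setup(struct my_ctx *ctx)
{
	/* kmalloc() objects are registered with kmemleak automatically. */
	ctx->buf = kmalloc(4096, GFP_KERNEL);
	if (!ctx->buf)
		return -ENOMEM;

	/*
	 * The buffer is only reachable through a hardware register, so it
	 * would show up as a leak: mark it as a known false positive.
	 */
	kmemleak_not_leak(ctx->buf);
	return 0;
}

With CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF enabled, the new kmemleak=on boot parameter handled in kmemleak_boot_config() above is what keeps the scanner active.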
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index df8202ebc7b8..0c6258bd1ba3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/buffer_head.h>
 #include <linux/pagevec.h>
+#include <trace/events/writeback.h>
 
 /*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
@@ -535,11 +536,13 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * threshold otherwise wait until the disk writes catch
 		 * up.
 		 */
+		trace_wbc_balance_dirty_start(&wbc, bdi);
 		if (bdi_nr_reclaimable > bdi_thresh) {
 			writeback_inodes_wb(&bdi->wb, &wbc);
 			pages_written += write_chunk - wbc.nr_to_write;
 			get_dirty_limits(&background_thresh, &dirty_thresh,
 				       &bdi_thresh, bdi);
+			trace_wbc_balance_dirty_written(&wbc, bdi);
 		}
 
 		/*
@@ -565,6 +568,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
+		trace_wbc_balance_dirty_wait(&wbc, bdi);
 		__set_current_state(TASK_INTERRUPTIBLE);
 		io_schedule_timeout(pause);
 
@@ -962,6 +966,7 @@ continue_unlock:
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
+			trace_wbc_writepage(wbc, mapping->backing_dev_info);
 			ret = (*writepage)(page, wbc, data);
 			if (unlikely(ret)) {
 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
diff --git a/mm/page_io.c b/mm/page_io.c
index 31a3b962230a..2dee975bf469 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		goto out;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 566f9a481e64..dfaa0f4e9789 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -766,6 +766,10 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 	loff_t newsize = attr->ia_size;
 	int error;
 
+	error = inode_change_ok(inode, attr);
+	if (error)
+		return error;
+
 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
 	    && newsize != inode->i_size) {
 		struct page *page = NULL;
@@ -800,25 +804,22 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			}
 		}
 
-		error = simple_setsize(inode, newsize);
+		/* XXX(truncate): truncate_setsize should be called last */
+		truncate_setsize(inode, newsize);
 		if (page)
 			page_cache_release(page);
-		if (error)
-			return error;
 		shmem_truncate_range(inode, newsize, (loff_t)-1);
 	}
 
-	error = inode_change_ok(inode, attr);
-	if (!error)
-		generic_setattr(inode, attr);
+	setattr_copy(inode, attr);
 #ifdef CONFIG_TMPFS_POSIX_ACL
-	if (!error && (attr->ia_valid & ATTR_MODE))
+	if (attr->ia_valid & ATTR_MODE)
 		error = generic_acl_chmod(inode);
 #endif
 	return error;
 }
 
-static void shmem_delete_inode(struct inode *inode)
+static void shmem_evict_inode(struct inode *inode)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
@@ -835,7 +836,7 @@ static void shmem_delete_inode(struct inode *inode)
 	}
 	BUG_ON(inode->i_blocks);
 	shmem_free_inode(inode->i_sb);
-	clear_inode(inode);
+	end_writeback(inode);
 }
 
 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
@@ -932,7 +933,7 @@ found:
 
 	/*
	 * Move _head_ to start search for next from here.
-	 * But be careful: shmem_delete_inode checks list_empty without taking
+	 * But be careful: shmem_evict_inode checks list_empty without taking
 	 * mutex, and there's an instant in list_move_tail when info->swaplist
 	 * would appear empty, if it were the only one on shmem_swaplist. We
 	 * could avoid doing it if inode NULL; or use this minor optimization.
@@ -2518,7 +2519,7 @@ static const struct super_operations shmem_ops = {
 	.remount_fs	= shmem_remount_fs,
 	.show_options	= shmem_show_options,
 #endif
-	.delete_inode	= shmem_delete_inode,
+	.evict_inode	= shmem_evict_inode,
 	.drop_inode	= generic_delete_inode,
 	.put_super	= shmem_put_super,
 };
diff --git a/mm/truncate.c b/mm/truncate.c
index 937571b8b233..ba887bff48c5 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -541,28 +541,48 @@ void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
 EXPORT_SYMBOL(truncate_pagecache);
 
 /**
+ * truncate_setsize - update inode and pagecache for a new file size
+ * @inode: inode
+ * @newsize: new file size
+ *
+ * truncate_setsize updastes i_size update and performs pagecache
+ * truncation (if necessary) for a file size updates. It will be
+ * typically be called from the filesystem's setattr function when
+ * ATTR_SIZE is passed in.
+ *
+ * Must be called with inode_mutex held and after all filesystem
+ * specific block truncation has been performed.
+ */
+void truncate_setsize(struct inode *inode, loff_t newsize)
+{
+	loff_t oldsize;
+
+	oldsize = inode->i_size;
+	i_size_write(inode, newsize);
+
+	truncate_pagecache(inode, oldsize, newsize);
+}
+EXPORT_SYMBOL(truncate_setsize);
+
+/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- *
- * This function is deprecated and simple_setsize or truncate_pagecache
- * should be used instead.
+ * This function is deprecated and truncate_setsize or truncate_pagecache
+ * should be used instead, together with filesystem specific block truncation.
 */
 int vmtruncate(struct inode *inode, loff_t offset)
 {
 	int error;
 
-	error = simple_setsize(inode, offset);
+	error = inode_newsize_ok(inode, offset);
 	if (error)
 		return error;
 
+	truncate_setsize(inode, offset);
 	if (inode->i_op->truncate)
 		inode->i_op->truncate(inode);
-
-	return error;
+	return 0;
 }
 EXPORT_SYMBOL(vmtruncate);
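The truncate.c and shmem.c changes above converge on the same setattr pattern: validate with inode_change_ok()/inode_newsize_ok(), update i_size and truncate the page cache with truncate_setsize(), then copy the remaining attributes with setattr_copy(). A hedged sketch of that pattern for a simple filesystem (example_setattr() is hypothetical; the helpers are the ones used by the patch):

static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
		/* filesystem-specific block truncation would go first */
		truncate_setsize(inode, attr->ia_size);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}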