author    Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /mm/backing-dev.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--  mm/backing-dev.c  94
1 file changed, 75 insertions(+), 19 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 65d420499a61..f032e6e1e09a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -14,17 +14,11 @@
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
 struct backing_dev_info default_backing_dev_info = {
         .name           = "default",
         .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
         .state          = 0,
         .capabilities   = BDI_CAP_MAP_COPY,
-        .unplug_io_fn   = default_unplug_io_fn,
 };
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
@@ -69,18 +63,18 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
         unsigned long background_thresh;
         unsigned long dirty_thresh;
         unsigned long bdi_thresh;
-        unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
+        unsigned long nr_dirty, nr_io, nr_more_io;
         struct inode *inode;
 
-        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
-        spin_lock(&inode_lock);
-        list_for_each_entry(inode, &wb->b_dirty, i_list)
+        nr_dirty = nr_io = nr_more_io = 0;
+        spin_lock(&inode_wb_list_lock);
+        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                 nr_dirty++;
-        list_for_each_entry(inode, &wb->b_io, i_list)
+        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                 nr_io++;
-        list_for_each_entry(inode, &wb->b_more_io, i_list)
+        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                 nr_more_io++;
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 
         global_dirty_limits(&background_thresh, &dirty_thresh);
         bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -362,7 +356,7 @@ static int bdi_forker_thread(void *ptr)
 {
         struct bdi_writeback *me = ptr;
 
-        current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+        current->flags |= PF_SWAPWRITE;
         set_freezable();
 
         /*
@@ -604,7 +598,7 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
         spin_lock(&sb_lock);
         list_for_each_entry(sb, &super_blocks, s_list) {
                 if (sb->s_bdi == bdi)
-                        sb->s_bdi = NULL;
+                        sb->s_bdi = &default_backing_dev_info;
         }
         spin_unlock(&sb_lock);
 }
@@ -682,11 +676,11 @@ void bdi_destroy(struct backing_dev_info *bdi)
         if (bdi_has_dirty_io(bdi)) {
                 struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                 list_splice(&bdi->wb.b_io, &dst->b_io);
                 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode_wb_list_lock);
         }
 
         bdi_unregister(bdi);
@@ -729,6 +723,7 @@ static wait_queue_head_t congestion_wqh[2] = {
                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
         };
+static atomic_t nr_bdi_congested[2];
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
@@ -736,7 +731,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
         wait_queue_head_t *wqh = &congestion_wqh[sync];
 
         bit = sync ? BDI_sync_congested : BDI_async_congested;
-        clear_bit(bit, &bdi->state);
+        if (test_and_clear_bit(bit, &bdi->state))
+                atomic_dec(&nr_bdi_congested[sync]);
         smp_mb__after_clear_bit();
         if (waitqueue_active(wqh))
                 wake_up(wqh);
@@ -748,7 +744,8 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync)
         enum bdi_state bit;
 
         bit = sync ? BDI_sync_congested : BDI_async_congested;
-        set_bit(bit, &bdi->state);
+        if (!test_and_set_bit(bit, &bdi->state))
+                atomic_inc(&nr_bdi_congested[sync]);
 }
 EXPORT_SYMBOL(set_bdi_congested);
 
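Note on the two hunks above: the counter is only adjusted on an actual state transition (test_and_clear_bit / test_and_set_bit), so nr_bdi_congested[sync] tracks how many BDIs are currently congested in each direction rather than how many times set/clear was called. A rough user-space sketch of the same counting idea, using hypothetical names and C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

/* One shared counter per direction (index 0 = async, 1 = sync), as in the patch. */
static atomic_uint nr_congested[2];

struct fake_bdi {
        atomic_uint state;              /* bit 0 = async congested, bit 1 = sync congested */
};

static void fake_set_congested(struct fake_bdi *bdi, int sync)
{
        unsigned int bit = 1u << sync;

        /* Count only the 0 -> 1 transition, mirroring !test_and_set_bit(). */
        if (!(atomic_fetch_or(&bdi->state, bit) & bit))
                atomic_fetch_add(&nr_congested[sync], 1);
}

static void fake_clear_congested(struct fake_bdi *bdi, int sync)
{
        unsigned int bit = 1u << sync;

        /* Count only the 1 -> 0 transition, mirroring test_and_clear_bit(). */
        if (atomic_fetch_and(&bdi->state, ~bit) & bit)
                atomic_fetch_sub(&nr_congested[sync], 1);
}

int main(void)
{
        struct fake_bdi a = {0}, b = {0};

        fake_set_congested(&a, 0);
        fake_set_congested(&a, 0);      /* duplicate set: counter unchanged */
        fake_set_congested(&b, 0);
        printf("async congested: %u\n", atomic_load(&nr_congested[0]));   /* 2 */
        fake_clear_congested(&a, 0);
        printf("async congested: %u\n", atomic_load(&nr_congested[0]));   /* 1 */
        return 0;
}

Keeping this counter lets the last hunk below (wait_iff_congested) decide whether to sleep by reading a single atomic instead of scanning every BDI.
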
@@ -764,13 +761,72 @@ EXPORT_SYMBOL(set_bdi_congested);
 long congestion_wait(int sync, long timeout)
 {
         long ret;
+        unsigned long start = jiffies;
         DEFINE_WAIT(wait);
         wait_queue_head_t *wqh = &congestion_wqh[sync];
 
         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
         ret = io_schedule_timeout(timeout);
         finish_wait(wqh, &wait);
+
+        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
+                                        jiffies_to_usecs(jiffies - start));
+
         return ret;
 }
 EXPORT_SYMBOL(congestion_wait);
 
+/**
+ * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
+ * @zone: A zone to check if it is heavily congested
+ * @sync: SYNC or ASYNC IO
+ * @timeout: timeout in jiffies
+ *
+ * In the event of a congested backing_dev (any backing_dev) and the given
+ * @zone has experienced recent congestion, this waits for up to @timeout
+ * jiffies for either a BDI to exit congestion of the given @sync queue
+ * or a write to complete.
+ *
+ * In the absence of zone congestion, cond_resched() is called to yield
+ * the processor if necessary but otherwise does not sleep.
+ *
+ * The return value is 0 if the sleep is for the full timeout. Otherwise,
+ * it is the number of jiffies that were still remaining when the function
+ * returned. return_value == timeout implies the function did not sleep.
+ */
+long wait_iff_congested(struct zone *zone, int sync, long timeout)
+{
+        long ret;
+        unsigned long start = jiffies;
+        DEFINE_WAIT(wait);
+        wait_queue_head_t *wqh = &congestion_wqh[sync];
+
+        /*
+         * If there is no congestion, or heavy congestion is not being
+         * encountered in the current zone, yield if necessary instead
+         * of sleeping on the congestion queue
+         */
+        if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
+                        !zone_is_reclaim_congested(zone)) {
+                cond_resched();
+
+                /* In case we scheduled, work out time remaining */
+                ret = timeout - (jiffies - start);
+                if (ret < 0)
+                        ret = 0;
+
+                goto out;
+        }
+
+        /* Sleep until uncongested or a write happens */
+        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+        ret = io_schedule_timeout(timeout);
+        finish_wait(wqh, &wait);
+
+out:
+        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
+                                        jiffies_to_usecs(jiffies - start));
+
+        return ret;
+}
+EXPORT_SYMBOL(wait_iff_congested);
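The new helper is aimed at the page-reclaim path: if no BDI is congested for the requested direction, or the zone has not recently seen congestion, it only calls cond_resched() and reports how much of the timeout is left; otherwise it sleeps on the congestion wait queue just like congestion_wait(). Below is a hedged, self-contained user-space sketch of that control flow; the names are hypothetical, sched_yield() and nanosleep() stand in for cond_resched() and io_schedule_timeout(), and the zone check is stubbed out.

#define _POSIX_C_SOURCE 200809L
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>
#include <stdio.h>
#include <time.h>

#define TICK_MS 10L                     /* pretend one "jiffy" is 10 ms */

static atomic_uint nr_congested[2];     /* as in the previous sketch */

/* Stub standing in for zone_is_reclaim_congested(zone). */
static bool zone_recently_congested(void)
{
        return false;
}

static long now_ticks(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * (1000L / TICK_MS) + ts.tv_nsec / (TICK_MS * 1000000L);
}

/* Returns the ticks left of @timeout; 0 means the full timeout was consumed. */
static long wait_iff_congested_sketch(int sync, long timeout)
{
        long start = now_ticks();
        long ret;

        if (atomic_load(&nr_congested[sync]) == 0 || !zone_recently_congested()) {
                /* Nothing congested: just yield, then report what is left. */
                sched_yield();
                ret = timeout - (now_ticks() - start);
                return ret < 0 ? 0 : ret;
        }

        /* Congested: sleep out the timeout (the kernel instead sleeps on the
         * congestion wait queue and can be woken early). */
        struct timespec ts = {
                .tv_sec  = (timeout * TICK_MS) / 1000,
                .tv_nsec = ((timeout * TICK_MS) % 1000) * 1000000L,
        };
        nanosleep(&ts, NULL);
        return 0;
}

int main(void)
{
        printf("remaining ticks: %ld\n", wait_iff_congested_sketch(0, 10));
        return 0;
}

The design point this illustrates is the same one the kernel-doc above states: callers that used to unconditionally congestion_wait() can now avoid sleeping entirely when nothing is actually congested, at the cost of maintaining nr_bdi_congested.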