Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--	mm/backing-dev.c	61
1 file changed, 59 insertions(+), 2 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 55627306abe0..5ad3c106606b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -729,6 +729,7 @@ static wait_queue_head_t congestion_wqh[2] = {
 		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
 	};
+static atomic_t nr_bdi_congested[2];
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
@@ -736,7 +737,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 	wait_queue_head_t *wqh = &congestion_wqh[sync];
 
 	bit = sync ? BDI_sync_congested : BDI_async_congested;
-	clear_bit(bit, &bdi->state);
+	if (test_and_clear_bit(bit, &bdi->state))
+		atomic_dec(&nr_bdi_congested[sync]);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(wqh))
 		wake_up(wqh);
@@ -748,7 +750,8 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 	enum bdi_state bit;
 
 	bit = sync ? BDI_sync_congested : BDI_async_congested;
-	set_bit(bit, &bdi->state);
+	if (!test_and_set_bit(bit, &bdi->state))
+		atomic_inc(&nr_bdi_congested[sync]);
 }
 EXPORT_SYMBOL(set_bdi_congested);
 
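The two hunks above replace plain set_bit()/clear_bit() with their test-and-modify variants so that nr_bdi_congested[sync] is adjusted only on an actual state transition: a BDI that is marked congested repeatedly still counts once. Below is a minimal user-space sketch of that transition-counting pattern, using C11 atomics rather than the kernel's bitops; every name in it is made up for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct fake_bdi {
	atomic_uint state;			/* bit N set => queue N congested */
};

static atomic_int nr_congested[2];		/* stands in for nr_bdi_congested[2] */

static void fake_set_congested(struct fake_bdi *bdi, int sync)
{
	unsigned int bit = 1u << sync;

	/* atomic_fetch_or() returns the old value, like test_and_set_bit() */
	if (!(atomic_fetch_or(&bdi->state, bit) & bit))
		atomic_fetch_add(&nr_congested[sync], 1);
}

static void fake_clear_congested(struct fake_bdi *bdi, int sync)
{
	unsigned int bit = 1u << sync;

	/* atomic_fetch_and() returns the old value, like test_and_clear_bit() */
	if (atomic_fetch_and(&bdi->state, ~bit) & bit)
		atomic_fetch_sub(&nr_congested[sync], 1);
}

int main(void)
{
	struct fake_bdi bdi = { 0 };

	fake_set_congested(&bdi, 1);
	fake_set_congested(&bdi, 1);		/* duplicate set: counter unchanged */
	printf("sync congested devices: %d\n", atomic_load(&nr_congested[1]));	/* 1 */

	fake_clear_congested(&bdi, 1);
	printf("sync congested devices: %d\n", atomic_load(&nr_congested[1]));	/* 0 */
	return 0;
}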
@@ -779,3 +782,57 @@ long congestion_wait(int sync, long timeout)
 }
 EXPORT_SYMBOL(congestion_wait);
 
+/**
+ * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
+ * @zone: A zone to check if it is heavily congested
+ * @sync: SYNC or ASYNC IO
+ * @timeout: timeout in jiffies
+ *
+ * If any backing_dev is congested and the given @zone has experienced
+ * recent congestion, this waits for up to @timeout jiffies for either
+ * a BDI to exit congestion of the given @sync queue or a write to
+ * complete.
+ *
+ * In the absence of zone congestion, cond_resched() is called to yield
+ * the processor if necessary but otherwise does not sleep.
+ *
+ * The return value is 0 if the sleep is for the full timeout. Otherwise,
+ * it is the number of jiffies that were still remaining when the function
+ * returned. return_value == timeout implies the function did not sleep.
+ */
+long wait_iff_congested(struct zone *zone, int sync, long timeout)
+{
+	long ret;
+	unsigned long start = jiffies;
+	DEFINE_WAIT(wait);
+	wait_queue_head_t *wqh = &congestion_wqh[sync];
+
+	/*
+	 * If there is no congestion, or heavy congestion is not being
+	 * encountered in the current zone, yield if necessary instead
+	 * of sleeping on the congestion queue
+	 */
+	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
+			!zone_is_reclaim_congested(zone)) {
+		cond_resched();
+
+		/* In case we scheduled, work out time remaining */
+		ret = timeout - (jiffies - start);
+		if (ret < 0)
+			ret = 0;
+
+		goto out;
+	}
+
+	/* Sleep until uncongested or a write happens */
+	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+	ret = io_schedule_timeout(timeout);
+	finish_wait(wqh, &wait);
+
+out:
+	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
+					jiffies_to_usecs(jiffies - start));
+
+	return ret;
+}
+EXPORT_SYMBOL(wait_iff_congested);
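The kernel-doc above defines the calling contract: wait_iff_congested() sleeps for at most @timeout jiffies, and only when some BDI is congested on the @sync queue and the zone has seen recent congestion; a return value equal to the timeout means it did not sleep. Below is a hedged sketch of how a reclaim-side caller might use it. The helper name, the HZ/10 timeout and the bool return are assumptions for illustration only, not the accompanying reclaim change; wait_iff_congested() itself is the function added above.

#include <linux/backing-dev.h>
#include <linux/mmzone.h>

static bool reclaim_throttle_sketch(struct zone *zone, int sync)
{
	long remaining;

	/*
	 * Per the kernel-doc above, this sleeps for at most HZ/10 jiffies,
	 * and only when some BDI is congested on the @sync queue *and*
	 * @zone recently saw congestion; otherwise it just yields with
	 * cond_resched().
	 */
	remaining = wait_iff_congested(zone, sync, HZ / 10);

	/*
	 * A return value equal to the timeout means no sleep happened,
	 * so report whether reclaim was actually throttled.
	 */
	return remaining != HZ / 10;
}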