author     Linus Torvalds <torvalds@linux-foundation.org>  2009-11-03 21:16:21 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-11-03 21:16:21 -0500
commit     51bb296b09a83ee1aae025778db38f9d2cc7bb1a (patch)
tree       739f445b953aa77e82a429fe3a939d0b4cb3d222
parent     dc79d2f21a2dc19df26f0cb0b46be2d6241b627b (diff)
parent     4b27e1bb442e964903f8a3fa6bdf33a602dc0941 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cfq-iosched: limit coop preemption
  cfq-iosched: fix bad return value cfq_should_preempt()
  backing-dev: bdi sb prune should be in the unregister path, not destroy
  Fix bio_alloc() and bio_kmalloc() documentation
  bio_put(): add bio_clone() to the list of functions in the comment
-rw-r--r--  block/cfq-iosched.c  19
-rw-r--r--  fs/bio.c             28
-rw-r--r--  mm/backing-dev.c      3
3 files changed, 32 insertions, 18 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 069a61017c02..aa1e9535e358 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -196,6 +196,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };

 #define CFQ_CFQQ_FNS(name)						\
@@ -222,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS

 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -945,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (!cfqq) {
 		cfqq = cfq_get_next_queue(cfqd);
-		if (cfqq)
+		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 			cfq_clear_cfqq_coop(cfqq);
 	}

+	if (cfqq)
+		cfq_clear_cfqq_coop_preempt(cfqq);
+
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
 }
@@ -2051,7 +2056,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return false;
+		return true;

 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
@@ -2066,8 +2071,16 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq))
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
+		 * Mark new queue coop_preempt, so its coop flag will not be
+		 * cleared when new queue gets scheduled at the very first time
+		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
 		return true;
+	}

 	return false;
 }
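
The mark/clear/test helpers used in the hunks above (cfq_mark_cfqq_coop_preempt() and friends) are generated by the CFQ_CFQQ_FNS() macro that the second hunk extends. For readers unfamiliar with that pattern, the following is a sketch of what CFQ_CFQQ_FNS(coop_preempt) expands to, paraphrased from the macro in block/cfq-iosched.c of this era; treat the exact body as illustrative rather than authoritative:

/*
 * Sketch of the CFQ_CFQQ_FNS(coop_preempt) expansion: three inline
 * helpers that set, clear and test one bit of cfqq->flags, indexed
 * by the CFQ_CFQQ_FLAG_coop_preempt enum value added above.
 */
static inline void cfq_mark_cfqq_coop_preempt(struct cfq_queue *cfqq)
{
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_coop_preempt);
}
static inline void cfq_clear_cfqq_coop_preempt(struct cfq_queue *cfqq)
{
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_coop_preempt);
}
static inline int cfq_cfqq_coop_preempt(struct cfq_queue *cfqq)
{
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_coop_preempt)) != 0;
}
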
diff --git a/fs/bio.c b/fs/bio.c
index 402cb84a92a1..12da5db8682c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -325,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
  * @gfp_mask: allocation mask to use
  * @nr_iovecs: number of iovecs
  *
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
  *
  * RETURNS:
  * Pointer to new bio on success, NULL on failure.
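
The rule the corrected comment spells out (submit each mempool-backed bio before allocating the next) is easy to get wrong. The following is a minimal, hypothetical sketch of a compliant caller; the function name and parameters are invented for illustration, and it targets the 2.6.3x bio API (bio->bi_sector, submit_bio(rw, bio)):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical example of a well-behaved fs_bio_set user: with a
 * __GFP_WAIT mask (GFP_NOIO here) bio_alloc() cannot fail, but only
 * because each bio is submitted before the next one is allocated,
 * letting I/O completion refill the mempool instead of livelocking.
 */
static void submit_pages(struct block_device *bdev, struct page **pages,
			 int nr, sector_t sector, bio_end_io_t *end_io)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bdev;
		bio->bi_sector = sector + i * (PAGE_SIZE >> 9);
		bio->bi_end_io = end_io;
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		/* submit before looping back to bio_alloc() */
		submit_bio(WRITE, bio);
	}
}
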
@@ -350,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
 }

 /**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
  * @gfp_mask: the GFP_ mask given to the slab allocator
  * @nr_iovecs: number of iovecs to pre-allocate
  *
  * Description:
- *   bio_alloc will allocate a bio and associated bio_vec array that can hold
- *   at least @nr_iovecs entries. Allocations will be done from the
- *   fs_bio_set. Also see @bio_alloc_bioset.
- *
- *   If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- *   a bio. This is due to the mempool guarantees. To make this work, callers
- *   must never allocate more than 1 bio at a time from this pool. Callers
- *   that need to allocate more than 1 bio must always submit the previously
- *   allocated bio for IO before attempting to allocate a new one. Failure to
- *   do so can cause livelocks under memory pressure.
+ *   Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
+ *   %__GFP_WAIT, the allocation is guaranteed to succeed.
  *
  **/
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc or bio_get. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
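
With the comment now naming all three reference sources, a short hypothetical sketch of the pairing may help: each of bio_alloc(), bio_get() and bio_clone() yields a reference that must be dropped by its own bio_put(). The function below is invented purely for illustration and ignores real-life concerns such as the clone sharing the original's bio_vec array:

/* Hypothetical illustration: one bio_put() per reference taken. */
static void refcount_example(void)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);	/* ref #1: bio_alloc() */
	struct bio *copy = bio_clone(bio, GFP_NOIO);	/* own ref on the clone */

	bio_get(bio);					/* ref #2: bio_get() */

	bio_put(bio);		/* drops ref #2 */
	bio_put(copy);		/* drops the clone's ref, frees copy */
	bio_put(bio);		/* drops ref #1, frees bio */
}
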
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1065b715ef64..11aee09dd2a6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -628,6 +628,8 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 void bdi_unregister(struct backing_dev_info *bdi)
 {
 	if (bdi->dev) {
+		bdi_prune_sb(bdi);
+
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
@@ -697,7 +699,6 @@ void bdi_destroy(struct backing_dev_info *bdi)
 		spin_unlock(&inode_lock);
 	}

-	bdi_prune_sb(bdi);
 	bdi_unregister(bdi);

 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)