 block/blk-cgroup.c           |   2
 block/blk-core.c             | 127
 block/blk-exec.c             |   4
 block/blk-lib.c              |  26
 block/blk-settings.c         |   6
 block/blk-sysfs.c            |   4
 block/blk-throttle.c         |   2
 block/blk.h                  |   4
 block/bsg-lib.c              |  13
 block/cfq-iosched.c          |   3
 block/deadline-iosched.c     |   2
 block/elevator.c             |  16
 block/genhd.c                |   2
 block/partitions/Kconfig     |   4
 drivers/scsi/scsi_lib.c      |   2
 include/linux/backing-dev.h  |   4
 include/linux/blkdev.h       |  17
 include/linux/bsg-lib.h      |   1
 mm/backing-dev.c             |  84
 19 files changed, 224 insertions(+), 99 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3f6d39d23bb6..b8858fb0cafa 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+               return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
        return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c95c4d6e31a..c973249d68cd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,7 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
 
@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       queue_delayed_work(kblockd_workqueue, &q->delay_work,
-                               msecs_to_jiffies(msecs));
+       if (likely(!blk_queue_dead(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                                  msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -293,6 +295,34 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on a queue if there are any pending requests.
+ *    May be used to restart request handling after a request has completed.
+ *    This variant runs the queue whether or not the queue has been
+ *    stopped. Must be called with the queue lock held and interrupts
+ *    disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+       if (unlikely(blk_queue_dead(q)))
+               return;
+
+       /*
+        * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+        * the queue lock internally. As a result multiple threads may be
+        * running such a request function concurrently. Keep track of the
+        * number of active request_fn invocations such that blk_drain_queue()
+        * can wait until all these request_fn calls have finished.
+        */
+       q->request_fn_active++;
+       q->request_fn(q);
+       q->request_fn_active--;
+}
+
+/**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
        if (unlikely(blk_queue_stopped(q)))
                return;
 
-       q->request_fn(q);
+       __blk_run_queue_uncond(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q)))
+       if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,7 +379,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
@@ -357,15 +387,17 @@ EXPORT_SYMBOL(blk_put_queue);
  *    If not, only ELVPRIV requests are drained.  The caller is responsible
  *    for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+       __releases(q->queue_lock)
+       __acquires(q->queue_lock)
 {
        int i;
 
+       lockdep_assert_held(q->queue_lock);
+
        while (true) {
                bool drain = false;
 
-               spin_lock_irq(q->queue_lock);
-
                /*
                 * The caller might be trying to drain @q before its
                 * elevator is initialized.
@@ -386,6 +418,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        __blk_run_queue(q);
 
                drain |= q->nr_rqs_elvpriv;
+               drain |= q->request_fn_active;
 
                /*
                 * Unfortunately, requests are queued at and tracked from
@@ -401,11 +434,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        }
                }
 
-               spin_unlock_irq(q->queue_lock);
-
                if (!drain)
                        break;
+
+               spin_unlock_irq(q->queue_lock);
+
                msleep(10);
+
+               spin_lock_irq(q->queue_lock);
        }
 
        /*
@@ -416,13 +452,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
        if (q->request_fn) {
                struct request_list *rl;
 
-               spin_lock_irq(q->queue_lock);
-
                blk_queue_for_each_rl(rl, q)
                        for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
                                wake_up_all(&rl->wait[i]);
-
-               spin_unlock_irq(q->queue_lock);
        }
 }
 
@@ -446,7 +478,10 @@ void blk_queue_bypass_start(struct request_queue *q)
        spin_unlock_irq(q->queue_lock);
 
        if (drain) {
-               blk_drain_queue(q, false);
+               spin_lock_irq(q->queue_lock);
+               __blk_drain_queue(q, false);
+               spin_unlock_irq(q->queue_lock);
+
                /* ensure blk_queue_bypass() is %true inside RCU read lock */
                synchronize_rcu();
        }
@@ -473,20 +508,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it.  All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it.  All future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
        spinlock_t *lock = q->queue_lock;
 
-       /* mark @q DEAD, no new request or merges will be allowed afterwards */
+       /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
        spin_lock_irq(lock);
 
        /*
-        * Dead queue is permanently in bypass mode till released.  Note
+        * A dying queue is permanently in bypass mode till released.  Note
         * that, unlike blk_queue_bypass_start(), we aren't performing
         * synchronize_rcu() after entering bypass mode to avoid the delay
         * as some drivers create and destroy a lot of queues while
@@ -499,12 +534,18 @@ void blk_cleanup_queue(struct request_queue *q)
 
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-       queue_flag_set(QUEUE_FLAG_DEAD, q);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
-       /* drain all requests queued before DEAD marking */
-       blk_drain_queue(q, true);
+       /*
+        * Drain all requests queued before DYING marking. Set DEAD flag to
+        * prevent that q->request_fn() gets invoked after draining finished.
+        */
+       spin_lock_irq(lock);
+       __blk_drain_queue(q, true);
+       queue_flag_set(QUEUE_FLAG_DEAD, q);
+       spin_unlock_irq(lock);
 
        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -549,7 +590,7 @@ void blk_exit_rl(struct request_list *rl)
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-       return blk_alloc_queue_node(gfp_mask, -1);
+       return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
@@ -660,7 +701,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
 
 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
-       return blk_init_queue_node(rfn, lock, -1);
+       return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_init_queue);
 
@@ -716,7 +757,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-       if (likely(!blk_queue_dead(q))) {
+       if (likely(!blk_queue_dying(q))) {
                __blk_get_queue(q);
                return true;
        }
@@ -870,7 +911,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
        const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue;
 
-       if (unlikely(blk_queue_dead(q)))
+       if (unlikely(blk_queue_dying(q)))
                return NULL;
 
        may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1091,7 @@ retry:
        if (rq)
                return rq;
 
-       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return NULL;
        }
@@ -1910,7 +1951,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                return -EIO;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return -ENODEV;
        }
@@ -2884,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
        trace_block_unplug(q, depth, !from_schedule);
 
-       /*
-        * Don't mess with dead queue.
-        */
-       if (unlikely(blk_queue_dead(q))) {
-               spin_unlock(q->queue_lock);
-               return;
-       }
-
-       /*
-        * If we are punting this to kblockd, then we can safely drop
-        * the queue_lock before waking kblockd (which needs to take
-        * this lock).
-        */
-       if (from_schedule) {
-               spin_unlock(q->queue_lock);
+       if (from_schedule)
                blk_run_queue_async(q);
-       } else {
+       else
                __blk_run_queue(q);
-               spin_unlock(q->queue_lock);
-       }
-
+       spin_unlock(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2996,7 +3021,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                /*
                 * Short-circuit if @q is dead
                 */
-               if (unlikely(blk_queue_dead(q))) {
+               if (unlikely(blk_queue_dying(q))) {
                        __blk_end_request_all(rq, -ENODEV);
                        continue;
                }
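
Note on the blk-core.c hunks above: queue teardown is now two-staged. QUEUE_FLAG_DYING is set at the start of blk_cleanup_queue() so new requests fail while draining proceeds, and QUEUE_FLAG_DEAD is set only after __blk_drain_queue() returns, at which point __blk_run_queue_uncond() will never invoke q->request_fn() again. The new request_fn_active counter is what lets the drain loop wait out request_fn implementations that drop the queue lock internally. A minimal sketch of the resulting invariant (queue_is_quiesced() is a hypothetical helper, not part of the patch):

    /* Hypothetical helper, for illustration only -- not part of this patch. */
    static bool queue_is_quiesced(struct request_queue *q)
    {
            /*
             * Caller holds q->queue_lock. __blk_run_queue_uncond() increments
             * request_fn_active around every q->request_fn() call, so a zero
             * value means no request_fn invocation is in flight even if the
             * driver (e.g. scsi_request_fn()) unlocks the queue lock inside.
             */
            return q->request_fn_active == 0;
    }
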
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f71eac35c1b9..74638ec234c8 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                rq->errors = -ENXIO;
                if (rq->end_io)
                        rq->end_io(rq, rq->errors);
@@ -78,7 +78,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        __blk_run_queue(q);
        /* the queue is stopped so it won't be run */
        if (is_pm_resume)
-               q->request_fn(q);
+               __blk_run_queue_uncond(q);
        spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9373b58dfab1..b3a1f2b70b31 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
-       unsigned int max_discard_sectors;
-       unsigned int granularity, alignment, mask;
+       sector_t max_discard_sectors;
+       sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
+       struct blk_plug plug;
 
        if (!q)
                return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
-       mask = granularity - 1;
-       alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+       alignment = bdev_discard_alignment(bdev) >> 9;
+       alignment = sector_div(alignment, granularity);
 
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-       max_discard_sectors = round_down(max_discard_sectors, granularity);
+       sector_div(max_discard_sectors, granularity);
+       max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;
 
+       blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
-               sector_t end_sect;
+               sector_t end_sect, tmp;
 
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
-               if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-                       end_sect =
-                               round_down(end_sect - alignment, granularity)
-                               + alignment;
+               tmp = end_sect;
+               if (req_sects < nr_sects &&
+                   sector_div(tmp, granularity) != alignment) {
+                       end_sect = end_sect - alignment;
+                       sector_div(end_sect, granularity);
+                       end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
 
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                atomic_inc(&bb.done);
                submit_bio(type, bio);
        }
+       blk_finish_plug(&plug);
 
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
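
Note on the blkdev_issue_discard() hunks above: the mask-based rounding, which is only correct when the discard granularity is a power of two, is replaced by sector_div(), which handles arbitrary granularities and 64-bit sector counts on 32-bit builds; the loop is also wrapped in a blk_start_plug()/blk_finish_plug() pair so the split discard bios are submitted as a batch rather than one queue run per bio. The end-of-range rounding can be read as the hypothetical helper below (not part of the patch); sector_div(x, y) divides x in place and returns the remainder:

    /* Hypothetical helper, equivalent to the rounding in the loop above. */
    static sector_t trim_to_alignment(sector_t end_sect, sector_t granularity,
                                      sector_t alignment)
    {
            end_sect -= alignment;                     /* shift to a granule base */
            sector_div(end_sect, granularity);         /* end_sect /= granularity */
            return end_sect * granularity + alignment; /* previous aligned sector */
    }
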
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 779bb7646bcd..c50ecf0ea3b1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                bottom = b->discard_granularity + alignment;
 
                /* Verify that top and bottom intervals line up */
-               if (max(top, bottom) & (min(top, bottom) - 1))
+               if ((max(top, bottom) % min(top, bottom)) != 0)
                        t->discard_misaligned = 1;
        }
 
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                              b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) &
-                       (t->discard_granularity - 1);
+               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+                       t->discard_granularity;
        }
 
        return ret;
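
The blk_stack_limits() hunks apply the same principle: `x & (g - 1)` equals `x % g` only when g is a power of two. For a discard granularity of, say, 3 sectors, `7 & (3 - 1)` evaluates to 2 whereas the correct remainder `7 % 3` is 1, so the old test could misjudge alignment for non-power-of-two granularities. Using the modulo operator makes both the misalignment check and the stacked discard_alignment computation granularity-agnostic.
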
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce6204608822..788147797a79 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
-       if (blk_queue_dead(q)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
-       if (blk_queue_dead(q)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a9664fa0b609..31146225f3d0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                /* if %NULL and @q is alive, fall back to root_tg */
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
-               else if (!blk_queue_dead(q))
+               else if (!blk_queue_dying(q))
                        tg = td_root_tg(td);
        }
 
diff --git a/block/blk.h b/block/blk.h
index ca51543b248c..47fdfdd41520 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
                        q->flush_queue_delayed = 1;
                        return NULL;
                }
-               if (unlikely(blk_queue_dead(q)) ||
+               if (unlikely(blk_queue_dying(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+void __blk_run_queue_uncond(struct request_queue *q);
+
 int blk_dev_init(void);
 
 
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index deee61fbb741..650f427d915b 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -151,19 +151,6 @@ failjob_rls_job:
        return -ENOMEM;
 }
 
-/*
- * bsg_goose_queue - restart queue in case it was stopped
- * @q: request q to be restarted
- */
-void bsg_goose_queue(struct request_queue *q)
-{
-       if (!q)
-               return;
-
-       blk_run_queue_async(q);
-}
-EXPORT_SYMBOL_GPL(bsg_goose_queue);
-
 /**
  * bsg_request_fn - generic handler for bsg requests
  * @q: request queue to manage
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fb52df9744f5..e62e9205b80a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1973,7 +1973,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+           time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+           cfqq == RQ_CFQQ(next)) {
                list_move(&rq->queuelist, &next->queuelist);
                rq_set_fifo_time(rq, rq_fifo_time(next));
        }
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 599b12e5380f..90037b5eb17f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
        /*
         * rq is expired!
         */
-       if (time_after(jiffies, rq_fifo_time(rq)))
+       if (time_after_eq(jiffies, rq_fifo_time(rq)))
                return 1;
 
        return 0;
diff --git a/block/elevator.c b/block/elevator.c
index 9b1d42b62f20..9edba1b8323e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
                                     struct request *rq)
 {
        struct request *__rq;
+       bool ret;
 
        if (blk_queue_nomerges(q))
                return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
        if (blk_queue_noxmerges(q))
                return false;
 
+       ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
-       __rq = elv_rqhash_find(q, blk_rq_pos(rq));
-       if (__rq && blk_attempt_req_merge(q, __rq, rq))
-               return true;
+       while (1) {
+               __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+               if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+                       break;
 
-       return false;
+               /* The merged request could be merged with others, try again */
+               ret = true;
+               rq = __rq;
+       }
+
+       return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
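
The elv_attempt_insert_merge() rewrite above turns a single back-merge attempt into a loop: once rq has been merged into the request found in the hash, the merged request may itself have become contiguous with yet another queued request. For example, if the hash holds request A covering sectors 0-7 and request B covering sectors 8-15 while rq covers 16-23, the first pass merges rq into B (now 8-23) and the second pass merges that result into A (now 0-23); the loop ends when elv_rqhash_find() finds nothing or a merge attempt fails, and ret reports whether at least one merge succeeded.
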
diff --git a/block/genhd.c b/block/genhd.c
index 6cace663a80e..2a6fdf539a69 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1245,7 +1245,7 @@ EXPORT_SYMBOL(blk_lookup_devt);
 
 struct gendisk *alloc_disk(int minors)
 {
-       return alloc_disk_node(minors, -1);
+       return alloc_disk_node(minors, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(alloc_disk);
 
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index cb5f0a3f1b03..75a54e1adbb5 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -234,8 +234,8 @@ config KARMA_PARTITION
          uses a proprietary partition table.
 
 config EFI_PARTITION
-       bool "EFI GUID Partition support"
-       depends on PARTITION_ADVANCED
+       bool "EFI GUID Partition support" if PARTITION_ADVANCED
+       default y
        select CRC32
        help
          Say Y here if you would like to use hard disks under Linux which
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9032e910bca3..f1bf5aff68ed 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1418,7 +1418,7 @@ static int scsi_lld_busy(struct request_queue *q)
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
 
-       if (blk_queue_dead(q))
+       if (blk_queue_dying(q))
                return 0;
 
        shost = sdev->host;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 2a9a9abc9126..238521a19849 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/atomic.h>
 #include <linux/sysctl.h>
+#include <linux/mutex.h>
 
 struct page;
 struct device;
@@ -105,6 +106,9 @@ struct backing_dev_info {
 
        struct timer_list laptop_mode_wb_timer;
 
+       cpumask_t *flusher_cpumask; /* used for writeback thread scheduling */
+       struct mutex flusher_cpumask_lock;
+
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d2..acb4f7bbbd32 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,12 @@ struct request_queue {
 
        unsigned int            nr_sorted;
        unsigned int            in_flight[2];
+       /*
+        * Number of active block driver functions for which blk_drain_queue()
+        * must wait. Must be incremented around functions that unlock the
+        * queue_lock internally, e.g. scsi_request_fn().
+        */
+       unsigned int            request_fn_active;
 
        unsigned int            rq_timeout;
        struct timer_list       timeout;
@@ -437,7 +443,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL    4       /* write queue has been filled */
-#define QUEUE_FLAG_DEAD         5       /* queue being torn down */
+#define QUEUE_FLAG_DYING        5       /* queue being torn down */
 #define QUEUE_FLAG_BYPASS       6       /* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI         7       /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8       /* disable merge attempts */
@@ -452,6 +458,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  16       /* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17       /* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18       /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD        19       /* queue tear-down finished */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -521,6 +528,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dying(q)     test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)    test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -1180,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q)
 
 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+       sector_t alignment = sector << 9;
+       alignment = sector_div(alignment, lim->discard_granularity);
 
        if (!lim->max_discard_sectors)
                return 0;
 
-       return (lim->discard_granularity + lim->discard_alignment - alignment)
-               & (lim->discard_granularity - 1);
+       alignment = lim->discard_granularity + lim->discard_alignment - alignment;
+       return sector_div(alignment, lim->discard_granularity);
 }
 
 static inline int bdev_discard_alignment(struct block_device *bdev)
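
With the definitions above, the block layer exposes two distinct predicates: blk_queue_dying() becomes true as soon as blk_cleanup_queue() starts and means "fail new work", while blk_queue_dead() is set only after draining, when request_fn is guaranteed never to run again. A minimal sketch of how a request_fn might use the distinction (example_request_fn() is illustrative, not part of the patch):

    /* Illustrative only -- not part of this patch. */
    static void example_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {
                    /*
                     * DYING: teardown has begun, so fail the request instead
                     * of starting new device activity. DEAD is never observed
                     * here, because it is set only after draining and
                     * __blk_run_queue_uncond() stops calling request_fn once
                     * the queue is DEAD.
                     */
                    if (unlikely(blk_queue_dying(q))) {
                            __blk_end_request_all(rq, -ENODEV);
                            continue;
                    }
                    /* ... hand rq to the hardware ... */
            }
    }
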
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 4d0fb3df2f4a..a226652a5a6c 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -67,6 +67,5 @@ void bsg_job_done(struct bsg_job *job, int result,
 int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
                    bsg_job_fn *job_fn, int dd_job_size);
 void bsg_request_fn(struct request_queue *q);
-void bsg_goose_queue(struct request_queue *q);
 
 #endif
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d3ca2b3ee176..bd6a6cabef71 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/writeback.h>
 #include <linux/device.h>
+#include <linux/slab.h>
 #include <trace/events/writeback.h>
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
@@ -221,12 +222,63 @@ static ssize_t max_ratio_store(struct device *dev,
 }
 BDI_SHOW(max_ratio, bdi->max_ratio)
 
+static ssize_t cpu_list_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       struct bdi_writeback *wb = &bdi->wb;
+       cpumask_var_t newmask;
+       ssize_t ret;
+       struct task_struct *task;
+
+       if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
+               return -ENOMEM;
+
+       ret = cpulist_parse(buf, newmask);
+       if (!ret) {
+               spin_lock_bh(&bdi->wb_lock);
+               task = wb->task;
+               if (task)
+                       get_task_struct(task);
+               spin_unlock_bh(&bdi->wb_lock);
+
+               mutex_lock(&bdi->flusher_cpumask_lock);
+               if (task) {
+                       ret = set_cpus_allowed_ptr(task, newmask);
+                       put_task_struct(task);
+               }
+               if (ret == 0) {
+                       cpumask_copy(bdi->flusher_cpumask, newmask);
+                       ret = count;
+               }
+               mutex_unlock(&bdi->flusher_cpumask_lock);
+
+       }
+       free_cpumask_var(newmask);
+
+       return ret;
+}
+
+static ssize_t cpu_list_show(struct device *dev,
+               struct device_attribute *attr, char *page)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       ssize_t ret;
+
+       mutex_lock(&bdi->flusher_cpumask_lock);
+       ret = cpulist_scnprintf(page, PAGE_SIZE-1, bdi->flusher_cpumask);
+       mutex_unlock(&bdi->flusher_cpumask_lock);
+
+       return ret;
+}
+
 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
 
 static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
+       __ATTR_RW(cpu_list),
        __ATTR_NULL,
 };
 
@@ -428,6 +480,7 @@ static int bdi_forker_thread(void *ptr)
                                writeback_inodes_wb(&bdi->wb, 1024,
                                                    WB_REASON_FORKER_THREAD);
                        } else {
+                               int ret;
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
@@ -437,6 +490,14 @@ static int bdi_forker_thread(void *ptr)
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
+                               mutex_lock(&bdi->flusher_cpumask_lock);
+                               ret = set_cpus_allowed_ptr(task,
+                                                       bdi->flusher_cpumask);
+                               mutex_unlock(&bdi->flusher_cpumask_lock);
+                               if (ret)
+                                       printk_once("%s: failed to bind flusher"
+                                                   " thread %s, error %d\n",
+                                                   __func__, task->comm, ret);
                                wake_up_process(task);
                        }
                        bdi_clear_pending(bdi);
@@ -509,6 +570,17 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                                                dev_name(dev));
                if (IS_ERR(wb->task))
                        return PTR_ERR(wb->task);
+       } else {
+               int node;
+               /*
+                * Set up a default cpumask for the flusher threads that
+                * includes all cpus on the same numa node as the device.
+                * The mask may be overridden via sysfs.
+                */
+               node = dev_to_node(bdi->dev);
+               if (node != NUMA_NO_NODE)
+                       cpumask_copy(bdi->flusher_cpumask,
+                                    cpumask_of_node(node));
        }
 
        bdi_debug_register(bdi, dev_name(dev));
@@ -634,6 +706,15 @@ int bdi_init(struct backing_dev_info *bdi)
 
        bdi_wb_init(&bdi->wb, bdi);
 
+       if (!bdi_cap_flush_forker(bdi)) {
+               bdi->flusher_cpumask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+               if (!bdi->flusher_cpumask)
+                       return -ENOMEM;
+               cpumask_setall(bdi->flusher_cpumask);
+               mutex_init(&bdi->flusher_cpumask_lock);
+       } else
+               bdi->flusher_cpumask = NULL;
+
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
@@ -656,6 +737,7 @@ int bdi_init(struct backing_dev_info *bdi)
 err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
+               kfree(bdi->flusher_cpumask);
        }
 
        return err;
@@ -683,6 +765,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
        bdi_unregister(bdi);
 
+       kfree(bdi->flusher_cpumask);
+
        /*
         * If bdi_unregister() had already been called earlier, the
         * wakeup_timer could still be armed because bdi_prune_sb()
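
The mm/backing-dev.c changes add a per-BDI cpu_list attribute alongside read_ahead_kb, min_ratio and max_ratio. Writing a cpulist to it (for example `echo 0-3 > /sys/class/bdi/<bdi>/cpu_list`, assuming the usual /sys/class/bdi/ location of BDI devices) re-binds that device's flusher thread via set_cpus_allowed_ptr(); reading it back prints the current mask via cpulist_scnprintf(). The mask defaults to all CPUs in bdi_init(), is narrowed in bdi_register() to the CPUs of the device's NUMA node when one is known, and is re-applied by the forker thread whenever it creates a new flusher.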