path: root/block
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 11:27:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 11:27:23 -0500
commit		60da5bf47dd3d301a1d3bd4f0a4b9e29a184515c (patch)
tree		30de83370440aae5350d9ab3fbe6583abd439ee8 /block
parent		3c2e81ef344a90bb0a39d84af6878b4aeff568a2 (diff)
parent		cbae8d45d61f3a8c155caf267d01e5e0f0b2f4b7 (diff)
Merge branch 'for-3.8/core' of git://git.kernel.dk/linux-block
Pull block layer core updates from Jens Axboe:
 "Here are the core block IO bits for 3.8. The branch contains:

   - The final version of the surprise device removal fixups from Bart.

   - Don't hide EFI partitions under advanced partition types. It's
     fairly wide spread these days. This is especially dangerous for
     systems that have both msdos and efi partition tables, where you
     want to keep them in sync.

   - Cleanup of using -1 instead of the proper NUMA_NO_NODE

   - Export control of bdi flusher thread CPU mask and default to using
     the home node (if known) from Jeff.

   - Export unplug tracepoint for MD.

   - Core improvements from Shaohua. Reinstate the recursive merge, as
     the original bug has been fixed. Add plugging for discard and also
     fix a problem handling non pow-of-2 discard limits.

  There's a trivial merge in block/blk-exec.c due to a fix that went
  into 3.7-rc at a later point than -rc4 where this is based."

* 'for-3.8/core' of git://git.kernel.dk/linux-block:
  block: export block_unplug tracepoint
  block: add plug for blkdev_issue_discard
  block: discard granularity might not be power of 2
  deadline: Allow 0ms deadline latency, increase the read speed
  partitions: enable EFI/GPT support by default
  bsg: Remove unused function bsg_goose_queue()
  block: Make blk_cleanup_queue() wait until request_fn finished
  block: Avoid scheduling delayed work on a dead queue
  block: Avoid that request_fn is invoked on a dead queue
  block: Let blk_drain_queue() caller obtain the queue lock
  block: Rename queue dead flag
  bdi: add a user-tunable cpu_list for the bdi flusher threads
  block: use NUMA_NO_NODE instead of -1
  block: recursive merge requests
  block CFQ: avoid moving request to different queue
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c		  2
-rw-r--r--	block/blk-core.c		127
-rw-r--r--	block/blk-exec.c		  4
-rw-r--r--	block/blk-lib.c			 26
-rw-r--r--	block/blk-settings.c		  6
-rw-r--r--	block/blk-sysfs.c		  4
-rw-r--r--	block/blk-throttle.c		  2
-rw-r--r--	block/blk.h			  4
-rw-r--r--	block/bsg-lib.c			 13
-rw-r--r--	block/cfq-iosched.c		  3
-rw-r--r--	block/deadline-iosched.c	  2
-rw-r--r--	block/elevator.c		 16
-rw-r--r--	block/genhd.c			  2
-rw-r--r--	block/partitions/Kconfig	  4
14 files changed, 122 insertions(+), 93 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3f6d39d23bb6..b8858fb0cafa 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 * we shouldn't allow anything to go through for a bypassing queue.
 	 */
 	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
 	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c95c4d6e31a..c973249d68cd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,7 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
 
@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	queue_delayed_work(kblockd_workqueue, &q->delay_work,
-				msecs_to_jiffies(msecs));
+	if (likely(!blk_queue_dead(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -293,6 +295,34 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Invoke request handling on a queue if there are any pending requests.
+ *    May be used to restart request handling after a request has completed.
+ *    This variant runs the queue whether or not the queue has been
+ *    stopped. Must be called with the queue lock held and interrupts
+ *    disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+	if (unlikely(blk_queue_dead(q)))
+		return;
+
+	/*
+	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+	 * the queue lock internally. As a result multiple threads may be
+	 * running such a request function concurrently. Keep track of the
+	 * number of active request_fn invocations such that blk_drain_queue()
+	 * can wait until all these request_fn calls have finished.
+	 */
+	q->request_fn_active++;
+	q->request_fn(q);
+	q->request_fn_active--;
+}
+
+/**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
  *
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	q->request_fn(q);
+	__blk_run_queue_uncond(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q)))
+	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,7 +379,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
@@ -357,15 +387,17 @@ EXPORT_SYMBOL(blk_put_queue);
  *    If not, only ELVPRIV requests are drained. The caller is responsible
  *    for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	int i;
 
+	lockdep_assert_held(q->queue_lock);
+
 	while (true) {
 		bool drain = false;
 
-		spin_lock_irq(q->queue_lock);
-
 		/*
 		 * The caller might be trying to drain @q before its
 		 * elevator is initialized.
@@ -386,6 +418,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			__blk_run_queue(q);
 
 		drain |= q->nr_rqs_elvpriv;
+		drain |= q->request_fn_active;
 
 		/*
 		 * Unfortunately, requests are queued at and tracked from
@@ -401,11 +434,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			}
 		}
 
-		spin_unlock_irq(q->queue_lock);
-
 		if (!drain)
 			break;
+
+		spin_unlock_irq(q->queue_lock);
+
 		msleep(10);
+
+		spin_lock_irq(q->queue_lock);
 	}
 
 	/*
@@ -416,13 +452,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	if (q->request_fn) {
 		struct request_list *rl;
 
-		spin_lock_irq(q->queue_lock);
-
 		blk_queue_for_each_rl(rl, q)
 			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 				wake_up_all(&rl->wait[i]);
-
-		spin_unlock_irq(q->queue_lock);
 	}
 }
 
@@ -446,7 +478,10 @@ void blk_queue_bypass_start(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);
 
 	if (drain) {
-		blk_drain_queue(q, false);
+		spin_lock_irq(q->queue_lock);
+		__blk_drain_queue(q, false);
+		spin_unlock_irq(q->queue_lock);
+
 		/* ensure blk_queue_bypass() is %true inside RCU read lock */
 		synchronize_rcu();
 	}
@@ -473,20 +508,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it. All future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
 	spinlock_t *lock = q->queue_lock;
 
-	/* mark @q DEAD, no new request or merges will be allowed afterwards */
+	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
 	spin_lock_irq(lock);
 
 	/*
-	 * Dead queue is permanently in bypass mode till released. Note
+	 * A dying queue is permanently in bypass mode till released. Note
 	 * that, unlike blk_queue_bypass_start(), we aren't performing
 	 * synchronize_rcu() after entering bypass mode to avoid the delay
 	 * as some drivers create and destroy a lot of queues while
@@ -499,12 +534,18 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-	queue_flag_set(QUEUE_FLAG_DEAD, q);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
-	/* drain all requests queued before DEAD marking */
-	blk_drain_queue(q, true);
+	/*
+	 * Drain all requests queued before DYING marking. Set DEAD flag to
+	 * prevent that q->request_fn() gets invoked after draining finished.
+	 */
+	spin_lock_irq(lock);
+	__blk_drain_queue(q, true);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
+	spin_unlock_irq(lock);
 
 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -549,7 +590,7 @@ void blk_exit_rl(struct request_list *rl)
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-	return blk_alloc_queue_node(gfp_mask, -1);
+	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
@@ -660,7 +701,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
 
 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
-	return blk_init_queue_node(rfn, lock, -1);
+	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_init_queue);
 
@@ -716,7 +757,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-	if (likely(!blk_queue_dead(q))) {
+	if (likely(!blk_queue_dying(q))) {
 		__blk_get_queue(q);
 		return true;
 	}
@@ -870,7 +911,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_dying(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1091,7 @@ retry:
 	if (rq)
 		return rq;
 
-	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return NULL;
 	}
@@ -1910,7 +1951,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		return -ENODEV;
 	}
@@ -2884,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);
 
-	/*
-	 * Don't mess with dead queue.
-	 */
-	if (unlikely(blk_queue_dead(q))) {
-		spin_unlock(q->queue_lock);
-		return;
-	}
-
-	/*
-	 * If we are punting this to kblockd, then we can safely drop
-	 * the queue_lock before waking kblockd (which needs to take
-	 * this lock).
-	 */
-	if (from_schedule) {
-		spin_unlock(q->queue_lock);
+	if (from_schedule)
 		blk_run_queue_async(q);
-	} else {
+	else
 		__blk_run_queue(q);
-		spin_unlock(q->queue_lock);
-	}
-
+	spin_unlock(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2996,7 +3021,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * Short-circuit if @q is dead
 		 */
-		if (unlikely(blk_queue_dead(q))) {
+		if (unlikely(blk_queue_dying(q))) {
 			__blk_end_request_all(rq, -ENODEV);
 			continue;
 		}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f71eac35c1b9..74638ec234c8 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -78,7 +78,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
 	if (is_pm_resume)
-		q->request_fn(q);
+		__blk_run_queue_uncond(q);
 	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9373b58dfab1..b3a1f2b70b31 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors;
-	unsigned int granularity, alignment, mask;
+	sector_t max_discard_sectors;
+	sector_t granularity, alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
+	struct blk_plug plug;
 
 	if (!q)
 		return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	mask = granularity - 1;
-	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+	alignment = bdev_discard_alignment(bdev) >> 9;
+	alignment = sector_div(alignment, granularity);
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
 	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors = round_down(max_discard_sectors, granularity);
+	sector_div(max_discard_sectors, granularity);
+	max_discard_sectors *= granularity;
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;
 
+	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect;
+		sector_t end_sect, tmp;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
 		end_sect = sector + req_sects;
-		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-			end_sect =
-				round_down(end_sect - alignment, granularity)
-				+ alignment;
+		tmp = end_sect;
+		if (req_sects < nr_sects &&
+		    sector_div(tmp, granularity) != alignment) {
+			end_sect = end_sect - alignment;
+			sector_div(end_sect, granularity);
+			end_sect = end_sect * granularity + alignment;
 			req_sects = end_sect - sector;
 		}
 
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
 	}
+	blk_finish_plug(&plug);
 
 	/* Wait for bios in-flight */
 	if (!atomic_dec_and_test(&bb.done))
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 779bb7646bcd..c50ecf0ea3b1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		bottom = b->discard_granularity + alignment;
 
 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if ((max(top, bottom) % min(top, bottom)) != 0)
 			t->discard_misaligned = 1;
 	}
 
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					      b->max_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
-		t->discard_alignment = lcm(t->discard_alignment, alignment) &
-			(t->discard_granularity - 1);
+		t->discard_alignment = lcm(t->discard_alignment, alignment) %
+			t->discard_granularity;
 	}
 
 	return ret;
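
The common thread in the blk-lib.c and blk-settings.c hunks above is that "x & (granularity - 1)" only equals "x % granularity" when the granularity is a power of two, which is why the diff switches to sector_div()/"%". A minimal userspace sketch of where the mask arithmetic diverges (illustrative values only, not kernel code):

#include <stdio.h>

/* Toy check, not kernel code: compare the old mask-based remainder with a
 * true modulo for a power-of-2 and a non-power-of-2 granularity. The LBA
 * and granularity values below are made up for illustration. */
int main(void)
{
	unsigned long long lbas[] = { 7, 24, 63, 1000 };
	unsigned int granularities[] = { 8, 24 };	/* pow-of-2 vs. not */

	for (unsigned gi = 0; gi < 2; gi++) {
		unsigned int g = granularities[gi];
		for (unsigned li = 0; li < 4; li++) {
			unsigned long long lba = lbas[li];
			unsigned long long mask_rem = lba & (g - 1);	/* old scheme */
			unsigned long long mod_rem = lba % g;		/* what sector_div() yields */

			printf("g=%-3u lba=%-5llu mask=%-5llu mod=%-5llu %s\n",
			       g, lba, mask_rem, mod_rem,
			       mask_rem == mod_rem ? "" : "<-- mask math is wrong");
		}
	}
	return 0;
}

With g=8 both columns agree; with g=24 the mask result is wrong for most LBAs, which is exactly the case a non-power-of-2 discard_granularity exposes.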
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce6204608822..788147797a79 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a9664fa0b609..31146225f3d0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 		/* if %NULL and @q is alive, fall back to root_tg */
 		if (!IS_ERR(blkg))
 			tg = blkg_to_tg(blkg);
-		else if (!blk_queue_dead(q))
+		else if (!blk_queue_dying(q))
 			tg = td_root_tg(td);
 	}
 
diff --git a/block/blk.h b/block/blk.h
index ca51543b248c..47fdfdd41520 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (unlikely(blk_queue_dead(q)) ||
+		if (unlikely(blk_queue_dying(q)) ||
 		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+void __blk_run_queue_uncond(struct request_queue *q);
+
 int blk_dev_init(void);
 
 
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index deee61fbb741..650f427d915b 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -151,19 +151,6 @@ failjob_rls_job:
 	return -ENOMEM;
 }
 
-/*
- * bsg_goose_queue - restart queue in case it was stopped
- * @q: request q to be restarted
- */
-void bsg_goose_queue(struct request_queue *q)
-{
-	if (!q)
-		return;
-
-	blk_run_queue_async(q);
-}
-EXPORT_SYMBOL_GPL(bsg_goose_queue);
-
 /**
  * bsg_request_fn - generic handler for bsg requests
  * @q: request queue to manage
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fb52df9744f5..e62e9205b80a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1973,7 +1973,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	 * reposition in fifo if next is older than rq
 	 */
 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+	    time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+	    cfqq == RQ_CFQQ(next)) {
 		list_move(&rq->queuelist, &next->queuelist);
 		rq_set_fifo_time(rq, rq_fifo_time(next));
 	}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 599b12e5380f..90037b5eb17f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 	/*
 	 * rq is expired!
 	 */
-	if (time_after(jiffies, rq_fifo_time(rq)))
+	if (time_after_eq(jiffies, rq_fifo_time(rq)))
 		return 1;
 
 	return 0;
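
The deadline change above only matters in the boundary case: with a 0 ms read deadline, rq_fifo_time(rq) equals jiffies at the moment of the check, and strict time_after() never reports the request as expired. A small userspace sketch of the two comparisons on that boundary (macros simplified from include/linux/jiffies.h, type checks dropped):

#include <stdio.h>

/* Simplified, wrap-safe forms of the kernel's jiffies comparison macros. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = 100000;		/* pretend current time */
	unsigned long fifo_time = jiffies;	/* 0 ms deadline: expires "now" */

	printf("time_after(jiffies, fifo_time)    = %d\n",
	       time_after(jiffies, fifo_time));		/* 0: never expires */
	printf("time_after_eq(jiffies, fifo_time) = %d\n",
	       time_after_eq(jiffies, fifo_time));	/* 1: expires immediately */
	return 0;
}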
diff --git a/block/elevator.c b/block/elevator.c
index 9b1d42b62f20..9edba1b8323e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 					 struct request *rq)
 {
 	struct request *__rq;
+	bool ret;
 
 	if (blk_queue_nomerges(q))
 		return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 	if (blk_queue_noxmerges(q))
 		return false;
 
+	ret = false;
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
-	if (__rq && blk_attempt_req_merge(q, __rq, rq))
-		return true;
+	while (1) {
+		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+			break;
 
-	return false;
+		/* The merged request could be merged with others, try again */
+		ret = true;
+		rq = __rq;
+	}
+
+	return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
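
The elevator.c change above replaces a single back-merge attempt with a loop: once rq has been folded into __rq, the merged request may in turn back-merge with yet another request already in the hash. A toy userspace model of that chain (illustrative only; find_ending_at() and struct toy_rq stand in for elv_rqhash_find() and real requests):

#include <stdbool.h>
#include <stdio.h>

/* Toy model, not kernel code. */
struct toy_rq {
	unsigned long start, len;
	bool live;
};

static struct toy_rq queued[] = {
	{ 0, 4, true },		/* sectors 0..3 */
	{ 4, 4, true },		/* sectors 4..7 */
};

/* Stand-in for elv_rqhash_find(): a queued request that ends at @pos. */
static struct toy_rq *find_ending_at(unsigned long pos)
{
	for (unsigned i = 0; i < sizeof(queued) / sizeof(queued[0]); i++)
		if (queued[i].live && queued[i].start + queued[i].len == pos)
			return &queued[i];
	return NULL;
}

int main(void)
{
	struct toy_rq rq = { 8, 4, true };	/* new request, sectors 8..11 */
	struct toy_rq *cur = &rq;
	int merges = 0;

	while (1) {
		struct toy_rq *prev = find_ending_at(cur->start);
		if (!prev)
			break;
		/* back-merge: fold @cur into @prev, then retry with the result */
		prev->len += cur->len;
		cur->live = false;
		cur = prev;
		merges++;
	}
	printf("%d merges, final request covers sectors %lu..%lu\n",
	       merges, cur->start, cur->start + cur->len - 1);
	return 0;
}

Without the retry only the first of the two merges would happen; with it the whole contiguous chain collapses into one request, which is the "recursive merge" the commit message reinstates.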
diff --git a/block/genhd.c b/block/genhd.c
index 6cace663a80e..2a6fdf539a69 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1245,7 +1245,7 @@ EXPORT_SYMBOL(blk_lookup_devt);
 
 struct gendisk *alloc_disk(int minors)
 {
-	return alloc_disk_node(minors, -1);
+	return alloc_disk_node(minors, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(alloc_disk);
 
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index cb5f0a3f1b03..75a54e1adbb5 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -234,8 +234,8 @@ config KARMA_PARTITION
 	  uses a proprietary partition table.
 
 config EFI_PARTITION
-	bool "EFI GUID Partition support"
-	depends on PARTITION_ADVANCED
+	bool "EFI GUID Partition support" if PARTITION_ADVANCED
+	default y
 	select CRC32
 	help
 	  Say Y here if you would like to use hard disks under Linux which