author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-22 20:07:18 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-22 20:07:18 -0400
commit    | a2887097f25cd38cadfc11d10769e2b349fb5eca (patch)
tree      | cd4adcb305365d6ba9acd2c02d4eb9d0125c6f8d /block/blk-core.c
parent    | 8abfc6e7a45eb74e51904bbae676fae008b11366 (diff)
parent    | 005a1d15f5a6b2bb4ada80349513effbf22b4588 (diff)
Merge branch 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block: (46 commits)
xen-blkfront: disable barrier/flush write support
Added blk-lib.c and blk-barrier.c was renamed to blk-flush.c
block: remove BLKDEV_IFL_WAIT
aic7xxx_old: removed unused 'req' variable
block: remove the BH_Eopnotsupp flag
block: remove the BLKDEV_IFL_BARRIER flag
block: remove the WRITE_BARRIER flag
swap: do not send discards as barriers
fat: do not send discards as barriers
ext4: do not send discards as barriers
jbd2: replace barriers with explicit flush / FUA usage
jbd2: Modify ASYNC_COMMIT code to not rely on queue draining on barrier
jbd: replace barriers with explicit flush / FUA usage
nilfs2: replace barriers with explicit flush / FUA usage
reiserfs: replace barriers with explicit flush / FUA usage
gfs2: replace barriers with explicit flush / FUA usage
btrfs: replace barriers with explicit flush / FUA usage
xfs: replace barriers with explicit flush / FUA usage
block: pass gfp_mask and flags to sb_issue_discard
dm: convey that all flushes are processed as empty
...
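The common theme of the series is the removal of opaque barrier requests: filesystems now tag writes with explicit REQ_FLUSH / REQ_FUA semantics and the block layer sequences or strips those flags itself. As a rough, illustrative sketch only (not code from any commit above; the function name is made up), a journal-style commit block would be submitted under the new scheme roughly like this:

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blk_types.h>

/*
 * Illustrative only: write a commit block with an explicit preflush
 * (REQ_FLUSH) and a forced-unit-access write (REQ_FUA) instead of the
 * removed WRITE_BARRIER.  The block layer decomposes these flags
 * according to what the underlying queue advertises.
 */
static int example_write_commit_block(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);	/* reference dropped by the end_io handler */
	bh->b_end_io = end_buffer_write_sync;

	/* flush the device cache, then write the block durably */
	return submit_bh(WRITE_SYNC | REQ_FLUSH | REQ_FUA, bh);
}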
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 72
1 file changed, 39 insertions(+), 33 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 500eb859886e..45141469e89e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -139,7 +139,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 {
 	struct request_queue *q = rq->q;
 
-	if (&q->bar_rq != rq) {
+	if (&q->flush_rq != rq) {
 		if (error)
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
@@ -163,13 +163,12 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		if (bio->bi_size == 0)
 			bio_endio(bio, error);
 	} else {
-
 		/*
-		 * Okay, this is the barrier request in progress, just
-		 * record the error;
+		 * Okay, this is the sequenced flush request in
+		 * progress, just record the error;
 		 */
-		if (error && !q->orderr)
-			q->orderr = error;
+		if (error && !q->flush_err)
+			q->flush_err = error;
 	}
 }
 
@@ -531,6 +530,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_LIST_HEAD(&q->pending_flushes);
 	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1053,22 +1053,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_insert_request);
 
-/*
- * add-request adds a request to the linked list.
- * queue lock is held and interrupts disabled, as we muck with the
- * request queue list.
- */
-static inline void add_request(struct request_queue *q, struct request *req)
-{
-	drive_stat_acct(req, 1);
-
-	/*
-	 * elevator indicated where it wants this request to be
-	 * inserted at elevator_merge time
-	 */
-	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				    unsigned long now)
 {
@@ -1217,13 +1201,16 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
 	const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
-	if ((bio->bi_rw & REQ_HARDBARRIER) &&
-	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
+	/* REQ_HARDBARRIER is no more */
+	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
+		"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
+
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1233,7 +1220,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
+	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+		where = ELEVATOR_INSERT_FRONT;
+		goto get_rq;
+	}
+
+	if (elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1330,7 +1322,10 @@ get_rq:
 		req->cpu = blk_cpu_to_group(smp_processor_id());
 	if (queue_should_plug(q) && elv_queue_empty(q))
 		blk_plug_device(q);
-	add_request(q, req);
+
+	/* insert the request into the elevator */
+	drive_stat_acct(req, 1);
+	__elv_add_request(q, req, where, 0);
 out:
 	if (unplug || !queue_should_plug(q))
 		__generic_unplug_device(q);
@@ -1530,6 +1525,19 @@ static inline void __generic_make_request(struct bio *bio)
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
+		/*
+		 * Filter flush bio's early so that make_request based
+		 * drivers without flush support don't have to worry
+		 * about them.
+		 */
+		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+			if (!nr_sectors) {
+				err = 0;
+				goto end_io;
+			}
+		}
+
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    (!blk_queue_discard(q) ||
 		     ((bio->bi_rw & REQ_SECURE) &&
@@ -1794,11 +1802,11 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 static void blk_account_io_done(struct request *req)
 {
 	/*
-	 * Account IO completion. bar_rq isn't accounted as a normal
-	 * IO on queueing nor completion. Accounting the containing
-	 * request is enough.
+	 * Account IO completion. flush_rq isn't accounted as a
+	 * normal IO on queueing nor completion. Accounting the
+	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
+	if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
@@ -2523,9 +2531,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
-	if (src->cmd_flags & REQ_DISCARD)
-		dst->cmd_flags |= REQ_DISCARD;
+	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
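The q->flush_flags check added to __generic_make_request above only passes flush/FUA bios through when the driver has declared what its hardware supports. A sketch of that declaration side, assuming a driver probe path (the function name here is illustrative; blk_queue_flush() is the interface this series introduces in place of blk_queue_ordered()):

#include <linux/blkdev.h>

/*
 * Illustrative only: a device with a volatile write cache that also
 * honours FUA writes advertises both capabilities, so flush/FUA bios
 * reach it instead of being stripped by the check above.
 */
static void example_declare_flush_support(struct request_queue *q)
{
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}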