Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c  |  7
-rw-r--r--  block/blk-core.c     | 13
-rw-r--r--  block/blk-exec.c     |  2
-rw-r--r--  block/blk-merge.c    |  4
-rw-r--r--  block/blk.h          |  6
-rw-r--r--  block/cfq-iosched.c  | 19
-rw-r--r--  block/elevator.c     | 16
7 files changed, 39 insertions(+), 28 deletions(-)
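
Every hunk in this diff applies the same mechanical conversion: request-type and request-flag wrapper macros are open-coded as direct tests of rq->cmd_type and rq->cmd_flags. For reference, the equivalences the hunks rely on can be written out as macro definitions; this is a sketch reconstructed from the hunks themselves, not a verbatim copy of the include/linux/blkdev.h definitions being removed:

/*
 * Wrapper-to-open-code mapping implied by the hunks below.
 * Illustrative reconstruction only, derived from this diff.
 */

/* cmd_type wrappers become direct comparisons */
#define blk_fs_request(rq)		((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)		((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)

/* cmd_flags wrappers become direct bit tests */
#define blk_sorted_rq(rq)		((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)		((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_discard_rq(rq)		((rq)->cmd_flags & REQ_DISCARD)
#define blk_rq_io_stat(rq)		((rq)->cmd_flags & REQ_IO_STAT)
#define rq_is_meta(rq)			((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq)			((rq)->cmd_flags & REQ_NOIDLE)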
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d710c9d403b..74e404393172 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -79,7 +79,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
-	if (!blk_fs_request(rq))
+	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;
 
	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -236,7 +236,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
	struct request *rq = *rqp;
-	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+		(rq->cmd_flags & REQ_HARDBARRIER);
 
	if (!q->ordseq) {
		if (!is_barrier)
@@ -261,7 +262,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
	 */
 
	/* Special requests are not subject to ordering rules. */
-	if (!blk_fs_request(rq) &&
+	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;
 
diff --git a/block/blk-core.c b/block/blk-core.c
index b4131d29148c..dca43a31e725 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
	printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO " cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
@@ -1796,7 +1796,7 @@ struct request *blk_peek_request(struct request_queue *q)
			 * sees this request (possibly after
			 * requeueing). Notify IO scheduler.
			 */
-			if (blk_sorted_rq(rq))
+			if (rq->cmd_flags & REQ_SORTED)
				elv_activate_rq(q, rq);
 
			/*
@@ -1984,10 +1984,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
	 * TODO: tj: This is too subtle. It would be better to let
	 * low level drivers do what they see fit.
	 */
-	if (blk_fs_request(req))
+	if (req->cmd_type == REQ_TYPE_FS)
		req->errors = 0;
 
-	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+	if (error && req->cmd_type == REQ_TYPE_FS &&
+	    !(req->cmd_flags & REQ_QUIET)) {
		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2075,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
		req->buffer = bio_data(req->bio);
 
	/* update sector only for requests with clear definition of sector */
-	if (blk_fs_request(req) || blk_discard_rq(req))
+	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
		req->__sector += total_bytes >> 9;
 
	/* mixed attributes always follow the first bio */
@@ -2127,7 +2128,7 @@ static void blk_finish_request(struct request *req, int error)
 
	BUG_ON(blk_queued_rq(req));
 
-	if (unlikely(laptop_mode) && blk_fs_request(req))
+	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);
 
	blk_delete_timer(req);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 49557e91f0da..e1672f14840e 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
	__elv_add_request(q, rq, where, 1);
	__generic_unplug_device(q);
	/* the queue is stopped so it won't be plugged+unplugged */
-	if (blk_pm_resume_request(rq))
+	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
		q->request_fn(q);
	spin_unlock_irq(q->queue_lock);
 }
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5e7dc9973458..87e4fb7d0e98 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -226,7 +226,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 {
	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);
@@ -250,7 +250,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 {
	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);
diff --git a/block/blk.h b/block/blk.h
index 5ee3d7e72feb..6e7dc87141e4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
  */
 static inline int blk_do_io_stat(struct request *rq)
 {
-	return rq->rq_disk && blk_rq_io_stat(rq) &&
-		(blk_fs_request(rq) || blk_discard_rq(rq));
+	return rq->rq_disk &&
+	       (rq->cmd_flags & REQ_IO_STAT) &&
+	       (rq->cmd_type == REQ_TYPE_FS ||
+	        (rq->cmd_flags & REQ_DISCARD));
 }
 
 #endif
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7982b830db58..d4edeb8fceb8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
-	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+	if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
		return rq1;
-	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+	else if ((rq2->cmd_flags & REQ_RW_META) &&
+		 !(rq1->cmd_flags & REQ_RW_META))
		return rq2;
 
	s1 = blk_rq_pos(rq1);
@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
						rq_data_dir(rq), rq_is_sync(rq));
-	if (rq_is_meta(rq)) {
+	if (rq->cmd_flags & REQ_RW_META) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
-	if (rq_is_meta(rq) && !cfqq->meta_pending)
+	if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
		return true;
 
	/*
@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
	struct cfq_io_context *cic = RQ_CIC(rq);
 
	cfqd->rq_queued++;
-	if (rq_is_meta(rq))
+	if (rq->cmd_flags & REQ_RW_META)
		cfqq->meta_pending++;
 
	cfq_update_io_thinktime(cfqd, cic);
@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
	unsigned long now;
 
	now = jiffies;
-	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+		     !!(rq->cmd_flags & REQ_NOIDLE));
 
	cfq_update_hw_tag(cfqd);
 
@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
-			cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+			cfqd->noidle_tree_requires_idle |=
+				!(rq->cmd_flags & REQ_NOIDLE);
			/*
			 * Idling is enabled for SYNC_WORKLOAD.
			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-			 * only if we processed at least one !rq_noidle request
+			 * only if we processed at least one !REQ_NOIDLE request
			 */
			if (cfqd->serving_type == SYNC_WORKLOAD
			    || cfqd->noidle_tree_requires_idle
diff --git a/block/elevator.c b/block/elevator.c
index 923a9139106c..aa99b59c03d6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -428,7 +428,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);
 
-		if (blk_discard_rq(rq) != blk_discard_rq(pos))
+		if ((rq->cmd_flags & REQ_DISCARD) !=
+		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
@@ -558,7 +559,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq))
+		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}
 
@@ -644,7 +645,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
		break;
 
	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
@@ -716,7 +718,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		/*
		 * toggle ordered color
		 */
-		if (blk_barrier_rq(rq))
+		if (rq->cmd_flags & REQ_HARDBARRIER)
			q->ordcolor ^= 1;
 
		/*
@@ -729,7 +731,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		 * this request is scheduling boundary, update
		 * end_sector
		 */
-		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+		if (rq->cmd_type == REQ_TYPE_FS ||
+		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
@@ -843,7 +846,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+		if ((rq->cmd_flags & REQ_SORTED) &&
+		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
 
849 853