Diffstat (limited to 'block/blk.h')

 block/blk.h | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index d6b911ac002c..1e675e5ade02 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,6 +51,8 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
 
+struct request *blk_do_flush(struct request_queue *q, struct request *rq);
+
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -58,7 +60,11 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	while (1) {
 		while (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
+			if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
+			    rq == &q->flush_rq)
+				return rq;
+			rq = blk_do_flush(q, rq);
+			if (rq)
 				return rq;
 		}
 
@@ -110,10 +116,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
-
 /*
  * Return the threshold (number of used requests) at which the queue is
  * considered to be congested.  It include a little hysteresis to keep the
@@ -132,14 +134,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define rq_for_each_integrity_segment(bvl, _rq, _iter) \
-	__rq_for_each_bio(_iter.bio, _rq) \
-		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
-
-#endif /* BLK_DEV_INTEGRITY */
-
 static inline int blk_cpu_to_group(int cpu)
 {
 	int group = NR_CPUS;
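
For context on the hunk that rewrites __elv_next_request(): the single blk_do_ordered() call is replaced by an inline flag test plus a call into the new blk_do_flush() helper. Below is a standalone sketch of that decision, assuming simplified stand-in types; the REQ_FLUSH/REQ_FUA values and the stub body are illustrative, and only the identifiers that appear in the diff are taken from it.

/* Standalone sketch of the new dispatch decision in __elv_next_request().
 * Types are simplified stand-ins; the flag values below are illustrative,
 * not the kernel's actual bit assignments. */

#define REQ_FLUSH (1u << 0)	/* illustrative flag values */
#define REQ_FUA   (1u << 1)

struct request {
	unsigned int cmd_flags;
};

struct request_queue {
	struct request flush_rq;	/* queue-owned internal flush request */
};

/* Stub standing in for the real blk_do_flush(): the real helper feeds a
 * FLUSH/FUA request into the flush state machine and returns a request
 * that is ready to dispatch now, or NULL if dispatch must wait. */
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
	(void)q;
	return rq;
}

/* Mirrors the decision the patch adds to __elv_next_request(). */
struct request *dispatch_decision(struct request_queue *q, struct request *rq)
{
	/* Plain requests, and the queue's own flush_rq, go out directly. */
	if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) || rq == &q->flush_rq)
		return rq;

	/* FLUSH/FUA requests are routed through the flush machinery,
	 * which may hold them back by returning NULL. */
	return blk_do_flush(q, rq);
}

Judging from the hunk alone, the design point is that requests without FLUSH/FUA semantics never enter the flush logic at all: the cheap flag test happens inline in the header, and only flush-related requests pay for the call into blk_do_flush(), which can defer them by returning NULL.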
