author    Jens Axboe <jens.axboe@oracle.com>  2009-10-07 14:02:57 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-10-07 14:02:57 -0400
commit    a6151c3a5c8e1ff5a28450bc8d6a99a2a0add0a7 (patch)
tree      47896def7d76c1f81d3c358a9aaee158a253b2b6 /block
parent    ec60e4f6749daf535329dac571293cf19c627aff (diff)
cfq-iosched: apply bool value where we return 0/1
Saves 16 bytes of text, woohoo. But the more important point is that it
makes the code more readable when returning bool for 0/1 cases.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
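The change is mechanical throughout: helpers that answered a yes/no question
with int 0/1 now return bool, and bodies of the form "if (cond) return 1;
return 0;" collapse into a single return of the condition. A minimal sketch of
the pattern as a stand-alone C99 translation unit rather than kernel code
(the is_sync helpers and SYNC_BIT constant are hypothetical, for illustration
only):

	#include <stdbool.h>

	#define SYNC_BIT 0x1	/* hypothetical flag bit */

	/* Before: int carrying a truth value, normalized by hand. */
	static int is_sync_old(unsigned int flags)
	{
		if (flags & SYNC_BIT)
			return 1;

		return 0;
	}

	/* After: bool states the contract; conversion to bool already
	 * yields 0 or 1, so the branch disappears. */
	static bool is_sync_new(unsigned int flags)
	{
		return flags & SYNC_BIT;
	}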
Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c  68
1 file changed, 31 insertions, 37 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b35cc56dfd94..a592afcf1e6d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					     struct io_context *);
@@ -241,27 +241,24 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-					    int is_sync)
+					    bool is_sync)
 {
-	return cic->cfqq[!!is_sync];
+	return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-				struct cfq_queue *cfqq, int is_sync)
+				struct cfq_queue *cfqq, bool is_sync)
 {
-	cic->cfqq[!!is_sync] = cfqq;
+	cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		return 1;
-
-	return 0;
+	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
@@ -288,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 				 unsigned short prio)
 {
 	const int base_slice = cfqd->cfq_slice[sync];
@@ -316,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
 		return 0;
@@ -491,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-				 int add_front)
+				 bool add_front)
 {
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
@@ -853,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 * Disallow merge of a sync bio into an async request.
 	 */
 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-		return 0;
+		return false;
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
@@ -861,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 	 */
 	cic = cfq_cic_lookup(cfqd, current->io_context);
 	if (!cic)
-		return 0;
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq == RQ_CFQQ(rq))
-		return 1;
-
-	return 0;
+	return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
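The cfq_allow_merge() hunk shows the second half of the pattern: a comparison
in C already evaluates to 0 or 1, so returning it directly replaces the
if/return 1/return 0 ladder. Sketched stand-alone with hypothetical names
(not kernel code):

	#include <stdbool.h>

	struct queue { int id; };

	/* Before: a branch whose only job is to turn == into 0/1. */
	static int same_queue_old(const struct queue *a, const struct queue *b)
	{
		if (a == b)
			return 1;

		return 0;
	}

	/* After: return the comparison itself. */
	static bool same_queue_new(const struct queue *a, const struct queue *b)
	{
		return a == b;
	}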
@@ -895,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int timed_out)
+		    bool timed_out)
 {
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -923,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -1035,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 					      struct cfq_queue *cur_cfqq,
-					      int probe)
+					      bool probe)
 {
 	struct cfq_queue *cfqq;
 
@@ -1676,7 +1670,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			  pid_t pid, int is_sync)
+			  pid_t pid, bool is_sync)
 {
 	RB_CLEAR_NODE(&cfqq->rb_node);
 	RB_CLEAR_NODE(&cfqq->p_node);
@@ -1696,7 +1690,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1760,7 +1754,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(ioc);
@@ -2017,7 +2011,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		   struct request *rq)
 {
@@ -2025,48 +2019,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
-		return 0;
+		return false;
 
 	if (cfq_slice_used(cfqq))
-		return 1;
+		return true;
 
 	if (cfq_class_idle(new_cfqq))
-		return 0;
+		return false;
 
 	if (cfq_class_idle(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 
 	/*
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return 1;
+		return true;
 
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 	 */
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-		return 1;
+		return true;
 
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return 0;
+		return false;
 
 	/*
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
 	if (cfq_rq_close(cfqd, rq))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /*
@@ -2331,7 +2325,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	const int is_sync = rq_is_sync(rq);
+	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 