diff options
Diffstat (limited to 'block/blk.h')
-rw-r--r-- | block/blk.h | 48 |
1 files changed, 48 insertions, 0 deletions
diff --git a/block/blk.h b/block/blk.h
index c79f30e1df52..e5c579769963 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,42 @@ void __blk_queue_free_tags(struct request_queue *q);
17 | 17 | ||
18 | void blk_unplug_work(struct work_struct *work); | 18 | void blk_unplug_work(struct work_struct *work); |
19 | void blk_unplug_timeout(unsigned long data); | 19 | void blk_unplug_timeout(unsigned long data); |
/* Request timeout handling (implemented in blk-timeout.c). */
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	/* bit index into rq->atomic_flags; set once the request has been
	 * claimed for completion (see blk_mark_rq_complete below) */
	REQ_ATOM_COMPLETE = 0,
};
30 | |||
31 | /* | ||
32 | * EH timer and IO completion will both attempt to 'grab' the request, make | ||
33 | * sure that only one of them suceeds | ||
34 | */ | ||
35 | static inline int blk_mark_rq_complete(struct request *rq) | ||
36 | { | ||
37 | return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); | ||
38 | } | ||
39 | |||
40 | static inline void blk_clear_rq_complete(struct request *rq) | ||
41 | { | ||
42 | clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); | ||
43 | } | ||
44 | |||
#ifdef CONFIG_FAIL_IO_TIMEOUT
/* Fault-injection hook: non-zero return forces a fake request timeout. */
int blk_should_fake_timeout(struct request_queue *);
/* sysfs show/store for the per-partition timeout fault-injection knob. */
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
/* Fault injection disabled: never fake a timeout. */
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif
20 | 56 | ||
21 | struct io_context *current_io_context(gfp_t gfp_flags, int node); | 57 | struct io_context *current_io_context(gfp_t gfp_flags, int node); |
22 | 58 | ||
@@ -59,4 +95,16 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) | |||
59 | 95 | ||
60 | #endif /* BLK_DEV_INTEGRITY */ | 96 | #endif /* BLK_DEV_INTEGRITY */ |
61 | 97 | ||
/*
 * Map a CPU to a representative of its scheduling group: the first CPU
 * of its core group (SCHED_MC), the first of its SMT sibling set
 * (SCHED_SMT), or the CPU itself when no grouping is configured.
 */
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	cpumask_t core_siblings = cpu_coregroup_map(cpu);

	return first_cpu(core_siblings);
#elif defined(CONFIG_SCHED_SMT)
	return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else
	return cpu;
#endif
}
109 | |||
62 | #endif | 110 | #endif |