Diffstat (limited to 'block/blk.h')
 block/blk.h |   48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index c79f30e1df52..e5c579769963 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,42 @@ void __blk_queue_free_tags(struct request_queue *q);
 
 void blk_unplug_work(struct work_struct *work);
 void blk_unplug_timeout(unsigned long data);
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+	REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request, make
+ * sure that only one of them succeeds
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ssize_t part_timeout_store(struct device *, struct device_attribute *,
+				const char *, size_t);
+#else
+static inline int blk_should_fake_timeout(struct request_queue *q)
+{
+	return 0;
+}
+#endif
 
 struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
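The comment on blk_mark_rq_complete() above describes a race between the error-handling timer and normal I/O completion: both call blk_mark_rq_complete(), and only the caller that wins the test_and_set_bit() may finish the request. A minimal sketch of that calling pattern, using hypothetical caller names that are not part of this patch, would be:

	/* Hypothetical sketch: both paths race to claim the request. */
	static void example_io_completion(struct request *rq)
	{
		if (blk_mark_rq_complete(rq))
			return;	/* the timeout handler already claimed rq */
		/* ...complete the request normally... */
	}

	static void example_timeout_handler(struct request *rq)
	{
		if (blk_mark_rq_complete(rq))
			return;	/* normal completion already claimed rq */
		/* ...start error handling for the timed-out request... */
	}

Whichever path loses the race simply backs off; blk_clear_rq_complete() resets the flag so a request can go through this handshake again, e.g. if it is retried.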
@@ -59,4 +95,16 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 #endif /* BLK_DEV_INTEGRITY */
 
+static inline int blk_cpu_to_group(int cpu)
+{
+#ifdef CONFIG_SCHED_MC
+	cpumask_t mask = cpu_coregroup_map(cpu);
+	return first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
+	return first_cpu(per_cpu(cpu_sibling_map, cpu));
+#else
+	return cpu;
+#endif
+}
+
 #endif