#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic);	/* destructor */
	void (*exit)(struct as_io_context *aic);	/* called on task exit */

	unsigned long state;
	atomic_t nr_queued;		/* queued reads & sync writes */
	atomic_t nr_dispatched;		/* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	void *key;
	unsigned long dead_key;

	struct cfq_queue *cfqq[2];	/* async and sync queues */

	struct io_context *ioc;

	/* Thinktime tracking, as in struct as_io_context */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	struct list_head queue_list;
	struct hlist_node cic_list;

	void (*dtor)(struct io_context *);	/* destructor */
	void (*exit)(struct io_context *);	/* called on task exit */

	struct rcu_head rcu_head;
};
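
/*
 * Illustrative sketch (not part of this header): ttime_total, ttime_samples
 * and ttime_mean hold a fixed-point running average of the process' "think
 * time" between I/Os. CFQ refreshes them on request completion roughly like
 * this; the exact weighting lives in block/cfq-iosched.c:
 *
 *	think = jiffies - cic->last_end_request;
 *	cic->ttime_samples = (7 * cic->ttime_samples + 256) / 8;
 *	cic->ttime_total   = (7 * cic->ttime_total + 256 * think) / 8;
 *	cic->ttime_mean    = (cic->ttime_total + 128) / cic->ttime_samples;
 */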

/*
 * I/O subsystem state of the associated processes. It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;

#ifdef CONFIG_BLK_CGROUP
	unsigned short cgroup_changed;
#endif

	/*
	 * For request batching
	 */
	unsigned long last_waited;	/* Time last woken after wait for request */
	int nr_batch_requests;		/* Number of requests left in the batch */

	struct as_io_context *aic;	/* anticipatory scheduler state */
	struct radix_tree_root radix_root;	/* per-queue cfq_io_contexts */
	struct hlist_head cic_list;	/* all cfq_io_contexts for this ioc */
	void *ioc_data;			/* last-hit cfq_io_context cache */
};
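
/*
 * Illustrative sketch (not part of this header): radix_root maps a per-queue
 * key to that queue's cfq_io_context, ioc_data caches the last hit, and
 * cic_list links every cfq_io_context owned by this ioc for teardown.
 * A lookup is approximately (see cfq_cic_lookup() in block/cfq-iosched.c):
 *
 *	rcu_read_lock();
 *	cic = radix_tree_lookup(&ioc->radix_root, key);
 *	rcu_read_unlock();
 */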

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * if ref count is zero, don't allow sharing (ioc is going away, it's
	 * a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
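
/*
 * Illustrative sketch (not part of this header): on fork with CLONE_IO,
 * copy_io() in kernel/fork.c uses ioc_task_link() to share the parent's
 * context, roughly:
 *
 *	if (clone_flags & CLONE_IO) {
 *		tsk->io_context = ioc_task_link(ioc);
 *		if (unlikely(!tsk->io_context))
 *			return -ENOMEM;
 *	}
 */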

#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif
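
/*
 * Illustrative usage (sketch, not part of this header): callers typically
 * pair get_io_context() with put_io_context(); "node" below stands for the
 * NUMA node to allocate on (-1 for no preference):
 *
 *	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
 *	if (ioc) {
 *		...inspect ioc->ioprio and friends...
 *		put_io_context(ioc);
 *	}
 */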
#endif