Diffstat (limited to 'include')
-rw-r--r--  include/linux/cgroup_subsys.h   |   6
-rw-r--r--  include/linux/drbd.h            |   5
-rw-r--r--  include/linux/drbd_limits.h     |  11
-rw-r--r--  include/linux/idr.h             |  10
-rw-r--r--  include/linux/lru_cache.h       |   1
-rw-r--r--  include/linux/rwsem.h           |  10
-rw-r--r--  include/linux/sched.h           |   4
-rw-r--r--  include/trace/events/bcache.h   | 271
8 files changed, 306 insertions(+), 12 deletions(-)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index f204a7a9cf38..6e7ec64b69ab 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -78,3 +78,9 @@ SUBSYS(hugetlb)
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_BCACHE
+SUBSYS(bcache)
+#endif
+
+/* */
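
A note on what the one-line SUBSYS(bcache) addition buys: cgroup_subsys.h is an x-macro header that the cgroup core includes several times with different definitions of SUBSYS(), so listing a subsystem here is enough to generate its ID, name-table entry, and related plumbing. A minimal sketch of the pattern (simplified, not the kernel's exact expansion):

/* Sketch of the x-macro include pattern behind SUBSYS(); the real
 * definitions live in include/linux/cgroup.h and kernel/cgroup.c. */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
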
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 0c5a18ec322c..1b4d4ee1168f 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -52,7 +52,7 @@
 #endif
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.2"
+#define REL_VERSION "8.4.3"
 #define API_VERSION 1
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 101
@@ -319,7 +319,8 @@ enum drbd_state_rv {
 	SS_IN_TRANSIENT_STATE = -18,	/* Retry after the next state change */
 	SS_CONCURRENT_ST_CHG = -19,	/* Concurrent cluster side state change! */
 	SS_O_VOL_PEER_PRI = -20,
-	SS_AFTER_LAST_ERROR = -21,	/* Keep this at bottom */
+	SS_OUTDATE_WO_CONN = -21,
+	SS_AFTER_LAST_ERROR = -22,	/* Keep this at bottom */
 };
 
 /* from drbd_strings.c */
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 1fa19c5f5e64..1fedf2b17cc8 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -126,13 +126,12 @@
 #define DRBD_RESYNC_RATE_DEF 250
 #define DRBD_RESYNC_RATE_SCALE 'k'  /* kilobytes */
 
-  /* less than 7 would hit performance unnecessarily.
-   * 919 slots context information per transaction,
-   * 32k activity log, 4k transaction size,
-   * one transaction in flight:
-   * 919 * 7 = 6433 */
+  /* less than 7 would hit performance unnecessarily. */
 #define DRBD_AL_EXTENTS_MIN  7
-#define DRBD_AL_EXTENTS_MAX  6433
+  /* we use u16 as "slot number", (u16)~0 is "FREE".
+   * If you use >= 292 kB on-disk ring buffer,
+   * this is the maximum you can use: */
+#define DRBD_AL_EXTENTS_MAX  0xfffe
 #define DRBD_AL_EXTENTS_DEF  1237
 #define DRBD_AL_EXTENTS_SCALE '1'
 
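
For context on the new maximum: the comment in the hunk says slot numbers are u16 with (u16)~0 reserved as the "FREE" marker, which is where 0xfffe comes from. A tiny stand-alone check of that arithmetic (plain C, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t free_marker = (uint16_t)~0;	/* 0xffff, reserved as "FREE" */

	/* the largest usable slot number is one below the marker */
	assert(free_marker - 1 == 0xfffe);	/* == DRBD_AL_EXTENTS_MAX */
	return 0;
}
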
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a470ac3ef49d..871a213a8477 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -124,11 +124,13 @@ static inline void *idr_find(struct idr *idr, int id)
  * @idp:     idr handle
  * @entry:   the type * to use as cursor
  * @id:      id entry's key
+ *
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL.  This
+ * is convenient for a "not found" value.
  */
 #define idr_for_each_entry(idp, entry, id)			\
-	for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
-	     entry != NULL;                                             \
-	     ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
 
 /*
  * Don't use the following functions.  These exist only to suppress
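
The simplified macro reads better at its call sites. A short usage sketch (the struct and function names below are made up for illustration); note how it relies on the documented behaviour that @entry ends up NULL after normal termination:

#include <linux/idr.h>
#include <linux/kernel.h>

struct my_obj {			/* hypothetical payload type */
	int payload;
};

static void dump_objects(struct idr *objects)
{
	struct my_obj *obj;
	int id;

	/* id and obj need no initialization; obj is NULL once the loop ends */
	idr_for_each_entry(objects, obj, id)
		pr_info("id %d -> payload %d\n", id, obj->payload);
}
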
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 4019013c6593..46262284de47 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -256,6 +256,7 @@ extern void lc_destroy(struct lru_cache *lc);
 extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
 extern void lc_del(struct lru_cache *lc, struct lc_element *element);
 
+extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8da67d625e13..0616ffe45702 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -133,10 +133,20 @@ do { \
 	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
 } while (0);
 
+/*
+ * Take/release a lock when not the owner will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ *   proper abstraction for this case is completions. ]
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
 #else
 # define down_read_nested(sem, subclass)		down_read(sem)
 # define down_write_nest_lock(sem, nest_lock)		down_write(sem)
 # define down_write_nested(sem, subclass)		down_write(sem)
+# define down_read_non_owner(sem)			down_read(sem)
+# define up_read_non_owner(sem)				up_read(sem)
 #endif
 
 #endif /* _LINUX_RWSEM_H */
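
A sketch of the asymmetric usage these helpers exist for: the semaphore is acquired in one context and released from another (for bcache, an I/O submit path and its completion), which plain down_read()/up_read() owner tracking is not meant for. The struct, callback, and function names below are illustrative, not taken from the driver:

#include <linux/rwsem.h>
#include <linux/bio.h>

struct my_writeback {			/* hypothetical driver state */
	struct rw_semaphore	lock;
};

static void my_io_done(struct bio *bio, int error)
{
	struct my_writeback *wb = bio->bi_private;

	/* released from the completion context, not by the submitter */
	up_read_non_owner(&wb->lock);
}

static void my_submit(struct my_writeback *wb, struct bio *bio)
{
	/* taken here, but a different task/context will drop it */
	down_read_non_owner(&wb->lock);
	bio->bi_private = wb;
	bio->bi_end_io = my_io_done;
	generic_make_request(bio);
}

As the new comment in the header says, a completion is usually the better abstraction for this hand-off; the non-owner API exists for the cases where a semaphore is already the natural fit.
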
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 022c085ac3c5..caa8f4d0186b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1411,6 +1411,10 @@ struct task_struct {
 #ifdef CONFIG_UPROBES
 	struct uprobe_task *utask;
 #endif
+#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
+	unsigned int	sequential_io;
+	unsigned int	sequential_io_avg;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
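
The two new per-task counters let bcache track how sequential the current task's I/O is. The sketch below only illustrates how fields like these could be updated; bcache's real cutoff heuristic lives in the driver, and the threshold and function name here are invented:

#include <linux/sched.h>
#include <linux/bio.h>

/* Illustrative only: accumulate consecutive I/O issued by the current
 * task and keep a running average across bursts. */
static bool task_io_looks_sequential(struct bio *bio, sector_t last_end)
{
	struct task_struct *task = current;

	if (bio->bi_sector == last_end)
		task->sequential_io += bio->bi_size;
	else
		task->sequential_io = bio->bi_size;

	task->sequential_io_avg =
		(task->sequential_io_avg + task->sequential_io) / 2;

	return task->sequential_io_avg >= 1 << 20;	/* made-up 1 MiB cutoff */
}
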
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
new file mode 100644
index 000000000000..3cc5a0b278c3
--- /dev/null
+++ b/include/trace/events/bcache.h
@@ -0,0 +1,271 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bcache
+
+#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BCACHE_H
+
+#include <linux/tracepoint.h>
+
+struct search;
+
+DECLARE_EVENT_CLASS(bcache_request,
+
+	TP_PROTO(struct search *s, struct bio *bio),
+
+	TP_ARGS(s, bio),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(unsigned int,	orig_major		)
+		__field(unsigned int,	orig_minor		)
+		__field(sector_t,	sector			)
+		__field(dev_t,		orig_sector		)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__array(char,		comm,	TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->orig_major	= s->d->disk->major;
+		__entry->orig_minor	= s->d->disk->first_minor;
+		__entry->sector		= bio->bi_sector;
+		__entry->orig_sector	= bio->bi_sector - 16;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->comm,
+		  __entry->orig_major, __entry->orig_minor,
+		  (unsigned long long)__entry->orig_sector)
+);
+
+DEFINE_EVENT(bcache_request, bcache_request_start,
+
+	TP_PROTO(struct search *s, struct bio *bio),
+
+	TP_ARGS(s, bio)
+);
+
+DEFINE_EVENT(bcache_request, bcache_request_end,
+
+	TP_PROTO(struct search *s, struct bio *bio),
+
+	TP_ARGS(s, bio)
+);
+
+DECLARE_EVENT_CLASS(bcache_bio,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(sector_t,	sector			)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__array(char,		comm,	TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("%d,%d %s %llu + %u [%s]",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->comm)
+);
+
+
+DEFINE_EVENT(bcache_bio, bcache_passthrough,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_cache_hit,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_cache_miss,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_retry,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_writethrough,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_writeback,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_skip,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_btree_read,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_btree_write,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_dirty,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_dirty,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_moving,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_moving,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_journal_write,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DECLARE_EVENT_CLASS(bcache_cache_bio,
+
+	TP_PROTO(struct bio *bio,
+		 sector_t orig_sector,
+		 struct block_device *orig_bdev),
+
+	TP_ARGS(bio, orig_sector, orig_bdev),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(dev_t,		orig_dev		)
+		__field(sector_t,	sector			)
+		__field(sector_t,	orig_sector		)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__array(char,		comm,	TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->orig_dev	= orig_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->orig_sector	= orig_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d %llu)",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->comm,
+		  MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
+		  (unsigned long long)__entry->orig_sector)
+);
+
+DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert,
+
+	TP_PROTO(struct bio *bio,
+		 sector_t orig_sector,
+		 struct block_device *orig_bdev),
+
+	TP_ARGS(bio, orig_sector, orig_bdev)
+);
+
+DECLARE_EVENT_CLASS(bcache_gc,
+
+	TP_PROTO(uint8_t *uuid),
+
+	TP_ARGS(uuid),
+
+	TP_STRUCT__entry(
+		__field(uint8_t *,	uuid)
+	),
+
+	TP_fast_assign(
+		__entry->uuid = uuid;
+	),
+
+	TP_printk("%pU", __entry->uuid)
+);
+
+
+DEFINE_EVENT(bcache_gc, bcache_gc_start,
+
+	TP_PROTO(uint8_t *uuid),
+
+	TP_ARGS(uuid)
+);
+
+DEFINE_EVENT(bcache_gc, bcache_gc_end,
+
+	TP_PROTO(uint8_t *uuid),
+
+	TP_ARGS(uuid)
+);
+
+#endif /* _TRACE_BCACHE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
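
For readers unfamiliar with the TRACE_EVENT machinery: each DEFINE_EVENT() above generates a trace_<name>() hook that the driver calls at the interesting spot, and exactly one .c file instantiates the event bodies by defining CREATE_TRACE_POINTS before including the header. A minimal sketch (the function name below is illustrative, not taken from the bcache sources):

/* In one driver source file only: emit the tracepoint bodies. */
#define CREATE_TRACE_POINTS
#include <trace/events/bcache.h>

/* Elsewhere in the driver, the generated hooks are plain calls that
 * compile down to a no-op patch site while the event is disabled. */
static void my_handle_request(struct search *s, struct bio *bio)
{
	trace_bcache_request_start(s, bio);

	/* ... service the request ... */

	trace_bcache_request_end(s, bio);
}

Because TRACE_SYSTEM is "bcache", the events appear under /sys/kernel/debug/tracing/events/bcache/ once the code is built in, and can be enabled like any other tracepoint group.
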