author	Kent Overstreet <kmo@daterainc.com>	2014-01-23 07:42:58 -0500
committer	Kent Overstreet <kmo@daterainc.com>	2014-03-18 15:22:35 -0400
commit	3f5e0a34daed197aa55d0c6b466bb4cd03babb4f (patch)
tree	b272bd69976053f6ce802c896ec0a14431ee0bed /drivers/md
parent	3f6ef38110b6955327fea3105f004a3b61a3f65f (diff)
bcache: Kill dead cgroup code
This hasn't been used or even enabled in ages.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/Kconfig	|   8
-rw-r--r--	drivers/md/bcache/btree.c	|   4
-rw-r--r--	drivers/md/bcache/request.c	| 169
-rw-r--r--	drivers/md/bcache/request.h	|  18
-rw-r--r--	drivers/md/bcache/stats.c	|   3
5 files changed, 0 insertions(+), 202 deletions(-)
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 2638417b19aa..4d200883c505 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -24,11 +24,3 @@ config BCACHE_CLOSURES_DEBUG
 	  Keeps all active closures in a linked list and provides a debugfs
 	  interface to list them, which makes it possible to see asynchronous
 	  operations that get stuck.
-
-# cgroup code needs to be updated:
-#
-#config CGROUP_BCACHE
-#	bool "Cgroup controls for bcache"
-#	depends on BCACHE && BLK_CGROUP
-#	---help---
-#	TODO
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5f587ce57e3a..ea5a59e2d740 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -68,15 +68,11 @@
  * alloc_bucket() cannot fail. This should be true but is not completely
  * obvious.
  *
- * Make sure all allocations get charged to the root cgroup
- *
  * Plugging?
  *
  * If data write is less than hard sector size of ssd, round up offset in open
  * bucket to the next whole sector
  *
- * Also lookup by cgroup in get_open_bucket()
- *
  * Superblock needs to be fleshed out for multiple cache devices
  *
  * Add a sysfs tunable for the number of writeback IOs in flight
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 3e880869871f..15fff4f68a7c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -12,11 +12,9 @@
 #include "request.h"
 #include "writeback.h"
 
-#include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/hash.h>
 #include <linux/random.h>
-#include "blk-cgroup.h"
 
 #include <trace/events/bcache.h>
 
@@ -27,171 +25,13 @@ struct kmem_cache *bch_search_cache;
 
 static void bch_data_insert_start(struct closure *);
 
-/* Cgroup interface */
-
-#ifdef CONFIG_CGROUP_BCACHE
-static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
-
-static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
-{
-	struct cgroup_subsys_state *css;
-	return cgroup &&
-		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
-		? container_of(css, struct bch_cgroup, css)
-		: &bcache_default_cgroup;
-}
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
-{
-	struct cgroup_subsys_state *css = bio->bi_css
-		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
-		: task_subsys_state(current, bcache_subsys_id);
-
-	return css
-		? container_of(css, struct bch_cgroup, css)
-		: &bcache_default_cgroup;
-}
-
-static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
-			       struct file *file,
-			       char __user *buf, size_t nbytes, loff_t *ppos)
-{
-	char tmp[1024];
-	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
-					  cgroup_to_bcache(cgrp)->cache_mode + 1);
-
-	if (len < 0)
-		return len;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
-}
-
-static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
-			    const char *buf)
-{
-	int v = bch_read_string_list(buf, bch_cache_modes);
-	if (v < 0)
-		return v;
-
-	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
-	return 0;
-}
-
-static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	return cgroup_to_bcache(cgrp)->verify;
-}
-
-static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
-	cgroup_to_bcache(cgrp)->verify = val;
-	return 0;
-}
-
-static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_hits);
-}
-
-static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_misses);
-}
-
-static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
-				      struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_bypass_hits);
-}
-
-static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
-					struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_bypass_misses);
-}
-
-static struct cftype bch_files[] = {
-	{
-		.name		= "cache_mode",
-		.read		= cache_mode_read,
-		.write_string	= cache_mode_write,
-	},
-	{
-		.name		= "verify",
-		.read_u64	= bch_verify_read,
-		.write_u64	= bch_verify_write,
-	},
-	{
-		.name		= "cache_hits",
-		.read_u64	= bch_cache_hits_read,
-	},
-	{
-		.name		= "cache_misses",
-		.read_u64	= bch_cache_misses_read,
-	},
-	{
-		.name		= "cache_bypass_hits",
-		.read_u64	= bch_cache_bypass_hits_read,
-	},
-	{
-		.name		= "cache_bypass_misses",
-		.read_u64	= bch_cache_bypass_misses_read,
-	},
-	{ }	/* terminate */
-};
-
-static void init_bch_cgroup(struct bch_cgroup *cg)
-{
-	cg->cache_mode = -1;
-}
-
-static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
-{
-	struct bch_cgroup *cg;
-
-	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
-	if (!cg)
-		return ERR_PTR(-ENOMEM);
-	init_bch_cgroup(cg);
-	return &cg->css;
-}
-
-static void bcachecg_destroy(struct cgroup *cgroup)
-{
-	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
-	kfree(cg);
-}
-
-struct cgroup_subsys bcache_subsys = {
-	.create		= bcachecg_create,
-	.destroy	= bcachecg_destroy,
-	.subsys_id	= bcache_subsys_id,
-	.name		= "bcache",
-	.module		= THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(bcache_subsys);
-#endif
-
 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
 {
-#ifdef CONFIG_CGROUP_BCACHE
-	int r = bch_bio_to_cgroup(bio)->cache_mode;
-	if (r >= 0)
-		return r;
-#endif
 	return BDEV_CACHE_MODE(&dc->sb);
 }
 
 static bool verify(struct cached_dev *dc, struct bio *bio)
 {
-#ifdef CONFIG_CGROUP_BCACHE
-	if (bch_bio_to_cgroup(bio)->verify)
-		return true;
-#endif
 	return dc->verify;
 }
 
@@ -1305,9 +1145,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 
 void bch_request_exit(void)
 {
-#ifdef CONFIG_CGROUP_BCACHE
-	cgroup_unload_subsys(&bcache_subsys);
-#endif
 	if (bch_search_cache)
 		kmem_cache_destroy(bch_search_cache);
 }
@@ -1318,11 +1155,5 @@ int __init bch_request_init(void)
 	if (!bch_search_cache)
 		return -ENOMEM;
 
-#ifdef CONFIG_CGROUP_BCACHE
-	cgroup_load_subsys(&bcache_subsys);
-	init_bch_cgroup(&bcache_default_cgroup);
-
-	cgroup_add_cftypes(&bcache_subsys, bch_files);
-#endif
 	return 0;
 }
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index c117c4082aa2..1ff36875c2b3 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -1,8 +1,6 @@
 #ifndef _BCACHE_REQUEST_H_
 #define _BCACHE_REQUEST_H_
 
-#include <linux/cgroup.h>
-
 struct data_insert_op {
 	struct closure cl;
 	struct cache_set *c;
@@ -42,20 +40,4 @@ void bch_flash_dev_request_init(struct bcache_device *d);
 
 extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
 
-struct bch_cgroup {
-#ifdef CONFIG_CGROUP_BCACHE
-	struct cgroup_subsys_state css;
-#endif
-	/*
-	 * We subtract one from the index into bch_cache_modes[], so that
-	 * default == -1; this makes it so the rest match up with d->cache_mode,
-	 * and we use d->cache_mode if cgrp->cache_mode < 0
-	 */
-	short cache_mode;
-	bool verify;
-	struct cache_stat_collector stats;
-};
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
-
 #endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 84d0782f702e..0ca072c20d0d 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -201,9 +201,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
 	mark_cache_stats(&dc->accounting.collector, hit, bypass);
 	mark_cache_stats(&c->accounting.collector, hit, bypass);
-#ifdef CONFIG_CGROUP_BCACHE
-	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
-#endif
 }
 
 void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)