diff options
author | Dan Williams <dan.j.williams@intel.com> | 2017-04-19 18:14:31 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2017-04-19 18:14:31 -0400 |
commit | 72058005411ffddcae6c06f7b691d635489132af (patch) | |
tree | a92ff7c7c6c968e2c7fc17a0b0e85b5fc3514af5 | |
parent | 7b6be8444e0f0dd675b54d059793423d3c9b4c03 (diff) |
dax: add a facility to look up a dax device by 'host' device name
For the current block_device based filesystem-dax path, we need a way
for it to look up the dax_device associated with a block_device. Add a
'host' property of a dax_device that can be used for this purpose. It is
a free-form string, but for a dax_device associated with a block device
it is the bdev name.
This is a stop-gap until filesystems are able to mount on a dax-inode
directly.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r-- | drivers/dax/dax.h | 2 | ||||
-rw-r--r-- | drivers/dax/device.c | 2 | ||||
-rw-r--r-- | drivers/dax/super.c | 87 | ||||
-rw-r--r-- | include/linux/dax.h | 1 |
4 files changed, 86 insertions, 6 deletions
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h index 2472d9da96db..246a24d68d4c 100644 --- a/drivers/dax/dax.h +++ b/drivers/dax/dax.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #ifndef __DAX_H__ | 13 | #ifndef __DAX_H__ |
14 | #define __DAX_H__ | 14 | #define __DAX_H__ |
15 | struct dax_device; | 15 | struct dax_device; |
16 | struct dax_device *alloc_dax(void *private); | 16 | struct dax_device *alloc_dax(void *private, const char *host); |
17 | void put_dax(struct dax_device *dax_dev); | 17 | void put_dax(struct dax_device *dax_dev); |
18 | bool dax_alive(struct dax_device *dax_dev); | 18 | bool dax_alive(struct dax_device *dax_dev); |
19 | void kill_dax(struct dax_device *dax_dev); | 19 | void kill_dax(struct dax_device *dax_dev); |
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 19a42edbfa03..db68f4fa8ce0 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
@@ -645,7 +645,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, | |||
645 | goto err_id; | 645 | goto err_id; |
646 | } | 646 | } |
647 | 647 | ||
648 | dax_dev = alloc_dax(dev_dax); | 648 | dax_dev = alloc_dax(dev_dax, NULL); |
649 | if (!dax_dev) | 649 | if (!dax_dev) |
650 | goto err_dax; | 650 | goto err_dax; |
651 | 651 | ||
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index c9f85f1c086e..8d446674c1da 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
@@ -30,6 +30,10 @@ static DEFINE_IDA(dax_minor_ida); | |||
30 | static struct kmem_cache *dax_cache __read_mostly; | 30 | static struct kmem_cache *dax_cache __read_mostly; |
31 | static struct super_block *dax_superblock __read_mostly; | 31 | static struct super_block *dax_superblock __read_mostly; |
32 | 32 | ||
33 | #define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head)) | ||
34 | static struct hlist_head dax_host_list[DAX_HASH_SIZE]; | ||
35 | static DEFINE_SPINLOCK(dax_host_lock); | ||
36 | |||
33 | int dax_read_lock(void) | 37 | int dax_read_lock(void) |
34 | { | 38 | { |
35 | return srcu_read_lock(&dax_srcu); | 39 | return srcu_read_lock(&dax_srcu); |
@@ -46,12 +50,15 @@ EXPORT_SYMBOL_GPL(dax_read_unlock); | |||
46 | * struct dax_device - anchor object for dax services | 50 | * struct dax_device - anchor object for dax services |
47 | * @inode: core vfs | 51 | * @inode: core vfs |
48 | * @cdev: optional character interface for "device dax" | 52 | * @cdev: optional character interface for "device dax" |
53 | * @host: optional name for lookups where the device path is not available | ||
49 | * @private: dax driver private data | 54 | * @private: dax driver private data |
50 | * @alive: !alive + rcu grace period == no new operations / mappings | 55 | * @alive: !alive + rcu grace period == no new operations / mappings |
51 | */ | 56 | */ |
52 | struct dax_device { | 57 | struct dax_device { |
58 | struct hlist_node list; | ||
53 | struct inode inode; | 59 | struct inode inode; |
54 | struct cdev cdev; | 60 | struct cdev cdev; |
61 | const char *host; | ||
55 | void *private; | 62 | void *private; |
56 | bool alive; | 63 | bool alive; |
57 | }; | 64 | }; |
@@ -63,6 +70,11 @@ bool dax_alive(struct dax_device *dax_dev) | |||
63 | } | 70 | } |
64 | EXPORT_SYMBOL_GPL(dax_alive); | 71 | EXPORT_SYMBOL_GPL(dax_alive); |
65 | 72 | ||
73 | static int dax_host_hash(const char *host) | ||
74 | { | ||
75 | return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE; | ||
76 | } | ||
77 | |||
66 | /* | 78 | /* |
67 | * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring | 79 | * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring |
68 | * that any fault handlers or operations that might have seen | 80 | * that any fault handlers or operations that might have seen |
@@ -75,7 +87,13 @@ void kill_dax(struct dax_device *dax_dev) | |||
75 | return; | 87 | return; |
76 | 88 | ||
77 | dax_dev->alive = false; | 89 | dax_dev->alive = false; |
90 | |||
78 | synchronize_srcu(&dax_srcu); | 91 | synchronize_srcu(&dax_srcu); |
92 | |||
93 | spin_lock(&dax_host_lock); | ||
94 | hlist_del_init(&dax_dev->list); | ||
95 | spin_unlock(&dax_host_lock); | ||
96 | |||
79 | dax_dev->private = NULL; | 97 | dax_dev->private = NULL; |
80 | } | 98 | } |
81 | EXPORT_SYMBOL_GPL(kill_dax); | 99 | EXPORT_SYMBOL_GPL(kill_dax); |
@@ -98,6 +116,8 @@ static void dax_i_callback(struct rcu_head *head) | |||
98 | struct inode *inode = container_of(head, struct inode, i_rcu); | 116 | struct inode *inode = container_of(head, struct inode, i_rcu); |
99 | struct dax_device *dax_dev = to_dax_dev(inode); | 117 | struct dax_device *dax_dev = to_dax_dev(inode); |
100 | 118 | ||
119 | kfree(dax_dev->host); | ||
120 | dax_dev->host = NULL; | ||
101 | ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); | 121 | ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); |
102 | kmem_cache_free(dax_cache, dax_dev); | 122 | kmem_cache_free(dax_cache, dax_dev); |
103 | } | 123 | } |
@@ -169,26 +189,53 @@ static struct dax_device *dax_dev_get(dev_t devt) | |||
169 | return dax_dev; | 189 | return dax_dev; |
170 | } | 190 | } |
171 | 191 | ||
172 | struct dax_device *alloc_dax(void *private) | 192 | static void dax_add_host(struct dax_device *dax_dev, const char *host) |
193 | { | ||
194 | int hash; | ||
195 | |||
196 | /* | ||
197 | * Unconditionally init dax_dev since it's coming from a | ||
198 | * non-zeroed slab cache | ||
199 | */ | ||
200 | INIT_HLIST_NODE(&dax_dev->list); | ||
201 | dax_dev->host = host; | ||
202 | if (!host) | ||
203 | return; | ||
204 | |||
205 | hash = dax_host_hash(host); | ||
206 | spin_lock(&dax_host_lock); | ||
207 | hlist_add_head(&dax_dev->list, &dax_host_list[hash]); | ||
208 | spin_unlock(&dax_host_lock); | ||
209 | } | ||
210 | |||
211 | struct dax_device *alloc_dax(void *private, const char *__host) | ||
173 | { | 212 | { |
174 | struct dax_device *dax_dev; | 213 | struct dax_device *dax_dev; |
214 | const char *host; | ||
175 | dev_t devt; | 215 | dev_t devt; |
176 | int minor; | 216 | int minor; |
177 | 217 | ||
218 | host = kstrdup(__host, GFP_KERNEL); | ||
219 | if (__host && !host) | ||
220 | return NULL; | ||
221 | |||
178 | minor = ida_simple_get(&dax_minor_ida, 0, nr_dax, GFP_KERNEL); | 222 | minor = ida_simple_get(&dax_minor_ida, 0, nr_dax, GFP_KERNEL); |
179 | if (minor < 0) | 223 | if (minor < 0) |
180 | return NULL; | 224 | goto err_minor; |
181 | 225 | ||
182 | devt = MKDEV(MAJOR(dax_devt), minor); | 226 | devt = MKDEV(MAJOR(dax_devt), minor); |
183 | dax_dev = dax_dev_get(devt); | 227 | dax_dev = dax_dev_get(devt); |
184 | if (!dax_dev) | 228 | if (!dax_dev) |
185 | goto err_inode; | 229 | goto err_dev; |
186 | 230 | ||
231 | dax_add_host(dax_dev, host); | ||
187 | dax_dev->private = private; | 232 | dax_dev->private = private; |
188 | return dax_dev; | 233 | return dax_dev; |
189 | 234 | ||
190 | err_inode: | 235 | err_dev: |
191 | ida_simple_remove(&dax_minor_ida, minor); | 236 | ida_simple_remove(&dax_minor_ida, minor); |
237 | err_minor: | ||
238 | kfree(host); | ||
192 | return NULL; | 239 | return NULL; |
193 | } | 240 | } |
194 | EXPORT_SYMBOL_GPL(alloc_dax); | 241 | EXPORT_SYMBOL_GPL(alloc_dax); |
@@ -202,6 +249,38 @@ void put_dax(struct dax_device *dax_dev) | |||
202 | EXPORT_SYMBOL_GPL(put_dax); | 249 | EXPORT_SYMBOL_GPL(put_dax); |
203 | 250 | ||
204 | /** | 251 | /** |
252 | * dax_get_by_host() - temporary lookup mechanism for filesystem-dax | ||
253 | * @host: alternate name for the device registered by a dax driver | ||
254 | */ | ||
255 | struct dax_device *dax_get_by_host(const char *host) | ||
256 | { | ||
257 | struct dax_device *dax_dev, *found = NULL; | ||
258 | int hash, id; | ||
259 | |||
260 | if (!host) | ||
261 | return NULL; | ||
262 | |||
263 | hash = dax_host_hash(host); | ||
264 | |||
265 | id = dax_read_lock(); | ||
266 | spin_lock(&dax_host_lock); | ||
267 | hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) { | ||
268 | if (!dax_alive(dax_dev) | ||
269 | || strcmp(host, dax_dev->host) != 0) | ||
270 | continue; | ||
271 | |||
272 | if (igrab(&dax_dev->inode)) | ||
273 | found = dax_dev; | ||
274 | break; | ||
275 | } | ||
276 | spin_unlock(&dax_host_lock); | ||
277 | dax_read_unlock(id); | ||
278 | |||
279 | return found; | ||
280 | } | ||
281 | EXPORT_SYMBOL_GPL(dax_get_by_host); | ||
282 | |||
283 | /** | ||
205 | * inode_dax: convert a public inode into its dax_dev | 284 | * inode_dax: convert a public inode into its dax_dev |
206 | * @inode: An inode with i_cdev pointing to a dax_dev | 285 | * @inode: An inode with i_cdev pointing to a dax_dev |
207 | * | 286 | * |
diff --git a/include/linux/dax.h b/include/linux/dax.h index 5b62f5d19aea..9b2d5ba10d7d 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
@@ -10,6 +10,7 @@ struct iomap_ops; | |||
10 | 10 | ||
11 | int dax_read_lock(void); | 11 | int dax_read_lock(void); |
12 | void dax_read_unlock(int id); | 12 | void dax_read_unlock(int id); |
13 | struct dax_device *dax_get_by_host(const char *host); | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * We use lowest available bit in exceptional entry for locking, one bit for | 16 | * We use lowest available bit in exceptional entry for locking, one bit for |