author	Dan Williams <dan.j.williams@intel.com>	2016-08-07 11:23:56 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2016-08-24 01:58:50 -0400
commit	043a9255021bad498e31365d104d33915b6a6e33 (patch)
tree	c28069486e847bafc7b9ae4bd7486441be3da284 /drivers/dax
parent	ccdb07f62986968ecd687a71550ed187c8cf875c (diff)
dax: reorder dax_fops function definitions
In order to convert devm_create_dax_dev() to use cdev, it will need access to dax_fops. Move dax_fops and related function definitions before devm_create_dax_dev().

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
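[Editor's note] For context only, here is a minimal sketch of the kind of cdev hookup the message alludes to inside devm_create_dax_dev(). It is not part of this patch: the cdev member of struct dax_dev and the err_cdev label are assumptions about the follow-on conversion, while cdev_init()/cdev_add() are the stock <linux/cdev.h> helpers.

	#include <linux/cdev.h>

	/*
	 * Hypothetical follow-on hookup, after the minor number has been
	 * allocated.  Assumes a later patch adds a 'struct cdev cdev'
	 * member to struct dax_dev.
	 */
	cdev_init(&dax_dev->cdev, &dax_fops);		/* bind the fops table */
	dax_dev->cdev.owner = THIS_MODULE;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);	/* register one minor at dev_t */
	if (rc)
		goto err_cdev;				/* hypothetical error label */

With dax_fops defined ahead of devm_create_dax_dev(), a call like this needs no forward declaration, which is the point of this reordering.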
Diffstat (limited to 'drivers/dax')
-rw-r--r--	drivers/dax/dax.c	337
1 file changed, 168 insertions(+), 169 deletions(-)
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 736c03830fd0..3774fc9709bb 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -145,175 +145,6 @@ static const struct attribute_group *dax_attribute_groups[] = {
 	NULL,
 };
 
-static void unregister_dax_dev(void *_dev)
-{
-	struct device *dev = _dev;
-	struct dax_dev *dax_dev = dev_get_drvdata(dev);
-	struct dax_region *dax_region = dax_dev->region;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	/*
-	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
-	 * ensuring that any fault handlers that might have seen
-	 * dax_dev->alive == true, have completed. Any fault handlers
-	 * that start after synchronize_rcu() has started will abort
-	 * upon seeing dax_dev->alive == false.
-	 */
-	dax_dev->alive = false;
-	synchronize_rcu();
-
-	get_device(dev);
-	device_unregister(dev);
-	ida_simple_remove(&dax_region->ida, dax_dev->id);
-	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
-	put_device(dev);
-	dax_dev_put(dax_dev);
-}
-
-int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
-		int count)
-{
-	struct device *parent = dax_region->dev;
-	struct dax_dev *dax_dev;
-	struct device *dev;
-	int rc, minor;
-	dev_t dev_t;
-
-	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
-	if (!dax_dev)
-		return -ENOMEM;
-	memcpy(dax_dev->res, res, sizeof(*res) * count);
-	dax_dev->num_resources = count;
-	kref_init(&dax_dev->kref);
-	dax_dev->alive = true;
-	dax_dev->region = dax_region;
-	kref_get(&dax_region->kref);
-
-	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
-	if (dax_dev->id < 0) {
-		rc = dax_dev->id;
-		goto err_id;
-	}
-
-	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
-	if (minor < 0) {
-		rc = minor;
-		goto err_minor;
-	}
-
-	dev_t = MKDEV(dax_major, minor);
-	dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
-			dax_attribute_groups, "dax%d.%d", dax_region->id,
-			dax_dev->id);
-	if (IS_ERR(dev)) {
-		rc = PTR_ERR(dev);
-		goto err_create;
-	}
-	dax_dev->dev = dev;
-
-	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
-	if (rc)
-		return rc;
-
-	return 0;
-
- err_create:
-	ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
-	ida_simple_remove(&dax_region->ida, dax_dev->id);
- err_id:
-	dax_dev_put(dax_dev);
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(devm_create_dax_dev);
-
-/* return an unmapped area aligned to the dax region specified alignment */
-static unsigned long dax_dev_get_unmapped_area(struct file *filp,
-		unsigned long addr, unsigned long len, unsigned long pgoff,
-		unsigned long flags)
-{
-	unsigned long off, off_end, off_align, len_align, addr_align, align;
-	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
-	struct dax_region *dax_region;
-
-	if (!dax_dev || addr)
-		goto out;
-
-	dax_region = dax_dev->region;
-	align = dax_region->align;
-	off = pgoff << PAGE_SHIFT;
-	off_end = off + len;
-	off_align = round_up(off, align);
-
-	if ((off_end <= off_align) || ((off_end - off_align) < align))
-		goto out;
-
-	len_align = len + align;
-	if ((off + len_align) < off)
-		goto out;
-
-	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
-			pgoff, flags);
-	if (!IS_ERR_VALUE(addr_align)) {
-		addr_align += (off - addr_align) & (align - 1);
-		return addr_align;
-	}
- out:
-	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-
-static int __match_devt(struct device *dev, const void *data)
-{
-	const dev_t *devt = data;
-
-	return dev->devt == *devt;
-}
-
-static struct device *dax_dev_find(dev_t dev_t)
-{
-	return class_find_device(dax_class, NULL, &dev_t, __match_devt);
-}
-
-static int dax_dev_open(struct inode *inode, struct file *filp)
-{
-	struct dax_dev *dax_dev = NULL;
-	struct device *dev;
-
-	dev = dax_dev_find(inode->i_rdev);
-	if (!dev)
-		return -ENXIO;
-
-	device_lock(dev);
-	dax_dev = dev_get_drvdata(dev);
-	if (dax_dev) {
-		dev_dbg(dev, "%s\n", __func__);
-		filp->private_data = dax_dev;
-		kref_get(&dax_dev->kref);
-		inode->i_flags = S_DAX;
-	}
-	device_unlock(dev);
-
-	if (!dax_dev) {
-		put_device(dev);
-		return -ENXIO;
-	}
-	return 0;
-}
-
-static int dax_dev_release(struct inode *inode, struct file *filp)
-{
-	struct dax_dev *dax_dev = filp->private_data;
-	struct device *dev = dax_dev->dev;
-
-	dev_dbg(dax_dev->dev, "%s\n", __func__);
-	dax_dev_put(dax_dev);
-	put_device(dev);
-
-	return 0;
-}
-
 static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 		const char *func)
 {
@@ -531,7 +362,91 @@ static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_ops = &dax_dev_vm_ops;
 	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 	return 0;
+}
+
+/* return an unmapped area aligned to the dax region specified alignment */
+static unsigned long dax_dev_get_unmapped_area(struct file *filp,
+		unsigned long addr, unsigned long len, unsigned long pgoff,
+		unsigned long flags)
+{
+	unsigned long off, off_end, off_align, len_align, addr_align, align;
+	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
+	struct dax_region *dax_region;
+
+	if (!dax_dev || addr)
+		goto out;
+
+	dax_region = dax_dev->region;
+	align = dax_region->align;
+	off = pgoff << PAGE_SHIFT;
+	off_end = off + len;
+	off_align = round_up(off, align);
+
+	if ((off_end <= off_align) || ((off_end - off_align) < align))
+		goto out;
+
+	len_align = len + align;
+	if ((off + len_align) < off)
+		goto out;
+
+	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+			pgoff, flags);
+	if (!IS_ERR_VALUE(addr_align)) {
+		addr_align += (off - addr_align) & (align - 1);
+		return addr_align;
+	}
+ out:
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+static int __match_devt(struct device *dev, const void *data)
+{
+	const dev_t *devt = data;
+
+	return dev->devt == *devt;
+}
+
+static struct device *dax_dev_find(dev_t dev_t)
+{
+	return class_find_device(dax_class, NULL, &dev_t, __match_devt);
+}
+
+static int dax_dev_open(struct inode *inode, struct file *filp)
+{
+	struct dax_dev *dax_dev = NULL;
+	struct device *dev;
+
+	dev = dax_dev_find(inode->i_rdev);
+	if (!dev)
+		return -ENXIO;
+
+	device_lock(dev);
+	dax_dev = dev_get_drvdata(dev);
+	if (dax_dev) {
+		dev_dbg(dev, "%s\n", __func__);
+		filp->private_data = dax_dev;
+		kref_get(&dax_dev->kref);
+		inode->i_flags = S_DAX;
+	}
+	device_unlock(dev);
+
+	if (!dax_dev) {
+		put_device(dev);
+		return -ENXIO;
+	}
+	return 0;
+}
 
+static int dax_dev_release(struct inode *inode, struct file *filp)
+{
+	struct dax_dev *dax_dev = filp->private_data;
+	struct device *dev = dax_dev->dev;
+
+	dev_dbg(dax_dev->dev, "%s\n", __func__);
+	dax_dev_put(dax_dev);
+	put_device(dev);
+
+	return 0;
 }
 
 static const struct file_operations dax_fops = {
@@ -543,6 +458,90 @@ static const struct file_operations dax_fops = {
 	.mmap = dax_dev_mmap,
 };
 
+static void unregister_dax_dev(void *_dev)
+{
+	struct device *dev = _dev;
+	struct dax_dev *dax_dev = dev_get_drvdata(dev);
+	struct dax_region *dax_region = dax_dev->region;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/*
+	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
+	 * ensuring that any fault handlers that might have seen
+	 * dax_dev->alive == true, have completed. Any fault handlers
+	 * that start after synchronize_rcu() has started will abort
+	 * upon seeing dax_dev->alive == false.
+	 */
+	dax_dev->alive = false;
+	synchronize_rcu();
+
+	get_device(dev);
+	device_unregister(dev);
+	ida_simple_remove(&dax_region->ida, dax_dev->id);
+	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
+	put_device(dev);
+	dax_dev_put(dax_dev);
+}
+
+int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
+		int count)
+{
+	struct device *parent = dax_region->dev;
+	struct dax_dev *dax_dev;
+	struct device *dev;
+	int rc, minor;
+	dev_t dev_t;
+
+	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
+	if (!dax_dev)
+		return -ENOMEM;
+	memcpy(dax_dev->res, res, sizeof(*res) * count);
+	dax_dev->num_resources = count;
+	kref_init(&dax_dev->kref);
+	dax_dev->alive = true;
+	dax_dev->region = dax_region;
+	kref_get(&dax_region->kref);
+
+	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+	if (dax_dev->id < 0) {
+		rc = dax_dev->id;
+		goto err_id;
+	}
+
+	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
+	if (minor < 0) {
+		rc = minor;
+		goto err_minor;
+	}
+
+	dev_t = MKDEV(dax_major, minor);
+	dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
+			dax_attribute_groups, "dax%d.%d", dax_region->id,
+			dax_dev->id);
+	if (IS_ERR(dev)) {
+		rc = PTR_ERR(dev);
+		goto err_create;
+	}
+	dax_dev->dev = dev;
+
+	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
+	if (rc)
+		return rc;
+
+	return 0;
+
+ err_create:
+	ida_simple_remove(&dax_minor_ida, minor);
+ err_minor:
+	ida_simple_remove(&dax_region->ida, dax_dev->id);
+ err_id:
+	dax_dev_put(dax_dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(devm_create_dax_dev);
+
 static int __init dax_init(void)
 {
 	int rc;