path: root/drivers/nvmem
author    Srinivas Kandagatla <srinivas.kandagatla@linaro.org>  2015-07-27 07:13:34 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2015-08-05 16:43:12 -0400
commit    69aba7948cbe53f2f1827e84e9dd0ae470a5072e (patch)
tree      fddabe4f391050c614033852141d3da9993e2b03 /drivers/nvmem
parent    eace75cfdcf7d9937d8c1fb226780123c64d72c4 (diff)
nvmem: Add a simple NVMEM framework for consumers
This patch adds only the consumer part of the framework, to keep the review manageable.

Up until now, nvmem drivers were stored in drivers/misc, where they all had to duplicate pretty much the same code to register a sysfs file, allow in-kernel users to access the content of the devices they were driving, and so on. This was also a problem for other in-kernel users, since the solutions differed from one driver to another and amounted to a rather big abstraction leak.

Introducing this framework aims at solving this. It also introduces a DT representation that lets consumer devices get the data they require (MAC addresses, SoC/revision IDs, part numbers, and so on) from the nvmems. Having a regmap interface to this framework gives much better abstraction for nvmems on different buses.

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
[Maxime Ripard: initial version of the framework]
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Tested-by: Philipp Zabel <p.zabel@pengutronix.de>
Tested-by: Rajendra Nayak <rnayak@codeaurora.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
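As a quick orientation for reviewers, here is a minimal sketch of how a consumer driver would use the API added below. The driver, the probe function and the "mac-address" cell id are hypothetical, and the consumer header name is assumed from the same series; the calls themselves (nvmem_cell_get(), nvmem_cell_read(), nvmem_cell_put()) are the ones introduced by this patch.

/* Hypothetical consumer probe: read a MAC address from an nvmem cell. */
#include <linux/err.h>
#include <linux/nvmem-consumer.h>	/* assumed consumer header from this series */
#include <linux/platform_device.h>
#include <linux/slab.h>

static int foo_probe(struct platform_device *pdev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *mac;

	/* Resolves "mac-address" via nvmem-cell-names in DT, or the global cell list. */
	cell = nvmem_cell_get(&pdev->dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);	/* may be -EPROBE_DEFER while the provider registers */

	/* Returns a kzalloc'd buffer on success; len is set to the cell size in bytes. */
	mac = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(mac))
		return PTR_ERR(mac);

	/* ... use the len bytes in mac ... */

	kfree(mac);
	return 0;
}

Where the cell should live as long as the device, devm_nvmem_cell_get() (also added below) replaces the explicit get/put pair.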
Diffstat (limited to 'drivers/nvmem')
-rw-r--r--  drivers/nvmem/core.c  421
1 file changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 2b024915e224..8c16ae2e1308 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -377,8 +377,12 @@ EXPORT_SYMBOL_GPL(nvmem_register);
  */
 int nvmem_unregister(struct nvmem_device *nvmem)
 {
-	if (nvmem->users)
+	mutex_lock(&nvmem_mutex);
+	if (nvmem->users) {
+		mutex_unlock(&nvmem_mutex);
 		return -EBUSY;
+	}
+	mutex_unlock(&nvmem_mutex);
 
 	nvmem_device_remove_all_cells(nvmem);
 	device_del(&nvmem->dev);
@@ -387,6 +391,421 @@ int nvmem_unregister(struct nvmem_device *nvmem)
 }
 EXPORT_SYMBOL_GPL(nvmem_unregister);
 
+static struct nvmem_device *__nvmem_device_get(struct device_node *np,
+					       struct nvmem_cell **cellp,
+					       const char *cell_id)
+{
+	struct nvmem_device *nvmem = NULL;
+
+	mutex_lock(&nvmem_mutex);
+
+	if (np) {
+		nvmem = of_nvmem_find(np);
+		if (!nvmem) {
+			mutex_unlock(&nvmem_mutex);
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+	} else {
+		struct nvmem_cell *cell = nvmem_find_cell(cell_id);
+
+		if (cell) {
+			nvmem = cell->nvmem;
+			*cellp = cell;
+		}
+
+		if (!nvmem) {
+			mutex_unlock(&nvmem_mutex);
+			return ERR_PTR(-ENOENT);
+		}
+	}
+
+	nvmem->users++;
+	mutex_unlock(&nvmem_mutex);
+
+	if (!try_module_get(nvmem->owner)) {
+		dev_err(&nvmem->dev,
+			"could not increase module refcount for cell %s\n",
+			nvmem->name);
+
+		mutex_lock(&nvmem_mutex);
+		nvmem->users--;
+		mutex_unlock(&nvmem_mutex);
+
+		return ERR_PTR(-EINVAL);
+	}
+
+	return nvmem;
+}
+
+static void __nvmem_device_put(struct nvmem_device *nvmem)
+{
+	module_put(nvmem->owner);
+	mutex_lock(&nvmem_mutex);
+	nvmem->users--;
+	mutex_unlock(&nvmem_mutex);
+}
+
+static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
+{
+	struct nvmem_cell *cell = NULL;
+	struct nvmem_device *nvmem;
+
+	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
+	if (IS_ERR(nvmem))
+		return ERR_CAST(nvmem);
+
+	return cell;
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
+ *
+ * @np: Device tree node that uses the nvmem cell.
+ * @name: nvmem cell name from the nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell entry should be released
+ * with nvmem_cell_put().
+ */
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+				     const char *name)
+{
+	struct device_node *cell_np, *nvmem_np;
+	struct nvmem_cell *cell;
+	struct nvmem_device *nvmem;
+	const __be32 *addr;
+	int rval, len, index;
+
+	index = of_property_match_string(np, "nvmem-cell-names", name);
+
+	cell_np = of_parse_phandle(np, "nvmem-cells", index);
+	if (!cell_np)
+		return ERR_PTR(-EINVAL);
+
+	nvmem_np = of_get_next_parent(cell_np);
+	if (!nvmem_np)
+		return ERR_PTR(-EINVAL);
+
+	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+	if (IS_ERR(nvmem))
+		return ERR_CAST(nvmem);
+
+	addr = of_get_property(cell_np, "reg", &len);
+	if (!addr || (len < 2 * sizeof(u32))) {
+		dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
+			cell_np->full_name);
+		rval = -EINVAL;
+		goto err_mem;
+	}
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell) {
+		rval = -ENOMEM;
+		goto err_mem;
+	}
+
+	cell->nvmem = nvmem;
+	cell->offset = be32_to_cpup(addr++);
+	cell->bytes = be32_to_cpup(addr);
+	cell->name = cell_np->name;
+
+	addr = of_get_property(cell_np, "bits", &len);
+	if (addr && len == (2 * sizeof(u32))) {
+		cell->bit_offset = be32_to_cpup(addr++);
+		cell->nbits = be32_to_cpup(addr);
+	}
+
+	if (cell->nbits)
+		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE);
+
+	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+		dev_err(&nvmem->dev,
+			"cell %s unaligned to nvmem stride %d\n",
+			cell->name, nvmem->stride);
+		rval = -EINVAL;
+		goto err_sanity;
+	}
+
+	nvmem_cell_add(cell);
+
+	return cell;
+
+err_sanity:
+	kfree(cell);
+
+err_mem:
+	__nvmem_device_put(nvmem);
+
+	return ERR_PTR(rval);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
+#endif
+
+/**
+ * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: nvmem cell name to get.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell entry should be released
+ * with nvmem_cell_put().
+ */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+{
+	struct nvmem_cell *cell;
+
+	if (dev->of_node) { /* try dt first */
+		cell = of_nvmem_cell_get(dev->of_node, cell_id);
+		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+			return cell;
+	}
+
+	return nvmem_cell_get_from_list(cell_id);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_get);
+
+static void devm_nvmem_cell_release(struct device *dev, void *res)
+{
+	nvmem_cell_put(*(struct nvmem_cell **)res);
+}
+
+/**
+ * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name from the nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell entry is released
+ * automatically once the device is freed.
+ */
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
+{
+	struct nvmem_cell **ptr, *cell;
+
+	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	cell = nvmem_cell_get(dev, id);
+	if (!IS_ERR(cell)) {
+		*ptr = cell;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return cell;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
+
+static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_cell **c = res;
+
+	if (WARN_ON(!c || !*c))
+		return 0;
+
+	return *c == data;
+}
+
+/**
+ * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
+ * obtained with devm_nvmem_cell_get().
+ * @dev: Device that requested the nvmem cell.
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
+ */
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_cell_release,
+			     devm_nvmem_cell_match, cell);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_nvmem_cell_put);
+
+/**
+ * nvmem_cell_put() - Release previously allocated nvmem cell.
+ *
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get()
+ */
+void nvmem_cell_put(struct nvmem_cell *cell)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+
+	__nvmem_device_put(nvmem);
+	nvmem_cell_drop(cell);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_put);
+
+static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
+						     void *buf)
+{
+	u8 *p, *b;
+	int i, bit_offset = cell->bit_offset;
+
+	p = b = buf;
+	if (bit_offset) {
+		/* First shift */
+		*b++ >>= bit_offset;
+
+		/* setup rest of the bytes if any */
+		for (i = 1; i < cell->bytes; i++) {
+			/* Get bits from next byte and shift them towards msb */
+			*p |= *b << (BITS_PER_BYTE - bit_offset);
+
+			p = b;
+			*b++ >>= bit_offset;
+		}
+
+		/* result fits in less bytes */
+		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
+			*p-- = 0;
+	}
+	/* clear msb bits if any leftover in the last byte */
+	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+}
+
+static int __nvmem_cell_read(struct nvmem_device *nvmem,
+			     struct nvmem_cell *cell,
+			     void *buf, size_t *len)
+{
+	int rc;
+
+	rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	/* shift bits in-place */
+	if (cell->bit_offset || cell->nbits)
+		nvmem_shift_read_buffer_in_place(cell, buf);
+
+	*len = cell->bytes;
+
+	return 0;
+}
+
+/**
+ * nvmem_cell_read() - Read a given nvmem cell
+ *
+ * @cell: nvmem cell to be read.
+ * @len: pointer to the cell length, populated on a successful read.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to the read buffer on success.
+ * The buffer should be freed by the consumer with kfree().
+ */
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	u8 *buf;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	rc = __nvmem_cell_read(nvmem, cell, buf, len);
+	if (IS_ERR_VALUE(rc)) {
+		kfree(buf);
+		return ERR_PTR(rc);
+	}
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read);
+
+static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+						     u8 *_buf, int len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	int i, rc, nbits, bit_offset = cell->bit_offset;
+	u8 v, *p, *buf, *b, pbyte, pbits;
+
+	nbits = cell->nbits;
+	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(buf, _buf, len);
+	p = b = buf;
+
+	if (bit_offset) {
+		pbyte = *b;
+		*b <<= bit_offset;
+
+		/* setup the first byte with lsb bits from nvmem */
+		rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
+		*b++ |= GENMASK(bit_offset - 1, 0) & v;
+
+		/* setup rest of the byte if any */
+		for (i = 1; i < cell->bytes; i++) {
+			/* Get last byte bits and shift them towards lsb */
+			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
+			pbyte = *b;
+			p = b;
+			*b <<= bit_offset;
+			*b++ |= pbits;
+		}
+	}
+
+	/* if it's not end on byte boundary */
+	if ((nbits + bit_offset) % BITS_PER_BYTE) {
+		/* setup the last byte with msb bits from nvmem */
+		rc = regmap_raw_read(nvmem->regmap,
+				     cell->offset + cell->bytes - 1, &v, 1);
+		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
+
+	}
+
+	return buf;
+}
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of the buffer to be written to the nvmem cell.
+ *
+ * Return: number of bytes written, or a negative error code on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap || nvmem->read_only ||
+	    (cell->bit_offset == 0 && len != cell->bytes))
+		return -EINVAL;
+
+	if (cell->bit_offset || cell->nbits) {
+		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
+	}
+
+	rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+	/* free the tmp buffer */
+	if (cell->bit_offset)
+		kfree(buf);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_write);
+
 static int __init nvmem_init(void)
 {
 	return bus_register(&nvmem_bus_type);