author      Krystian Garbaciak <krystian.garbaciak@diasemi.com>   2012-06-15 06:23:56 -0400
committer   Mark Brown <broonie@opensource.wolfsonmicro.com>      2012-06-17 16:34:18 -0400
commit      6863ca6227598d15c372f1e03449bbb4cfbcca7f (patch)
tree        f665b666606afe35dcfe38899bd153412fed4771 /drivers/base/regmap/regmap.c
parent      fc3ebd788e894b4dd6c9524cb3874eeeb1e862d6 (diff)
regmap: Add support for register indirect addressing.
Devices with register paging or indirectly accessed registers can configure the register map so that those registers appear in a virtual address range. An access to a virtually mapped register is translated into indirect addressing automatically, in the following steps:

 1. the selector for the page or indirect register is updated (when needed);
 2. the register in the data window is accessed.

The configuration should provide the minimum and maximum register of the virtual range, details of the selector field used for page selection, and the minimum and maximum register of the data window used for indirect access.

Registers in a virtual range are managed by the cache in the same way as directly accessed registers. To make indirect access more efficient, the selector register should be declared as non-volatile, if possible.

struct regmap_config is extended with the following:

        struct regmap_range_cfg *ranges;
        unsigned int n_ranges;

[Also reordered debugfs init to later on since the cleanup code was conflicting with the new cleanup code for ranges anyway -- broonie]

Signed-off-by: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
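As a quick illustration (not part of this patch), a driver describing one paged register range with the new fields might look roughly like the sketch below; the "foo" name, register addresses and field widths are made-up values, chosen only to satisfy the sanity checks added in regmap_init():

/* Hypothetical layout: virtual registers 0x100-0x1ff are reached through a
 * 16-register data window at 0x10-0x1f, with the page index programmed into
 * bits [3:0] of a selector register at 0x0f.
 */
static struct regmap_range_cfg foo_range_cfg[] = {
        {
                .range_min      = 0x100,        /* first virtual register */
                .range_max      = 0x1ff,        /* last virtual register */
                .selector_reg   = 0x0f,         /* page selector register */
                .selector_mask  = 0x0f,         /* bits holding the page number */
                .selector_shift = 0,
                .window_start   = 0x10,         /* first register of the data window */
                .window_len     = 16,           /* window size, in registers */
        },
};

static const struct regmap_config foo_regmap_config = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .max_register   = 0x1ff,
        .ranges         = foo_range_cfg,
        .n_ranges       = ARRAY_SIZE(foo_range_cfg),
};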
Diffstat (limited to 'drivers/base/regmap/regmap.c')
-rw-r--r--   drivers/base/regmap/regmap.c  |  201
1 file changed, 196 insertions(+), 5 deletions(-)
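As a worked example of the page-selection arithmetic introduced further down in _regmap_select_page(), assume the hypothetical layout sketched above (virtual range starting at 0x100, 16-register window at 0x10, selector field in bits [3:0] of register 0x0f). A single write to virtual register 0x125 would then resolve as:

        win_page   = (0x125 - 0x100) / 16 = 2;  /* value written into the selector field */
        win_offset = (0x125 - 0x100) % 16 = 5;  /* offset inside the data window */

The selector update goes through _regmap_update_bits(), so bits [3:0] of register 0x0f are only written when they do not already hold page 2; the actual bus access then happens at register 0x10 + 5 = 0x15.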
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 652017991da6..83a0166420a4 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,12 +15,17 @@
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/err.h>
+#include <linux/rbtree.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/regmap.h>
 
 #include "internal.h"
 
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+                               unsigned int mask, unsigned int val,
+                               bool *change);
+
 bool regmap_writeable(struct regmap *map, unsigned int reg)
 {
         if (map->max_register && reg > map->max_register)
@@ -208,6 +213,67 @@ static void dev_get_regmap_release(struct device *dev, void *res)
  */
 }
 
+static bool _regmap_range_add(struct regmap *map,
+                              struct regmap_range_node *data)
+{
+        struct rb_root *root = &map->range_tree;
+        struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+        while (*new) {
+                struct regmap_range_node *this =
+                        container_of(*new, struct regmap_range_node, node);
+
+                parent = *new;
+                if (data->range_max < this->range_min)
+                        new = &((*new)->rb_left);
+                else if (data->range_min > this->range_max)
+                        new = &((*new)->rb_right);
+                else
+                        return false;
+        }
+
+        rb_link_node(&data->node, parent, new);
+        rb_insert_color(&data->node, root);
+
+        return true;
+}
+
+static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
+                                                      unsigned int reg)
+{
+        struct rb_node *node = map->range_tree.rb_node;
+
+        while (node) {
+                struct regmap_range_node *this =
+                        container_of(node, struct regmap_range_node, node);
+
+                if (reg < this->range_min)
+                        node = node->rb_left;
+                else if (reg > this->range_max)
+                        node = node->rb_right;
+                else
+                        return this;
+        }
+
+        return NULL;
+}
+
+static void regmap_range_exit(struct regmap *map)
+{
+        struct rb_node *next;
+        struct regmap_range_node *range_node;
+
+        next = rb_first(&map->range_tree);
+        while (next) {
+                range_node = rb_entry(next, struct regmap_range_node, node);
+                next = rb_next(&range_node->node);
+                rb_erase(&range_node->node, &map->range_tree);
+                kfree(range_node);
+        }
+
+        kfree(map->selector_work_buf);
+}
+
 /**
  * regmap_init(): Initialise register map
  *
@@ -227,6 +293,7 @@ struct regmap *regmap_init(struct device *dev,
 {
         struct regmap *map, **m;
         int ret = -EINVAL;
+        int i, j;
 
         if (!bus || !config)
                 goto err;
@@ -364,27 +431,88 @@ struct regmap *regmap_init(struct device *dev,
                 goto err_map;
         }
 
-        regmap_debugfs_init(map, config->name);
+        map->range_tree = RB_ROOT;
+        for (i = 0; i < config->n_ranges; i++) {
+                const struct regmap_range_cfg *range_cfg = &config->ranges[i];
+                struct regmap_range_node *new;
+
+                /* Sanity check */
+                if (range_cfg->range_max < range_cfg->range_min ||
+                    range_cfg->range_max > map->max_register ||
+                    range_cfg->selector_reg > map->max_register ||
+                    range_cfg->window_len == 0)
+                        goto err_range;
+
+                /* Make sure, that this register range has no selector
+                   or data window within its boundary */
+                for (j = 0; j < config->n_ranges; j++) {
+                        unsigned sel_reg = config->ranges[j].selector_reg;
+                        unsigned win_min = config->ranges[j].window_start;
+                        unsigned win_max = win_min +
+                                           config->ranges[j].window_len - 1;
+
+                        if (range_cfg->range_min <= sel_reg &&
+                            sel_reg <= range_cfg->range_max) {
+                                goto err_range;
+                        }
+
+                        if (!(win_max < range_cfg->range_min ||
+                              win_min > range_cfg->range_max)) {
+                                goto err_range;
+                        }
+                }
+
+                new = kzalloc(sizeof(*new), GFP_KERNEL);
+                if (new == NULL) {
+                        ret = -ENOMEM;
+                        goto err_range;
+                }
+
+                new->range_min = range_cfg->range_min;
+                new->range_max = range_cfg->range_max;
+                new->selector_reg = range_cfg->selector_reg;
+                new->selector_mask = range_cfg->selector_mask;
+                new->selector_shift = range_cfg->selector_shift;
+                new->window_start = range_cfg->window_start;
+                new->window_len = range_cfg->window_len;
+
+                if (_regmap_range_add(map, new) == false) {
+                        kfree(new);
+                        goto err_range;
+                }
+
+                if (map->selector_work_buf == NULL) {
+                        map->selector_work_buf =
+                                kzalloc(map->format.buf_size, GFP_KERNEL);
+                        if (map->selector_work_buf == NULL) {
+                                ret = -ENOMEM;
+                                goto err_range;
+                        }
+                }
+        }
 
         ret = regcache_init(map, config);
         if (ret < 0)
-                goto err_debugfs;
+                goto err_range;
+
+        regmap_debugfs_init(map, config->name);
 
         /* Add a devres resource for dev_get_regmap() */
         m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
         if (!m) {
                 ret = -ENOMEM;
-                goto err_cache;
+                goto err_debugfs;
         }
         *m = map;
         devres_add(dev, m);
 
         return map;
 
-err_cache:
-        regcache_exit(map);
 err_debugfs:
         regmap_debugfs_exit(map);
+        regcache_exit(map);
+err_range:
+        regmap_range_exit(map);
         kfree(map->work_buf);
 err_map:
         kfree(map);
@@ -481,6 +609,7 @@ void regmap_exit(struct regmap *map)
 {
         regcache_exit(map);
         regmap_debugfs_exit(map);
+        regmap_range_exit(map);
         if (map->bus->free_context)
                 map->bus->free_context(map->bus_context);
         kfree(map->work_buf);
@@ -526,6 +655,56 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
 }
 EXPORT_SYMBOL_GPL(dev_get_regmap);
 
+static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+                               unsigned int val_num)
+{
+        struct regmap_range_node *range;
+        void *orig_work_buf;
+        unsigned int win_offset;
+        unsigned int win_page;
+        bool page_chg;
+        int ret;
+
+        range = _regmap_range_lookup(map, *reg);
+        if (range) {
+                win_offset = (*reg - range->range_min) % range->window_len;
+                win_page = (*reg - range->range_min) / range->window_len;
+
+                if (val_num > 1) {
+                        /* Bulk write shouldn't cross range boundary */
+                        if (*reg + val_num - 1 > range->range_max)
+                                return -EINVAL;
+
+                        /* ... or single page boundary */
+                        if (val_num > range->window_len - win_offset)
+                                return -EINVAL;
+                }
+
+                /* It is possible to have selector register inside data window.
+                   In that case, selector register is located on every page and
+                   it needs no page switching, when accessed alone. */
+                if (val_num > 1 ||
+                    range->window_start + win_offset != range->selector_reg) {
+                        /* Use separate work_buf during page switching */
+                        orig_work_buf = map->work_buf;
+                        map->work_buf = map->selector_work_buf;
+
+                        ret = _regmap_update_bits(map, range->selector_reg,
+                                                  range->selector_mask,
+                                                  win_page << range->selector_shift,
+                                                  &page_chg);
+                        if (ret < 0)
+                                return ret;
+
+                        map->work_buf = orig_work_buf;
+                }
+
+                *reg = range->window_start + win_offset;
+        }
+
+        return 0;
+}
+
 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                              const void *val, size_t val_len)
 {
@@ -563,6 +742,10 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                 }
         }
 
+        ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+        if (ret < 0)
+                return ret;
+
         map->format.format_reg(map->work_buf, reg, map->reg_shift);
 
         u8[0] |= map->write_flag_mask;
@@ -626,6 +809,10 @@ int _regmap_write(struct regmap *map, unsigned int reg,
         trace_regmap_reg_write(map->dev, reg, val);
 
         if (map->format.format_write) {
+                ret = _regmap_select_page(map, &reg, 1);
+                if (ret < 0)
+                        return ret;
+
                 map->format.format_write(map, reg, val);
 
                 trace_regmap_hw_write_start(map->dev, reg, 1);
@@ -783,6 +970,10 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         u8 *u8 = map->work_buf;
         int ret;
 
+        ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+        if (ret < 0)
+                return ret;
+
         map->format.format_reg(map->work_buf, reg, map->reg_shift);
 
         /*