Diffstat (limited to 'drivers/base/regmap')
 -rw-r--r--  drivers/base/regmap/internal.h         |  14
 -rw-r--r--  drivers/base/regmap/regcache-rbtree.c  | 181
 -rw-r--r--  drivers/base/regmap/regcache.c         |  75
 -rw-r--r--  drivers/base/regmap/regmap-debugfs.c   |   4
 -rw-r--r--  drivers/base/regmap/regmap-irq.c       |  25
 -rw-r--r--  drivers/base/regmap/regmap.c           |  26
6 files changed, 194 insertions(+), 131 deletions(-)
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 29c83160ca29..57f777835d97 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -128,9 +128,6 @@ struct regmap {
 	void *cache;
 	u32 cache_dirty;
 
-	unsigned long *cache_present;
-	unsigned int cache_present_nbits;
-
 	struct reg_default *patch;
 	int patch_regs;
 
@@ -203,6 +200,7 @@ int regcache_write(struct regmap *map,
 			unsigned int reg, unsigned int value);
 int regcache_sync(struct regmap *map);
 int regcache_sync_block(struct regmap *map, void *block,
+			unsigned long *cache_present,
 			unsigned int block_base, unsigned int start,
 			unsigned int end);
 
@@ -218,16 +216,6 @@ unsigned int regcache_get_val(struct regmap *map, const void *base,
 bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 		      unsigned int val);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_set_reg_present(struct regmap *map, unsigned int reg);
-
-static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
-{
-	if (!map->cache_present)
-		return true;
-	if (reg > map->cache_present_nbits)
-		return false;
-	return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
-}
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		      const void *val, size_t val_len, bool async);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 0fccc99881fd..930cad4e5df8 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -29,6 +29,8 @@ struct regcache_rbtree_node {
 	unsigned int base_reg;
 	/* block of adjacent registers */
 	void *block;
+	/* Which registers are present */
+	long *cache_present;
 	/* number of registers available in the block */
 	unsigned int blklen;
 } __attribute__ ((packed));
@@ -57,6 +59,7 @@ static void regcache_rbtree_set_register(struct regmap *map,
 					 struct regcache_rbtree_node *rbnode,
 					 unsigned int idx, unsigned int val)
 {
+	set_bit(idx, rbnode->cache_present);
 	regcache_set_val(map, rbnode->block, idx, val);
 }
 
@@ -146,13 +149,13 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	map->lock(map->lock_arg);
 
 	mem_size = sizeof(*rbtree_ctx);
-	mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
 
 	for (node = rb_first(&rbtree_ctx->root); node != NULL;
 	     node = rb_next(node)) {
 		n = container_of(node, struct regcache_rbtree_node, node);
 		mem_size += sizeof(*n);
 		mem_size += (n->blklen * map->cache_word_size);
+		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
 
 		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
 		this_registers = ((top - base) / map->reg_stride) + 1;
@@ -245,6 +248,7 @@ static int regcache_rbtree_exit(struct regmap *map)
 		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
 		next = rb_next(&rbtree_node->node);
 		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+		kfree(rbtree_node->cache_present);
 		kfree(rbtree_node->block);
 		kfree(rbtree_node);
 	}
@@ -265,7 +269,7 @@ static int regcache_rbtree_read(struct regmap *map,
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
 		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
-		if (!regcache_reg_present(map, reg))
+		if (!test_bit(reg_tmp, rbnode->cache_present))
 			return -ENOENT;
 		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
 	} else {
@@ -278,27 +282,45 @@ static int regcache_rbtree_read(struct regmap *map,
 
 static int regcache_rbtree_insert_to_block(struct regmap *map,
 					   struct regcache_rbtree_node *rbnode,
-					   unsigned int pos, unsigned int reg,
+					   unsigned int base_reg,
+					   unsigned int top_reg,
+					   unsigned int reg,
 					   unsigned int value)
 {
+	unsigned int blklen;
+	unsigned int pos, offset;
+	unsigned long *present;
 	u8 *blk;
 
+	blklen = (top_reg - base_reg) / map->reg_stride + 1;
+	pos = (reg - base_reg) / map->reg_stride;
+	offset = (rbnode->base_reg - base_reg) / map->reg_stride;
+
 	blk = krealloc(rbnode->block,
-		       (rbnode->blklen + 1) * map->cache_word_size,
+		       blklen * map->cache_word_size,
 		       GFP_KERNEL);
 	if (!blk)
 		return -ENOMEM;
 
+	present = krealloc(rbnode->cache_present,
+			   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+	if (!present) {
+		kfree(blk);
+		return -ENOMEM;
+	}
+
 	/* insert the register value in the correct place in the rbnode block */
-	memmove(blk + (pos + 1) * map->cache_word_size,
-		blk + pos * map->cache_word_size,
-		(rbnode->blklen - pos) * map->cache_word_size);
+	if (pos == 0) {
+		memmove(blk + offset * map->cache_word_size,
+			blk, rbnode->blklen * map->cache_word_size);
+		bitmap_shift_right(present, present, offset, blklen);
+	}
 
 	/* update the rbnode block, its size and the base register */
 	rbnode->block = blk;
-	rbnode->blklen++;
-	if (!pos)
-		rbnode->base_reg = reg;
+	rbnode->blklen = blklen;
+	rbnode->base_reg = base_reg;
+	rbnode->cache_present = present;
 
 	regcache_rbtree_set_register(map, rbnode, pos, value);
 	return 0;
@@ -325,8 +347,8 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 
 		if (i != map->rd_table->n_yes_ranges) {
 			range = &map->rd_table->yes_ranges[i];
-			rbnode->blklen = range->range_max - range->range_min
-				+ 1;
+			rbnode->blklen = (range->range_max - range->range_min) /
+				map->reg_stride + 1;
 			rbnode->base_reg = range->range_min;
 		}
 	}
@@ -338,12 +360,21 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 
 	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
 				GFP_KERNEL);
-	if (!rbnode->block) {
-		kfree(rbnode);
-		return NULL;
-	}
+	if (!rbnode->block)
+		goto err_free;
+
+	rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
+		sizeof(*rbnode->cache_present), GFP_KERNEL);
+	if (!rbnode->cache_present)
+		goto err_free_block;
 
 	return rbnode;
+
+err_free_block:
+	kfree(rbnode->block);
+err_free:
+	kfree(rbnode);
+	return NULL;
 }
 
 static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
@@ -353,15 +384,9 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
 	struct rb_node *node;
 	unsigned int reg_tmp;
-	unsigned int pos;
-	int i;
 	int ret;
 
 	rbtree_ctx = map->cache;
-	/* update the reg_present bitmap, make space if necessary */
-	ret = regcache_set_reg_present(map, reg);
-	if (ret < 0)
-		return ret;
 
 	/* if we can't locate it in the cached rbnode we'll have
 	 * to traverse the rbtree looking for it.
@@ -371,30 +396,43 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
 		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
 	} else {
+		unsigned int base_reg, top_reg;
+		unsigned int new_base_reg, new_top_reg;
+		unsigned int min, max;
+		unsigned int max_dist;
+
+		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
+			map->cache_word_size;
+		if (reg < max_dist)
+			min = 0;
+		else
+			min = reg - max_dist;
+		max = reg + max_dist;
+
 		/* look for an adjacent register to the one we are about to add */
 		for (node = rb_first(&rbtree_ctx->root); node;
 		     node = rb_next(node)) {
 			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
 					      node);
-			for (i = 0; i < rbnode_tmp->blklen; i++) {
-				reg_tmp = rbnode_tmp->base_reg +
-						(i * map->reg_stride);
-				if (abs(reg_tmp - reg) != map->reg_stride)
-					continue;
-				/* decide where in the block to place our register */
-				if (reg_tmp + map->reg_stride == reg)
-					pos = i + 1;
-				else
-					pos = i;
-				ret = regcache_rbtree_insert_to_block(map,
-								      rbnode_tmp,
-								      pos, reg,
-								      value);
-				if (ret)
-					return ret;
-				rbtree_ctx->cached_rbnode = rbnode_tmp;
-				return 0;
+
+			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
+				&base_reg, &top_reg);
+
+			if (base_reg <= max && top_reg >= min) {
+				new_base_reg = min(reg, base_reg);
+				new_top_reg = max(reg, top_reg);
+			} else {
+				continue;
 			}
+
+			ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+							      new_base_reg,
+							      new_top_reg, reg,
+							      value);
+			if (ret)
+				return ret;
+			rbtree_ctx->cached_rbnode = rbnode_tmp;
+			return 0;
 		}
 
 		/* We did not manage to find a place to insert it in
@@ -418,30 +456,34 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 	struct regcache_rbtree_ctx *rbtree_ctx;
 	struct rb_node *node;
 	struct regcache_rbtree_node *rbnode;
+	unsigned int base_reg, top_reg;
+	unsigned int start, end;
 	int ret;
-	int base, end;
 
 	rbtree_ctx = map->cache;
 	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
 		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
 
-		if (rbnode->base_reg > max)
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+			&top_reg);
+		if (base_reg > max)
 			break;
-		if (rbnode->base_reg + rbnode->blklen < min)
+		if (top_reg < min)
 			continue;
 
-		if (min > rbnode->base_reg)
-			base = min - rbnode->base_reg;
+		if (min > base_reg)
+			start = (min - base_reg) / map->reg_stride;
 		else
-			base = 0;
+			start = 0;
 
-		if (max < rbnode->base_reg + rbnode->blklen)
-			end = max - rbnode->base_reg + 1;
+		if (max < top_reg)
+			end = (max - base_reg) / map->reg_stride + 1;
 		else
 			end = rbnode->blklen;
 
-		ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
-					  base, end);
+		ret = regcache_sync_block(map, rbnode->block,
+					  rbnode->cache_present,
+					  rbnode->base_reg, start, end);
 		if (ret != 0)
 			return ret;
 	}
@@ -449,6 +491,42 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 	return regmap_async_complete(map);
 }
 
+static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
+				unsigned int max)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct regcache_rbtree_node *rbnode;
+	struct rb_node *node;
+	unsigned int base_reg, top_reg;
+	unsigned int start, end;
+
+	rbtree_ctx = map->cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+			&top_reg);
+		if (base_reg > max)
+			break;
+		if (top_reg < min)
+			continue;
+
+		if (min > base_reg)
+			start = (min - base_reg) / map->reg_stride;
+		else
+			start = 0;
+
+		if (max < top_reg)
+			end = (max - base_reg) / map->reg_stride + 1;
+		else
+			end = rbnode->blklen;
+
+		bitmap_clear(rbnode->cache_present, start, end - start);
+	}
+
+	return 0;
+}
+
 struct regcache_ops regcache_rbtree_ops = {
 	.type = REGCACHE_RBTREE,
 	.name = "rbtree",
@@ -456,5 +534,6 @@ struct regcache_ops regcache_rbtree_ops = {
 	.exit = regcache_rbtree_exit,
 	.read = regcache_rbtree_read,
 	.write = regcache_rbtree_write,
-	.sync = regcache_rbtree_sync
+	.sync = regcache_rbtree_sync,
+	.drop = regcache_rbtree_drop,
 };
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 3455f833e473..d6c2d691b6e8 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -121,8 +121,6 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
 	map->reg_defaults_raw = config->reg_defaults_raw;
 	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
 	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
-	map->cache_present = NULL;
-	map->cache_present_nbits = 0;
 
 	map->cache = NULL;
 	map->cache_ops = cache_types[i];
@@ -181,7 +179,6 @@ void regcache_exit(struct regmap *map)
 
 	BUG_ON(!map->cache_ops);
 
-	kfree(map->cache_present);
 	kfree(map->reg_defaults);
 	if (map->cache_free)
 		kfree(map->reg_defaults_raw);
@@ -241,9 +238,6 @@ int regcache_write(struct regmap *map,
 
 	BUG_ON(!map->cache_ops);
 
-	if (!regmap_writeable(map, reg))
-		return -EIO;
-
 	if (!regmap_volatile(map, reg))
 		return map->cache_ops->write(map, reg, value);
 
@@ -410,22 +404,16 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
 int regcache_drop_region(struct regmap *map, unsigned int min,
 			 unsigned int max)
 {
-	unsigned int reg;
 	int ret = 0;
 
-	if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
+	if (!map->cache_ops || !map->cache_ops->drop)
 		return -EINVAL;
 
 	map->lock(map->lock_arg);
 
 	trace_regcache_drop_region(map->dev, min, max);
 
-	if (map->cache_present)
-		for (reg = min; reg < max + 1; reg++)
-			clear_bit(reg, map->cache_present);
-
-	if (map->cache_ops && map->cache_ops->drop)
-		ret = map->cache_ops->drop(map, min, max);
+	ret = map->cache_ops->drop(map, min, max);
 
 	map->unlock(map->lock_arg);
 
@@ -493,42 +481,6 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
 
-int regcache_set_reg_present(struct regmap *map, unsigned int reg)
-{
-	unsigned long *cache_present;
-	unsigned int cache_present_size;
-	unsigned int nregs;
-	int i;
-
-	nregs = reg + 1;
-	cache_present_size = BITS_TO_LONGS(nregs);
-	cache_present_size *= sizeof(long);
-
-	if (!map->cache_present) {
-		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
-		if (!cache_present)
-			return -ENOMEM;
-		bitmap_zero(cache_present, nregs);
-		map->cache_present = cache_present;
-		map->cache_present_nbits = nregs;
-	}
-
-	if (nregs > map->cache_present_nbits) {
-		cache_present = krealloc(map->cache_present,
-					 cache_present_size, GFP_KERNEL);
-		if (!cache_present)
-			return -ENOMEM;
-		for (i = 0; i < nregs; i++)
-			if (i >= map->cache_present_nbits)
-				clear_bit(i, cache_present);
-		map->cache_present = cache_present;
-		map->cache_present_nbits = nregs;
-	}
-
-	set_bit(reg, map->cache_present);
-	return 0;
-}
-
 bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 		      unsigned int val)
 {
@@ -620,7 +572,16 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
 	return -ENOENT;
 }
 
+static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
+{
+	if (!cache_present)
+		return true;
+
+	return test_bit(idx, cache_present);
+}
+
 static int regcache_sync_block_single(struct regmap *map, void *block,
+				      unsigned long *cache_present,
 				      unsigned int block_base,
 				      unsigned int start, unsigned int end)
 {
@@ -630,7 +591,7 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(map, regtmp))
+		if (!regcache_reg_present(cache_present, i))
 			continue;
 
 		val = regcache_get_val(map, block, i);
@@ -681,6 +642,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 }
 
 static int regcache_sync_block_raw(struct regmap *map, void *block,
+				   unsigned long *cache_present,
 				   unsigned int block_base, unsigned int start,
 				   unsigned int end)
 {
@@ -693,7 +655,7 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(map, regtmp)) {
+		if (!regcache_reg_present(cache_present, i)) {
 			ret = regcache_sync_block_raw_flush(map, &data,
 							    base, regtmp);
 			if (ret != 0)
@@ -724,13 +686,14 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
 }
 
 int regcache_sync_block(struct regmap *map, void *block,
+			unsigned long *cache_present,
 			unsigned int block_base, unsigned int start,
 			unsigned int end)
 {
 	if (regmap_can_raw_write(map))
-		return regcache_sync_block_raw(map, block, block_base,
-					       start, end);
+		return regcache_sync_block_raw(map, block, cache_present,
+					       block_base, start, end);
 	else
-		return regcache_sync_block_single(map, block, block_base,
-						  start, end);
+		return regcache_sync_block_single(map, block, cache_present,
+						  block_base, start, end);
 }
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 53495753fbdb..6c2652a8ad50 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -85,8 +85,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 	unsigned int reg_offset;
 
 	/* Suppress the cache if we're using a subrange */
-	if (from)
-		return from;
+	if (base)
+		return base;
 
 	/*
 	 * If we don't have a cache build one so we don't have to do a
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1643e889bafc..d10456ffd811 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -418,6 +418,31 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 				reg, ret);
 			goto err_alloc;
 		}
+
+		if (!chip->init_ack_masked)
+			continue;
+
+		/* Ack masked but set interrupts */
+		reg = chip->status_base +
+			(i * map->reg_stride * d->irq_reg_stride);
+		ret = regmap_read(map, reg, &d->status_buf[i]);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to read IRQ status: %d\n",
+				ret);
+			goto err_alloc;
+		}
+
+		if (d->status_buf[i] && chip->ack_base) {
+			reg = chip->ack_base +
+				(i * map->reg_stride * d->irq_reg_stride);
+			ret = regmap_write(map, reg,
+					d->status_buf[i] & d->mask_buf[i]);
+			if (ret != 0) {
+				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+					reg, ret);
+				goto err_alloc;
+			}
+		}
 	}
 
 	/* Wake is disabled by default */
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index e0d0c7d8a5c5..7d689a15c500 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -303,6 +303,7 @@ static void regmap_unlock_mutex(void *__map)
 }
 
 static void regmap_lock_spinlock(void *__map)
+__acquires(&map->spinlock)
 {
 	struct regmap *map = __map;
 	unsigned long flags;
@@ -312,6 +313,7 @@ static void regmap_lock_spinlock(void *__map)
 }
 
 static void regmap_unlock_spinlock(void *__map)
+__releases(&map->spinlock)
 {
 	struct regmap *map = __map;
 	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
@@ -687,6 +689,10 @@ skip_format_initialization:
 			unsigned win_max = win_min +
 				config->ranges[j].window_len - 1;
 
+			/* Allow data window inside its own virtual range */
+			if (j == i)
+				continue;
+
 			if (range_cfg->range_min <= sel_reg &&
 			    sel_reg <= range_cfg->range_max) {
 				dev_err(map->dev,
@@ -1261,6 +1267,9 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 	int ret;
 	void *context = _regmap_map_get_context(map);
 
+	if (!regmap_writeable(map, reg))
+		return -EIO;
+
 	if (!map->cache_bypass && !map->defer_caching) {
 		ret = regcache_write(map, reg, val);
 		if (ret != 0)
@@ -1888,13 +1897,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 			  int num_regs)
 {
+	struct reg_default *p;
 	int i, ret;
 	bool bypass;
 
-	/* If needed the implementation can be extended to support this */
-	if (map->patch)
-		return -EBUSY;
-
 	map->lock(map->lock_arg);
 
 	bypass = map->cache_bypass;
@@ -1911,11 +1917,13 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 		}
 	}
 
-	map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
-	if (map->patch != NULL) {
-		memcpy(map->patch, regs,
-		       num_regs * sizeof(struct reg_default));
-		map->patch_regs = num_regs;
+	p = krealloc(map->patch,
+		     sizeof(struct reg_default) * (map->patch_regs + num_regs),
+		     GFP_KERNEL);
+	if (p) {
+		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
+		map->patch = p;
+		map->patch_regs += num_regs;
 	} else {
 		ret = -ENOMEM;
 	}