Diffstat (limited to 'drivers/base')
 drivers/base/firmware_class.c         |  27
 drivers/base/regmap/internal.h        |   2
 drivers/base/regmap/regcache-rbtree.c |  68
 drivers/base/regmap/regcache.c        | 103
 drivers/base/regmap/regmap-debugfs.c  |  13
 drivers/base/regmap/regmap.c          |  26
 6 files changed, 185 insertions(+), 54 deletions(-)
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4b1f9265887f..01e21037d8fe 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 {
         struct firmware_buf *buf = fw_priv->buf;
 
+        /*
+         * There is a small window in which user can write to 'loading'
+         * between loading done and disappearance of 'loading'
+         */
+        if (test_bit(FW_STATUS_DONE, &buf->status))
+                return;
+
         set_bit(FW_STATUS_ABORT, &buf->status);
         complete_all(&buf->completion);
+
+        /* avoid user action after loading abort */
+        fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf) \
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
 {
         struct firmware_priv *fw_priv = to_firmware_priv(dev);
-        int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+        int loading = 0;
+
+        mutex_lock(&fw_lock);
+        if (fw_priv->buf)
+                loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+        mutex_unlock(&fw_lock);
 
         return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
                                       const char *buf, size_t count)
 {
         struct firmware_priv *fw_priv = to_firmware_priv(dev);
-        struct firmware_buf *fw_buf = fw_priv->buf;
+        struct firmware_buf *fw_buf;
         int loading = simple_strtol(buf, NULL, 10);
         int i;
 
         mutex_lock(&fw_lock);
-
+        fw_buf = fw_priv->buf;
         if (!fw_buf)
                 goto out;
 
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
                         struct firmware_priv, timeout_work.work);
 
         mutex_lock(&fw_lock);
-        if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-                mutex_unlock(&fw_lock);
-                return;
-        }
         fw_load_abort(fw_priv);
         mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
         cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-        fw_priv->buf = NULL;
-
         device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
         device_remove_bin_file(f_dev, &firmware_attr_data);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index c5f6ebd0466d..29c83160ca29 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -52,6 +52,7 @@ struct regmap_async {
 struct regmap {
         struct mutex mutex;
         spinlock_t spinlock;
+        unsigned long spinlock_flags;
         regmap_lock lock;
         regmap_unlock unlock;
         void *lock_arg; /* This is passed to lock/unlock functions */
@@ -148,6 +149,7 @@ struct regcache_ops {
         int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
         int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
         int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+        int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
 };
 
 bool regmap_writeable(struct regmap *map, unsigned int reg);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa0875f6f1b7..5c1435c4e210 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
         int registers = 0;
         int this_registers, average;
 
-        map->lock(map);
+        map->lock(map->lock_arg);
 
         mem_size = sizeof(*rbtree_ctx);
         mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
         seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                    nodes, registers, average, mem_size);
 
-        map->unlock(map);
+        map->unlock(map->lock_arg);
 
         return 0;
 }
@@ -304,6 +304,48 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
         return 0;
 }
 
+static struct regcache_rbtree_node *
+regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+{
+        struct regcache_rbtree_node *rbnode;
+        const struct regmap_range *range;
+        int i;
+
+        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+        if (!rbnode)
+                return NULL;
+
+        /* If there is a read table then use it to guess at an allocation */
+        if (map->rd_table) {
+                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
+                        if (regmap_reg_in_range(reg,
+                                                &map->rd_table->yes_ranges[i]))
+                                break;
+                }
+
+                if (i != map->rd_table->n_yes_ranges) {
+                        range = &map->rd_table->yes_ranges[i];
+                        rbnode->blklen = range->range_max - range->range_min
+                                + 1;
+                        rbnode->base_reg = range->range_min;
+                }
+        }
+
+        if (!rbnode->blklen) {
+                rbnode->blklen = sizeof(*rbnode);
+                rbnode->base_reg = reg;
+        }
+
+        rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
+                                GFP_KERNEL);
+        if (!rbnode->block) {
+                kfree(rbnode);
+                return NULL;
+        }
+
+        return rbnode;
+}
+
 static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                  unsigned int value)
 {
@@ -354,23 +396,15 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 return 0;
                         }
                 }
-                /* we did not manage to find a place to insert it in an existing
-                 * block so create a new rbnode with a single register in its block.
-                 * This block will get populated further if any other adjacent
-                 * registers get modified in the future.
-                 */
-                rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
-                if (!rbnode)
-                        return -ENOMEM;
-                rbnode->blklen = sizeof(*rbnode);
-                rbnode->base_reg = reg;
-                rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
-                                        GFP_KERNEL);
-                if (!rbnode->block) {
-                        kfree(rbnode);
-                        return -ENOMEM;
-                }
-                regcache_rbtree_set_register(map, rbnode, 0, value);
+
+                /* We did not manage to find a place to insert it in
+                 * an existing block so create a new rbnode.
+                 */
+                rbnode = regcache_rbtree_node_alloc(map, reg);
+                if (!rbnode)
+                        return -ENOMEM;
+                regcache_rbtree_set_register(map, rbnode,
+                                             reg - rbnode->base_reg, value);
                 regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                 rbtree_ctx->cached_rbnode = rbnode;
         }
@@ -391,8 +425,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
         for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                 rbnode = rb_entry(node, struct regcache_rbtree_node, node);
 
-                if (rbnode->base_reg < min)
-                        continue;
                 if (rbnode->base_reg > max)
                         break;
                 if (rbnode->base_reg + rbnode->blklen < min)
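
Illustrative usage, not part of the patch: the new regcache_rbtree_node_alloc() sizes a cache block from the first rd_table yes_range that contains the register being written. A minimal sketch of such a read table, with made-up register numbers:

        /* Hypothetical read table, shown only to illustrate block sizing:
         * with this table, the first cached write to any register in
         * 0x10..0x1f allocates one 16-register block (base_reg 0x10,
         * blklen 16) instead of a minimal single-register block. */
        static const struct regmap_range foo_rd_ranges[] = {
                { .range_min = 0x10, .range_max = 0x1f },
        };

        static const struct regmap_access_table foo_rd_table = {
                .yes_ranges   = foo_rd_ranges,
                .n_yes_ranges = ARRAY_SIZE(foo_rd_ranges),
        };
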
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 75923f2396bd..e69102696533 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -250,6 +250,38 @@ int regcache_write(struct regmap *map,
         return 0;
 }
 
+static int regcache_default_sync(struct regmap *map, unsigned int min,
+                                 unsigned int max)
+{
+        unsigned int reg;
+
+        for (reg = min; reg <= max; reg++) {
+                unsigned int val;
+                int ret;
+
+                if (regmap_volatile(map, reg))
+                        continue;
+
+                ret = regcache_read(map, reg, &val);
+                if (ret)
+                        return ret;
+
+                /* Is this the hardware default? If so skip. */
+                ret = regcache_lookup_reg(map, reg);
+                if (ret >= 0 && val == map->reg_defaults[ret].def)
+                        continue;
+
+                map->cache_bypass = 1;
+                ret = _regmap_write(map, reg, val);
+                map->cache_bypass = 0;
+                if (ret)
+                        return ret;
+                dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
+        }
+
+        return 0;
+}
+
 /**
  * regcache_sync: Sync the register cache with the hardware.
  *
@@ -268,9 +300,9 @@ int regcache_sync(struct regmap *map)
         const char *name;
         unsigned int bypass;
 
-        BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+        BUG_ON(!map->cache_ops);
 
-        map->lock(map);
+        map->lock(map->lock_arg);
         /* Remember the initial bypass state */
         bypass = map->cache_bypass;
         dev_dbg(map->dev, "Syncing %s cache\n",
@@ -297,7 +329,10 @@ int regcache_sync(struct regmap *map)
         }
         map->cache_bypass = 0;
 
-        ret = map->cache_ops->sync(map, 0, map->max_register);
+        if (map->cache_ops->sync)
+                ret = map->cache_ops->sync(map, 0, map->max_register);
+        else
+                ret = regcache_default_sync(map, 0, map->max_register);
 
         if (ret == 0)
                 map->cache_dirty = false;
@@ -306,7 +341,7 @@ out:
         trace_regcache_sync(map->dev, name, "stop");
         /* Restore the bypass state */
         map->cache_bypass = bypass;
-        map->unlock(map);
+        map->unlock(map->lock_arg);
 
         return ret;
 }
@@ -331,9 +366,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
         const char *name;
         unsigned int bypass;
 
-        BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+        BUG_ON(!map->cache_ops);
 
-        map->lock(map);
+        map->lock(map->lock_arg);
 
         /* Remember the initial bypass state */
         bypass = map->cache_bypass;
@@ -346,19 +381,59 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
         if (!map->cache_dirty)
                 goto out;
 
-        ret = map->cache_ops->sync(map, min, max);
+        if (map->cache_ops->sync)
+                ret = map->cache_ops->sync(map, min, max);
+        else
+                ret = regcache_default_sync(map, min, max);
 
 out:
         trace_regcache_sync(map->dev, name, "stop region");
         /* Restore the bypass state */
         map->cache_bypass = bypass;
-        map->unlock(map);
+        map->unlock(map->lock_arg);
 
         return ret;
 }
 EXPORT_SYMBOL_GPL(regcache_sync_region);
 
 /**
+ * regcache_drop_region: Discard part of the register cache
+ *
+ * @map: map to operate on
+ * @min: first register to discard
+ * @max: last register to discard
+ *
+ * Discard part of the register cache.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_drop_region(struct regmap *map, unsigned int min,
+                         unsigned int max)
+{
+        unsigned int reg;
+        int ret = 0;
+
+        if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
+                return -EINVAL;
+
+        map->lock(map->lock_arg);
+
+        trace_regcache_drop_region(map->dev, min, max);
+
+        if (map->cache_present)
+                for (reg = min; reg < max + 1; reg++)
+                        clear_bit(reg, map->cache_present);
+
+        if (map->cache_ops && map->cache_ops->drop)
+                ret = map->cache_ops->drop(map, min, max);
+
+        map->unlock(map->lock_arg);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_drop_region);
+
+/**
  * regcache_cache_only: Put a register map into cache only mode
  *
  * @map: map to configure
@@ -372,11 +447,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
  */
 void regcache_cache_only(struct regmap *map, bool enable)
 {
-        map->lock(map);
+        map->lock(map->lock_arg);
         WARN_ON(map->cache_bypass && enable);
         map->cache_only = enable;
         trace_regmap_cache_only(map->dev, enable);
-        map->unlock(map);
+        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
 
@@ -391,9 +466,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
  */
 void regcache_mark_dirty(struct regmap *map)
 {
-        map->lock(map);
+        map->lock(map->lock_arg);
         map->cache_dirty = true;
-        map->unlock(map);
+        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
 
@@ -410,11 +485,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
  */
 void regcache_cache_bypass(struct regmap *map, bool enable)
 {
-        map->lock(map);
+        map->lock(map->lock_arg);
         WARN_ON(map->cache_only && enable);
         map->cache_bypass = enable;
         trace_regmap_cache_bypass(map->dev, enable);
-        map->unlock(map);
+        map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
 
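
Illustrative usage, not part of the patch: a typical caller of the new regcache_drop_region() is a driver that knows a block of registers has been reset in hardware and wants the stale cache entries forgotten before the next regcache_sync(). The helper name and register range below are hypothetical:

        /* Hypothetical: a partial hardware reset wiped registers
         * 0x40..0x4f, so drop the cached values for that range. */
        static void foo_discard_stale_cache(struct regmap *map)
        {
                int ret;

                ret = regcache_drop_region(map, 0x40, 0x4f);
                if (ret < 0)
                        pr_warn("failed to drop regcache region: %d\n", ret);
        }
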
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 23b701f5fd2f..53495753fbdb 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -84,6 +84,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
         unsigned int fpos_offset;
         unsigned int reg_offset;
 
+        /* Suppress the cache if we're using a subrange */
+        if (from)
+                return from;
+
         /*
          * If we don't have a cache build one so we don't have to do a
          * linear scan each time.
@@ -145,7 +149,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                 reg_offset = fpos_offset / map->debugfs_tot_len;
                 *pos = c->min + (reg_offset * map->debugfs_tot_len);
                 mutex_unlock(&map->cache_lock);
-                return c->base_reg + reg_offset;
+                return c->base_reg + (reg_offset * map->reg_stride);
         }
 
         *pos = c->max;
@@ -265,6 +269,7 @@ static ssize_t regmap_map_write_file(struct file *file,
         char *start = buf;
         unsigned long reg, value;
         struct regmap *map = file->private_data;
+        int ret;
 
         buf_size = min(count, (sizeof(buf)-1));
         if (copy_from_user(buf, user_buf, buf_size))
@@ -280,9 +285,11 @@ static ssize_t regmap_map_write_file(struct file *file,
                 return -EINVAL;
 
         /* Userspace has been fiddling around behind the kernel's back */
-        add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
+        add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 
-        regmap_write(map, reg, value);
+        ret = regmap_write(map, reg, value);
+        if (ret < 0)
+                return ret;
         return buf_size;
 }
 #else
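
Worked example with hypothetical numbers, not part of the patch: debugfs dump offsets count registers, not addresses, so the reg_stride factor matters. For a cached block with base_reg 0x100 on a map with reg_stride 4, the entry at reg_offset 2 corresponds to register 0x100 + 2 * 4 = 0x108; the old return value of c->base_reg + reg_offset would have pointed at 0x102, which is not a valid register on such a map.
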
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index fef6f13b96bb..95920583e31e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -65,9 +65,8 @@ bool regmap_reg_in_ranges(unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
 
-static bool _regmap_check_range_table(struct regmap *map,
-                                      unsigned int reg,
-                                      const struct regmap_access_table *table)
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+                              const struct regmap_access_table *table)
 {
         /* Check "no ranges" first */
         if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
@@ -80,6 +79,7 @@ static bool _regmap_check_range_table(struct regmap *map,
         return regmap_reg_in_ranges(reg, table->yes_ranges,
                                     table->n_yes_ranges);
 }
+EXPORT_SYMBOL_GPL(regmap_check_range_table);
 
 bool regmap_writeable(struct regmap *map, unsigned int reg)
 {
@@ -90,7 +90,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
                 return map->writeable_reg(map->dev, reg);
 
         if (map->wr_table)
-                return _regmap_check_range_table(map, reg, map->wr_table);
+                return regmap_check_range_table(map, reg, map->wr_table);
 
         return true;
 }
@@ -107,7 +107,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
                 return map->readable_reg(map->dev, reg);
 
         if (map->rd_table)
-                return _regmap_check_range_table(map, reg, map->rd_table);
+                return regmap_check_range_table(map, reg, map->rd_table);
 
         return true;
 }
@@ -121,9 +121,12 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
                 return map->volatile_reg(map->dev, reg);
 
         if (map->volatile_table)
-                return _regmap_check_range_table(map, reg, map->volatile_table);
+                return regmap_check_range_table(map, reg, map->volatile_table);
 
-        return true;
+        if (map->cache_ops)
+                return false;
+        else
+                return true;
 }
 
 bool regmap_precious(struct regmap *map, unsigned int reg)
@@ -135,7 +138,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
                 return map->precious_reg(map->dev, reg);
 
         if (map->precious_table)
-                return _regmap_check_range_table(map, reg, map->precious_table);
+                return regmap_check_range_table(map, reg, map->precious_table);
 
         return false;
 }
@@ -302,13 +305,16 @@ static void regmap_unlock_mutex(void *__map)
 static void regmap_lock_spinlock(void *__map)
 {
         struct regmap *map = __map;
-        spin_lock(&map->spinlock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&map->spinlock, flags);
+        map->spinlock_flags = flags;
 }
 
 static void regmap_unlock_spinlock(void *__map)
 {
         struct regmap *map = __map;
-        spin_unlock(&map->spinlock);
+        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 }
 
 static void dev_get_regmap_release(struct device *dev, void *res)
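
Illustrative usage, not part of the patch: with regmap_check_range_table() now exported, code outside regmap.c (for example a cache backend) can test a register against an access table directly. A minimal sketch, with a hypothetical helper name:

        /* Hypothetical helper: treat a register as readable unless a
         * read table exists and excludes it. */
        static bool foo_reg_is_readable(struct regmap *map, unsigned int reg,
                                        const struct regmap_access_table *rd_table)
        {
                if (!rd_table)
                        return true;

                return regmap_check_range_table(map, reg, rd_table);
        }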