author		Mark Brown <broonie@linaro.org>	2013-10-28 16:01:32 -0400
committer	Mark Brown <broonie@linaro.org>	2013-10-28 16:01:32 -0400
commit		5eff79fe2d239a60c98ec709c769f8e8f952b42f (patch)
tree		7bcd2f93edc84b449b3be8af0c8de9e93b9a13b9 /drivers/base
parent		70c1c86d56522fd1dffb9c17df4cbec3c2e1ac60 (diff)
parent		04c50ccf0dab02923ef888a4839bfbd00de03181 (diff)
Merge remote-tracking branch 'regmap/topic/async' into regmap-next
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/regmap/internal.h		  5
-rw-r--r--	drivers/base/regmap/regcache.c		 19
-rw-r--r--	drivers/base/regmap/regmap-spi.c	  3
-rw-r--r--	drivers/base/regmap/regmap.c		217
4 files changed, 194 insertions, 50 deletions
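
The topic branch merged here reworks regmap's asynchronous I/O support: the per-call async argument to _regmap_raw_write() is replaced by a per-map map->async flag set around locked sections, completed struct regmap_async descriptors are parked on a free list and reused instead of being freed from a workqueue, single-register raw writes are packed into a single transfer buffer, and new driver-facing helpers (regmap_write_async(), regmap_update_bits_async(), regmap_update_bits_check_async()) are exported. A minimal driver-side sketch of the intended usage; the device, register addresses and values are hypothetical, only the regmap calls come from this merge:

#include <linux/regmap.h>

/* Hypothetical driver fragment: queue a few register writes without
 * blocking on each bus transaction, then wait once for all of them.
 */
static int example_chip_start(struct regmap *map)
{
	int ret;

	ret = regmap_write_async(map, 0x10, 0x01);	/* hypothetical enable register */
	if (ret != 0)
		return ret;

	ret = regmap_write_async(map, 0x11, 0x80);	/* hypothetical config register */
	if (ret != 0)
		return ret;

	/* Block until every async write queued on this map has completed. */
	return regmap_async_complete(map);
}
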
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 57f777835d97..6873b4ce03f9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -44,7 +44,6 @@ struct regmap_format {
 
 struct regmap_async {
 	struct list_head list;
-	struct work_struct cleanup;
 	struct regmap *map;
 	void *work_buf;
 };
@@ -64,9 +63,11 @@ struct regmap {
 	void *bus_context;
 	const char *name;
 
+	bool async;
 	spinlock_t async_lock;
 	wait_queue_head_t async_waitq;
 	struct list_head async_list;
+	struct list_head async_free;
 	int async_ret;
 
 #ifdef CONFIG_DEBUG_FS
@@ -218,7 +219,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len, bool async);
+		      const void *val, size_t val_len);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
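
The internal.h changes above carry the core of the rework: struct regmap_async loses its cleanup work item, struct regmap gains a bool async flag that replaces the bool async parameter of _regmap_raw_write(), and a new async_free list holds completed descriptors for reuse. As a reading aid, a simplified restatement of the allocate-or-reuse step that the regmap.c hunk further down implements (this only makes sense inside drivers/base/regmap, where struct regmap is visible; it is not a new API):

static struct regmap_async *get_async_desc(struct regmap *map)
{
	struct regmap_async *async;
	unsigned long flags;

	/* Prefer a descriptor recycled onto async_free by a previous
	 * completion; fall back to the bus allocator if none is free. */
	spin_lock_irqsave(&map->async_lock, flags);
	async = list_first_entry_or_null(&map->async_free,
					 struct regmap_async, list);
	if (async)
		list_del(&async->list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	if (!async)
		async = map->bus->async_alloc();

	return async;
}
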
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d6c2d691b6e8..d4dd77134814 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -307,6 +307,8 @@ int regcache_sync(struct regmap *map)
 	if (!map->cache_dirty)
 		goto out;
 
+	map->async = true;
+
 	/* Apply any patch first */
 	map->cache_bypass = 1;
 	for (i = 0; i < map->patch_regs; i++) {
@@ -332,11 +334,15 @@ int regcache_sync(struct regmap *map)
 	map->cache_dirty = false;
 
 out:
-	trace_regcache_sync(map->dev, name, "stop");
 	/* Restore the bypass state */
+	map->async = false;
 	map->cache_bypass = bypass;
 	map->unlock(map->lock_arg);
 
+	regmap_async_complete(map);
+
+	trace_regcache_sync(map->dev, name, "stop");
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regcache_sync);
@@ -375,17 +381,23 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	if (!map->cache_dirty)
 		goto out;
 
+	map->async = true;
+
 	if (map->cache_ops->sync)
 		ret = map->cache_ops->sync(map, min, max);
 	else
 		ret = regcache_default_sync(map, min, max);
 
 out:
-	trace_regcache_sync(map->dev, name, "stop region");
 	/* Restore the bypass state */
 	map->cache_bypass = bypass;
+	map->async = false;
 	map->unlock(map->lock_arg);
 
+	regmap_async_complete(map);
+
+	trace_regcache_sync(map->dev, name, "stop region");
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regcache_sync_region);
@@ -631,8 +643,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 
 	map->cache_bypass = 1;
 
-	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
-				false);
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
 
 	map->cache_bypass = 0;
 
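
With the hunks above, regcache_sync() and regcache_sync_region() run their register writes with map->async set and call regmap_async_complete() before returning, so on an async-capable bus a cache sync is queued as a batch of transfers with a single wait at the end. Existing callers need no changes; a sketch of a typical resume path, assuming a hypothetical example_chip driver structure:

#include <linux/device.h>
#include <linux/regmap.h>

struct example_chip {			/* hypothetical driver data */
	struct regmap *regmap;
};

static int example_chip_resume(struct device *dev)
{
	struct example_chip *chip = dev_get_drvdata(dev);

	/* The device lost its register state while powered off: leave
	 * cache-only mode, mark the cache dirty and replay it.  The sync
	 * itself now queues the writes asynchronously and waits for them,
	 * so no extra regmap_async_complete() call is needed here.
	 */
	regcache_cache_only(chip->regmap, false);
	regcache_mark_dirty(chip->regmap);

	return regcache_sync(chip->regmap);
}
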
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4c506bd940f3..37f12ae7aada 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -73,7 +73,8 @@ static int regmap_spi_async_write(void *context,
 
 	spi_message_init(&async->m);
 	spi_message_add_tail(&async->t[0], &async->m);
-	spi_message_add_tail(&async->t[1], &async->m);
+	if (val)
+		spi_message_add_tail(&async->t[1], &async->m);
 
 	async->m.complete = regmap_spi_complete;
 	async->m.context = async;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c1245cae0f43..d0ce2fef43a3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 unsigned int val);
 
-static void async_cleanup(struct work_struct *work)
-{
-	struct regmap_async *async = container_of(work, struct regmap_async,
-						  cleanup);
-
-	kfree(async->work_buf);
-	kfree(async);
-}
-
 bool regmap_reg_in_ranges(unsigned int reg,
 			  const struct regmap_range *ranges,
 			  unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,
 
 	spin_lock_init(&map->async_lock);
 	INIT_LIST_HEAD(&map->async_list);
+	INIT_LIST_HEAD(&map->async_free);
 	init_waitqueue_head(&map->async_waitq);
 
 	if (config->read_flag_mask || config->write_flag_mask) {
@@ -942,12 +934,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
  */
 void regmap_exit(struct regmap *map)
 {
+	struct regmap_async *async;
+
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
 	regmap_range_exit(map);
 	if (map->bus && map->bus->free_context)
 		map->bus->free_context(map->bus_context);
 	kfree(map->work_buf);
+	while (!list_empty(&map->async_free)) {
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		list_del(&async->list);
+		kfree(async->work_buf);
+		kfree(async);
+	}
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1039,7 +1041,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 }
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len, bool async)
+		      const void *val, size_t val_len)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1091,7 +1093,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			dev_dbg(map->dev, "Writing window %d/%zu\n",
 				win_residue, val_len / map->format.val_bytes);
 			ret = _regmap_raw_write(map, reg, val, win_residue *
-						map->format.val_bytes, async);
+						map->format.val_bytes);
 			if (ret != 0)
 				return ret;
 
@@ -1114,49 +1116,72 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	u8[0] |= map->write_flag_mask;
 
-	if (async && map->bus->async_write) {
-		struct regmap_async *async = map->bus->async_alloc();
-		if (!async)
-			return -ENOMEM;
+	/*
+	 * Essentially all I/O mechanisms will be faster with a single
+	 * buffer to write.  Since register syncs often generate raw
+	 * writes of single registers optimise that case.
+	 */
+	if (val != work_val && val_len == map->format.val_bytes) {
+		memcpy(work_val, val, map->format.val_bytes);
+		val = work_val;
+	}
+
+	if (map->async && map->bus->async_write) {
+		struct regmap_async *async;
 
 		trace_regmap_async_write_start(map->dev, reg, val_len);
 
-		async->work_buf = kzalloc(map->format.buf_size,
-					  GFP_KERNEL | GFP_DMA);
-		if (!async->work_buf) {
-			kfree(async);
-			return -ENOMEM;
+		spin_lock_irqsave(&map->async_lock, flags);
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		if (async)
+			list_del(&async->list);
+		spin_unlock_irqrestore(&map->async_lock, flags);
+
+		if (!async) {
+			async = map->bus->async_alloc();
+			if (!async)
+				return -ENOMEM;
+
+			async->work_buf = kzalloc(map->format.buf_size,
+						  GFP_KERNEL | GFP_DMA);
+			if (!async->work_buf) {
+				kfree(async);
+				return -ENOMEM;
+			}
 		}
 
-		INIT_WORK(&async->cleanup, async_cleanup);
 		async->map = map;
 
 		/* If the caller supplied the value we can use it safely. */
 		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
 		       map->format.reg_bytes + map->format.val_bytes);
-		if (val == work_val)
-			val = async->work_buf + map->format.pad_bytes +
-				map->format.reg_bytes;
 
 		spin_lock_irqsave(&map->async_lock, flags);
 		list_add_tail(&async->list, &map->async_list);
 		spin_unlock_irqrestore(&map->async_lock, flags);
 
-		ret = map->bus->async_write(map->bus_context, async->work_buf,
-					    map->format.reg_bytes +
-					    map->format.pad_bytes,
-					    val, val_len, async);
+		if (val != work_val)
+			ret = map->bus->async_write(map->bus_context,
+						    async->work_buf,
+						    map->format.reg_bytes +
+						    map->format.pad_bytes,
+						    val, val_len, async);
+		else
+			ret = map->bus->async_write(map->bus_context,
+						    async->work_buf,
+						    map->format.reg_bytes +
+						    map->format.pad_bytes +
+						    val_len, NULL, 0, async);
 
 		if (ret != 0) {
 			dev_err(map->dev, "Failed to schedule write: %d\n",
 				ret);
 
 			spin_lock_irqsave(&map->async_lock, flags);
-			list_del(&async->list);
+			list_move(&async->list, &map->async_free);
 			spin_unlock_irqrestore(&map->async_lock, flags);
-
-			kfree(async->work_buf);
-			kfree(async);
 		}
 
 		return ret;
@@ -1253,7 +1278,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				       map->work_buf +
 				       map->format.reg_bytes +
 				       map->format.pad_bytes,
-				       map->format.val_bytes, false);
+				       map->format.val_bytes);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1318,6 +1343,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write);
 
 /**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+	int ret;
+
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_write(map, reg, val);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
+/**
  * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
@@ -1345,7 +1401,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len, false);
+	ret = _regmap_raw_write(map, reg, val, val_len);
 
 	map->unlock(map->lock_arg);
 
@@ -1421,14 +1477,12 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			ret = _regmap_raw_write(map,
 						reg + (i * map->reg_stride),
 						val + (i * val_bytes),
-						val_bytes,
-						false);
+						val_bytes);
 			if (ret != 0)
 				return ret;
 		}
 	} else {
-		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
-					false);
+		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
 	}
 
 	if (val_bytes != 1)
@@ -1474,7 +1528,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len, true);
+	map->async = true;
+
+	ret = _regmap_raw_write(map, reg, val, val_len);
+
+	map->async = false;
 
 	map->unlock(map->lock_arg);
 
@@ -1789,6 +1847,41 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
 /**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ *                           map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+			     unsigned int mask, unsigned int val)
+{
+	bool change;
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_update_bits(map, reg, mask, val, &change);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
+
+/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
@@ -1813,6 +1906,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
 
+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ *                                 register map asynchronously and report if
+ *                                 updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+				   unsigned int mask, unsigned int val,
+				   bool *change)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_update_bits(map, reg, mask, val, change);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+
 void regmap_async_complete_cb(struct regmap_async *async, int ret)
 {
 	struct regmap *map = async->map;
@@ -1821,8 +1951,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	trace_regmap_async_io_complete(map->dev);
 
 	spin_lock(&map->async_lock);
-
-	list_del(&async->list);
+	list_move(&async->list, &map->async_free);
 	wake = list_empty(&map->async_list);
 
 	if (ret != 0)
@@ -1830,8 +1959,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 
 	spin_unlock(&map->async_lock);
 
-	schedule_work(&async->cleanup);
-
 	if (wake)
 		wake_up(&map->async_waitq);
 }
@@ -1907,6 +2034,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 	bypass = map->cache_bypass;
 
 	map->cache_bypass = true;
+	map->async = true;
 
 	/* Write out first; it's useful to apply even if we fail later. */
 	for (i = 0; i < num_regs; i++) {
@@ -1930,10 +2058,13 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 	}
 
 out:
+	map->async = false;
 	map->cache_bypass = bypass;
 
 	map->unlock(map->lock_arg);
 
+	regmap_async_complete(map);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_register_patch);
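
As the kernel-doc above notes, the new regmap_update_bits_async() and regmap_update_bits_check_async() helpers are most useful on maps with a register cache, since the read half of the read/modify/write is then served from the cache and only the resulting write is queued. A hedged usage sketch; the register address and bit are hypothetical:

#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Hypothetical mute control: 0x20 and BIT(3) stand in for a real
 * register and mask.  The update is queued asynchronously and then
 * flushed with a single wait.
 */
static int example_chip_set_mute(struct regmap *map, bool mute)
{
	int ret;

	ret = regmap_update_bits_async(map, 0x20, BIT(3), mute ? BIT(3) : 0);
	if (ret != 0)
		return ret;

	return regmap_async_complete(map);
}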