aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCharles Keepax <ckeepax@opensource.cirrus.com>2018-02-22 07:59:12 -0500
committerMark Brown <broonie@kernel.org>2018-02-26 06:00:32 -0500
commit7ef2c6b8689a084954cffbd102ee49c2fb72cbd4 (patch)
treee1dcaa8f9884d94bb7ee3fdadc5185883fb9dbfb
parentb4ecfec5ee3f282a4ac0876de332876fec9b488c (diff)
regmap: Move the handling for max_raw_write into regmap_raw_write
Currently regmap_bulk_write will split a write into chunks before calling regmap_raw_write if max_raw_write is set. It is more logical for this handling to be inside regmap_raw_write itself, as this removes the need to keep re-implementing the chunking code, which would be the same for all users of regmap_raw_write. Signed-off-by: Charles Keepax <ckeepax@opensource.cirrus.com> Signed-off-by: Mark Brown <broonie@kernel.org>
-rw-r--r--drivers/base/regmap/regmap.c117
1 file changed, 54 insertions, 63 deletions
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 707b0450ad72..e82ea77849fb 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1438,8 +1438,8 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 		buf[i] |= (mask >> (8 * i)) & 0xff;
 }
 
-int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len)
+static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+				  const void *val, size_t val_len)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1490,8 +1490,9 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 		while (val_num > win_residue) {
 			dev_dbg(map->dev, "Writing window %d/%zu\n",
 				win_residue, val_len / map->format.val_bytes);
-			ret = _regmap_raw_write(map, reg, val, win_residue *
-						map->format.val_bytes);
+			ret = _regmap_raw_write_impl(map, reg, val,
+						     win_residue *
+						     map->format.val_bytes);
 			if (ret != 0)
 				return ret;
 
@@ -1707,11 +1708,11 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 
 	map->format.format_val(map->work_buf + map->format.reg_bytes
 			       + map->format.pad_bytes, val, 0);
-	return _regmap_raw_write(map, reg,
-				 map->work_buf +
-				 map->format.reg_bytes +
-				 map->format.pad_bytes,
-				 map->format.val_bytes);
+	return _regmap_raw_write_impl(map, reg,
+				      map->work_buf +
+				      map->format.reg_bytes +
+				      map->format.pad_bytes,
+				      map->format.val_bytes);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1806,6 +1807,49 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len)
+{
+	size_t val_bytes = map->format.val_bytes;
+	size_t val_count = val_len / val_bytes;
+	int chunk_stride = map->reg_stride;
+	size_t chunk_size = val_bytes;
+	size_t chunk_count = val_count;
+	int ret, i;
+
+	if (!val_count)
+		return -EINVAL;
+
+	if (!map->use_single_write) {
+		if (map->max_raw_write)
+			chunk_size = map->max_raw_write;
+		else
+			chunk_size = val_len;
+		if (chunk_size % val_bytes)
+			chunk_size -= chunk_size % val_bytes;
+		chunk_count = val_len / chunk_size;
+		chunk_stride *= chunk_size / val_bytes;
+	}
+
+	/* Write as many bytes as possible with chunk_size */
+	for (i = 0; i < chunk_count; i++) {
+		ret = _regmap_raw_write_impl(map,
+					     reg + (i * chunk_stride),
+					     val + (i * chunk_size),
+					     chunk_size);
+		if (ret)
+			return ret;
+	}
+
+	/* Write remaining bytes */
+	if (!ret && chunk_size * i < val_len)
+		ret = _regmap_raw_write_impl(map, reg + (i * chunk_stride),
+					     val + (i * chunk_size),
+					     val_len - i * chunk_size);
+
+	return ret;
+}
+
 /**
  * regmap_raw_write() - Write raw values to one or more registers
  *
@@ -1831,8 +1875,6 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
-	if (map->max_raw_write && map->max_raw_write < val_len)
-		return -E2BIG;
 
 	map->lock(map->lock_arg);
 
@@ -1923,7 +1965,6 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 {
 	int ret = 0, i;
 	size_t val_bytes = map->format.val_bytes;
-	size_t total_size = val_bytes * val_count;
 
 	if (!IS_ALIGNED(reg, map->reg_stride))
 		return -EINVAL;
@@ -1998,57 +2039,9 @@ out:
 			if (ret)
 				return ret;
 		}
-	} else if (map->use_single_write ||
-		   (map->max_raw_write && map->max_raw_write < total_size)) {
-		int chunk_stride = map->reg_stride;
-		size_t chunk_size = val_bytes;
-		size_t chunk_count = val_count;
-		void *wval;
-
-		if (!val_count)
-			return -EINVAL;
-
-		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
-		if (!wval)
-			return -ENOMEM;
-
-		for (i = 0; i < val_count * val_bytes; i += val_bytes)
-			map->format.parse_inplace(wval + i);
-
-		if (!map->use_single_write) {
-			chunk_size = map->max_raw_write;
-			if (chunk_size % val_bytes)
-				chunk_size -= chunk_size % val_bytes;
-			chunk_count = total_size / chunk_size;
-			chunk_stride *= chunk_size / val_bytes;
-		}
-
-		map->lock(map->lock_arg);
-		/* Write as many bytes as possible with chunk_size */
-		for (i = 0; i < chunk_count; i++) {
-			ret = _regmap_raw_write(map,
-						reg + (i * chunk_stride),
-						wval + (i * chunk_size),
-						chunk_size);
-			if (ret)
-				break;
-		}
-
-		/* Write remaining bytes */
-		if (!ret && chunk_size * i < total_size) {
-			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
-						wval + (i * chunk_size),
-						total_size - i * chunk_size);
-		}
-		map->unlock(map->lock_arg);
-
-		kfree(wval);
 	} else {
 		void *wval;
 
-		if (!val_count)
-			return -EINVAL;
-
 		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
 		if (!wval)
 			return -ENOMEM;
@@ -2056,9 +2049,7 @@ out:
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_inplace(wval + i);
 
-		map->lock(map->lock_arg);
-		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
-		map->unlock(map->lock_arg);
+		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
 
 		kfree(wval);
 	}