Diffstat (limited to 'drivers/misc/sram.c')
 drivers/misc/sram.c | 137 ++++++++++++++++++++++++++-----------------------
 1 file changed, 72 insertions(+), 65 deletions(-)
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index eeaaf5fca105..15c33cc34a80 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -18,23 +18,20 @@
  * MA 02110-1301, USA.
  */
 
-#include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/clk.h>
-#include <linux/err.h>
+#include <linux/genalloc.h>
 #include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/list.h>
 #include <linux/list_sort.h>
+#include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
 
 #define SRAM_GRANULARITY 32
 
 struct sram_dev {
+        struct device *dev;
+        void __iomem *virt_base;
+
         struct gen_pool *pool;
         struct clk *clk;
 };
@@ -54,62 +51,27 @@ static int sram_reserve_cmp(void *priv, struct list_head *a,
         return ra->start - rb->start;
 }
 
-static int sram_probe(struct platform_device *pdev)
+static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 {
-        void __iomem *virt_base;
-        struct sram_dev *sram;
-        struct resource *res;
-        struct device_node *np = pdev->dev.of_node, *child;
+        struct device_node *np = sram->dev->of_node, *child;
         unsigned long size, cur_start, cur_size;
         struct sram_reserve *rblocks, *block;
         struct list_head reserve_list;
         unsigned int nblocks;
-        int ret;
+        int ret = 0;
 
         INIT_LIST_HEAD(&reserve_list);
 
-        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        if (!res) {
-                dev_err(&pdev->dev, "found no memory resource\n");
-                return -EINVAL;
-        }
-
         size = resource_size(res);
 
-        if (!devm_request_mem_region(&pdev->dev,
-                        res->start, size, pdev->name)) {
-                dev_err(&pdev->dev, "could not request region for resource\n");
-                return -EBUSY;
-        }
-
-        virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
-        if (IS_ERR(virt_base))
-                return PTR_ERR(virt_base);
-
-        sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
-        if (!sram)
-                return -ENOMEM;
-
-        sram->clk = devm_clk_get(&pdev->dev, NULL);
-        if (IS_ERR(sram->clk))
-                sram->clk = NULL;
-        else
-                clk_prepare_enable(sram->clk);
-
-        sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
-        if (!sram->pool)
-                return -ENOMEM;
-
         /*
          * We need an additional block to mark the end of the memory region
          * after the reserved blocks from the dt are processed.
          */
         nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
         rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
-        if (!rblocks) {
-                ret = -ENOMEM;
-                goto err_alloc;
-        }
+        if (!rblocks)
+                return -ENOMEM;
 
         block = &rblocks[0];
         for_each_available_child_of_node(np, child) {
@@ -117,17 +79,19 @@ static int sram_probe(struct platform_device *pdev)
 
                 ret = of_address_to_resource(child, 0, &child_res);
                 if (ret < 0) {
-                        dev_err(&pdev->dev,
+                        dev_err(sram->dev,
                                 "could not get address for node %s\n",
                                 child->full_name);
+                        of_node_put(child);
                         goto err_chunks;
                 }
 
                 if (child_res.start < res->start || child_res.end > res->end) {
-                        dev_err(&pdev->dev,
+                        dev_err(sram->dev,
                                 "reserved block %s outside the sram area\n",
                                 child->full_name);
                         ret = -EINVAL;
+                        of_node_put(child);
                         goto err_chunks;
                 }
 
@@ -135,9 +99,8 @@ static int sram_probe(struct platform_device *pdev)
                 block->size = resource_size(&child_res);
                 list_add_tail(&block->list, &reserve_list);
 
-                dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
-                        block->start,
-                        block->start + block->size);
+                dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
+                        block->start, block->start + block->size);
 
                 block++;
         }
@@ -154,7 +117,7 @@ static int sram_probe(struct platform_device *pdev)
         list_for_each_entry(block, &reserve_list, list) {
                 /* can only happen if sections overlap */
                 if (block->start < cur_start) {
-                        dev_err(&pdev->dev,
+                        dev_err(sram->dev,
                                 "block at 0x%x starts after current offset 0x%lx\n",
                                 block->start, cur_start);
                         ret = -EINVAL;
@@ -174,10 +137,11 @@ static int sram_probe(struct platform_device *pdev)
                  */
                 cur_size = block->start - cur_start;
 
-                dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
+                dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
                         cur_start, cur_start + cur_size);
+
                 ret = gen_pool_add_virt(sram->pool,
-                                (unsigned long)virt_base + cur_start,
+                                (unsigned long)sram->virt_base + cur_start,
                                 res->start + cur_start, cur_size, -1);
                 if (ret < 0)
                         goto err_chunks;
@@ -186,20 +150,63 @@ static int sram_probe(struct platform_device *pdev)
                 cur_start = block->start + block->size;
         }
 
+err_chunks:
         kfree(rblocks);
 
+        return ret;
+}
+
+static int sram_probe(struct platform_device *pdev)
+{
+        struct sram_dev *sram;
+        struct resource *res;
+        size_t size;
+        int ret;
+
+        sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
+        if (!sram)
+                return -ENOMEM;
+
+        sram->dev = &pdev->dev;
+
+        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        if (!res) {
+                dev_err(sram->dev, "found no memory resource\n");
+                return -EINVAL;
+        }
+
+        size = resource_size(res);
+
+        if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
+                dev_err(sram->dev, "could not request region for resource\n");
+                return -EBUSY;
+        }
+
+        sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
+        if (IS_ERR(sram->virt_base))
+                return PTR_ERR(sram->virt_base);
+
+        sram->pool = devm_gen_pool_create(sram->dev,
+                                          ilog2(SRAM_GRANULARITY), -1);
+        if (!sram->pool)
+                return -ENOMEM;
+
+        ret = sram_reserve_regions(sram, res);
+        if (ret)
+                return ret;
+
+        sram->clk = devm_clk_get(sram->dev, NULL);
+        if (IS_ERR(sram->clk))
+                sram->clk = NULL;
+        else
+                clk_prepare_enable(sram->clk);
+
         platform_set_drvdata(pdev, sram);
 
-        dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
+        dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
+                gen_pool_size(sram->pool) / 1024, sram->virt_base);
 
         return 0;
-
-err_chunks:
-        kfree(rblocks);
-err_alloc:
-        if (sram->clk)
-                clk_disable_unprepare(sram->clk);
-        return ret;
 }
 
 static int sram_remove(struct platform_device *pdev)
@@ -207,7 +214,7 @@ static int sram_remove(struct platform_device *pdev)
         struct sram_dev *sram = platform_get_drvdata(pdev);
 
         if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
-                dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
+                dev_err(sram->dev, "removed while SRAM allocated\n");
 
         if (sram->clk)
                 clk_disable_unprepare(sram->clk);