about summary refs log tree commit diff stats
path: root/sound/soc
diff options
context:
space:
mode:
author	Mark Brown <broonie@opensource.wolfsonmicro.com>	2011-11-10 12:39:40 -0500
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2011-12-12 11:20:28 -0500
commit1dfb6efd87d63d2efef6e985770d5dd642f83146 (patch)
tree6f2ec639c914860a0f77ffe80e8dad1cc83ca0d7 /sound/soc
parentcae59c7b2185856522822e40260174c088ca5b11 (diff)
ASoC: Remove rbtree register cache
All users now use regmap directly so delete the ASoC version of the code. Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Diffstat (limited to 'sound/soc')
-rw-r--r--	sound/soc/soc-cache.c	381
1 files changed, 0 insertions, 381 deletions
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 18bb6b3335e0..9d56f0218f41 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -66,378 +66,6 @@ static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
66	return -1;
67}
68
69struct snd_soc_rbtree_node {
70 struct rb_node node; /* the actual rbtree node holding this block */
71 unsigned int base_reg; /* base register handled by this block */
72 unsigned int word_size; /* number of bytes needed to represent the register index */
73 void *block; /* block of adjacent registers */
74 unsigned int blklen; /* number of registers available in the block */
75} __attribute__ ((packed));
76
77struct snd_soc_rbtree_ctx {
78 struct rb_root root;
79 struct snd_soc_rbtree_node *cached_rbnode;
80};
81
82static inline void snd_soc_rbtree_get_base_top_reg(
83 struct snd_soc_rbtree_node *rbnode,
84 unsigned int *base, unsigned int *top)
85{
86 *base = rbnode->base_reg;
87 *top = rbnode->base_reg + rbnode->blklen - 1;
88}
89
90static unsigned int snd_soc_rbtree_get_register(
91 struct snd_soc_rbtree_node *rbnode, unsigned int idx)
92{
93 unsigned int val;
94
95 switch (rbnode->word_size) {
96 case 1: {
97 u8 *p = rbnode->block;
98 val = p[idx];
99 return val;
100 }
101 case 2: {
102 u16 *p = rbnode->block;
103 val = p[idx];
104 return val;
105 }
106 default:
107 BUG();
108 break;
109 }
110 return -1;
111}
112
113static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
114 unsigned int idx, unsigned int val)
115{
116 switch (rbnode->word_size) {
117 case 1: {
118 u8 *p = rbnode->block;
119 p[idx] = val;
120 break;
121 }
122 case 2: {
123 u16 *p = rbnode->block;
124 p[idx] = val;
125 break;
126 }
127 default:
128 BUG();
129 break;
130 }
131}
132
133static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
134 struct rb_root *root, unsigned int reg)
135{
136 struct rb_node *node;
137 struct snd_soc_rbtree_node *rbnode;
138 unsigned int base_reg, top_reg;
139
140 node = root->rb_node;
141 while (node) {
142 rbnode = container_of(node, struct snd_soc_rbtree_node, node);
143 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
144 if (reg >= base_reg && reg <= top_reg)
145 return rbnode;
146 else if (reg > top_reg)
147 node = node->rb_right;
148 else if (reg < base_reg)
149 node = node->rb_left;
150 }
151
152 return NULL;
153}
154
155static int snd_soc_rbtree_insert(struct rb_root *root,
156 struct snd_soc_rbtree_node *rbnode)
157{
158 struct rb_node **new, *parent;
159 struct snd_soc_rbtree_node *rbnode_tmp;
160 unsigned int base_reg_tmp, top_reg_tmp;
161 unsigned int base_reg;
162
163 parent = NULL;
164 new = &root->rb_node;
165 while (*new) {
166 rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
167 node);
168 /* base and top registers of the current rbnode */
169 snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
170 &top_reg_tmp);
171 /* base register of the rbnode to be added */
172 base_reg = rbnode->base_reg;
173 parent = *new;
174 /* if this register has already been inserted, just return */
175 if (base_reg >= base_reg_tmp &&
176 base_reg <= top_reg_tmp)
177 return 0;
178 else if (base_reg > top_reg_tmp)
179 new = &((*new)->rb_right);
180 else if (base_reg < base_reg_tmp)
181 new = &((*new)->rb_left);
182 }
183
184 /* insert the node into the rbtree */
185 rb_link_node(&rbnode->node, parent, new);
186 rb_insert_color(&rbnode->node, root);
187
188 return 1;
189}
190
191static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
192{
193 struct snd_soc_rbtree_ctx *rbtree_ctx;
194 struct rb_node *node;
195 struct snd_soc_rbtree_node *rbnode;
196 unsigned int regtmp;
197 unsigned int val, def;
198 int ret;
199 int i;
200
201 rbtree_ctx = codec->reg_cache;
202 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
204 for (i = 0; i < rbnode->blklen; ++i) {
205 regtmp = rbnode->base_reg + i;
206 val = snd_soc_rbtree_get_register(rbnode, i);
207 def = snd_soc_get_cache_val(codec->reg_def_copy, i,
208 rbnode->word_size);
209 if (val == def)
210 continue;
211
212 WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
213
214 codec->cache_bypass = 1;
215 ret = snd_soc_write(codec, regtmp, val);
216 codec->cache_bypass = 0;
217 if (ret)
218 return ret;
219 dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
220 regtmp, val);
221 }
222 }
223
224 return 0;
225}
226
227static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
228 unsigned int pos, unsigned int reg,
229 unsigned int value)
230{
231 u8 *blk;
232
233 blk = krealloc(rbnode->block,
234 (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
235 if (!blk)
236 return -ENOMEM;
237
238 /* insert the register value in the correct place in the rbnode block */
239 memmove(blk + (pos + 1) * rbnode->word_size,
240 blk + pos * rbnode->word_size,
241 (rbnode->blklen - pos) * rbnode->word_size);
242
243 /* update the rbnode block, its size and the base register */
244 rbnode->block = blk;
245 rbnode->blklen++;
246 if (!pos)
247 rbnode->base_reg = reg;
248
249 snd_soc_rbtree_set_register(rbnode, pos, value);
250 return 0;
251}
252
253static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
254 unsigned int reg, unsigned int value)
255{
256 struct snd_soc_rbtree_ctx *rbtree_ctx;
257 struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
258 struct rb_node *node;
259 unsigned int val;
260 unsigned int reg_tmp;
261 unsigned int base_reg, top_reg;
262 unsigned int pos;
263 int i;
264 int ret;
265
266 rbtree_ctx = codec->reg_cache;
267 /* look up the required register in the cached rbnode */
268 rbnode = rbtree_ctx->cached_rbnode;
269 if (rbnode) {
270 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
271 if (reg >= base_reg && reg <= top_reg) {
272 reg_tmp = reg - base_reg;
273 val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
274 if (val == value)
275 return 0;
276 snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
277 return 0;
278 }
279 }
280 /* if we can't locate it in the cached rbnode we'll have
281 * to traverse the rbtree looking for it.
282 */
283 rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
284 if (rbnode) {
285 reg_tmp = reg - rbnode->base_reg;
286 val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
287 if (val == value)
288 return 0;
289 snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
290 rbtree_ctx->cached_rbnode = rbnode;
291 } else {
292 /* bail out early, no need to create the rbnode yet */
293 if (!value)
294 return 0;
295 /* look for an adjacent register to the one we are about to add */
296 for (node = rb_first(&rbtree_ctx->root); node;
297 node = rb_next(node)) {
298 rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
299 for (i = 0; i < rbnode_tmp->blklen; ++i) {
300 reg_tmp = rbnode_tmp->base_reg + i;
301 if (abs(reg_tmp - reg) != 1)
302 continue;
303 /* decide where in the block to place our register */
304 if (reg_tmp + 1 == reg)
305 pos = i + 1;
306 else
307 pos = i;
308 ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
309 reg, value);
310 if (ret)
311 return ret;
312 rbtree_ctx->cached_rbnode = rbnode_tmp;
313 return 0;
314 }
315 }
316 /* we did not manage to find a place to insert it in an existing
317 * block so create a new rbnode with a single register in its block.
318 * This block will get populated further if any other adjacent
319 * registers get modified in the future.
320 */
321 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
322 if (!rbnode)
323 return -ENOMEM;
324 rbnode->blklen = 1;
325 rbnode->base_reg = reg;
326 rbnode->word_size = codec->driver->reg_word_size;
327 rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
328 GFP_KERNEL);
329 if (!rbnode->block) {
330 kfree(rbnode);
331 return -ENOMEM;
332 }
333 snd_soc_rbtree_set_register(rbnode, 0, value);
334 snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
335 rbtree_ctx->cached_rbnode = rbnode;
336 }
337
338 return 0;
339}
340
341static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
342 unsigned int reg, unsigned int *value)
343{
344 struct snd_soc_rbtree_ctx *rbtree_ctx;
345 struct snd_soc_rbtree_node *rbnode;
346 unsigned int base_reg, top_reg;
347 unsigned int reg_tmp;
348
349 rbtree_ctx = codec->reg_cache;
350 /* look up the required register in the cached rbnode */
351 rbnode = rbtree_ctx->cached_rbnode;
352 if (rbnode) {
353 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
354 if (reg >= base_reg && reg <= top_reg) {
355 reg_tmp = reg - base_reg;
356 *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
357 return 0;
358 }
359 }
360 /* if we can't locate it in the cached rbnode we'll have
361 * to traverse the rbtree looking for it.
362 */
363 rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
364 if (rbnode) {
365 reg_tmp = reg - rbnode->base_reg;
366 *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
367 rbtree_ctx->cached_rbnode = rbnode;
368 } else {
369 /* uninitialized registers default to 0 */
370 *value = 0;
371 }
372
373 return 0;
374}
375
376static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
377{
378 struct rb_node *next;
379 struct snd_soc_rbtree_ctx *rbtree_ctx;
380 struct snd_soc_rbtree_node *rbtree_node;
381
382 /* if we've already been called then just return */
383 rbtree_ctx = codec->reg_cache;
384 if (!rbtree_ctx)
385 return 0;
386
387 /* free up the rbtree */
388 next = rb_first(&rbtree_ctx->root);
389 while (next) {
390 rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
391 next = rb_next(&rbtree_node->node);
392 rb_erase(&rbtree_node->node, &rbtree_ctx->root);
393 kfree(rbtree_node->block);
394 kfree(rbtree_node);
395 }
396
397 /* release the resources */
398 kfree(codec->reg_cache);
399 codec->reg_cache = NULL;
400
401 return 0;
402}
403
404static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
405{
406 struct snd_soc_rbtree_ctx *rbtree_ctx;
407 unsigned int word_size;
408 unsigned int val;
409 int i;
410 int ret;
411
412 codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
413 if (!codec->reg_cache)
414 return -ENOMEM;
415
416 rbtree_ctx = codec->reg_cache;
417 rbtree_ctx->root = RB_ROOT;
418 rbtree_ctx->cached_rbnode = NULL;
419
420 if (!codec->reg_def_copy)
421 return 0;
422
423 word_size = codec->driver->reg_word_size;
424 for (i = 0; i < codec->driver->reg_cache_size; ++i) {
425 val = snd_soc_get_cache_val(codec->reg_def_copy, i,
426 word_size);
427 if (!val)
428 continue;
429 ret = snd_soc_rbtree_cache_write(codec, i, val);
430 if (ret)
431 goto err;
432 }
433
434 return 0;
435
436err:
437 snd_soc_cache_exit(codec);
438 return ret;
439}
440
441static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
442{
443	int i;
@@ -516,15 +144,6 @@ static const struct snd_soc_cache_ops cache_types[] = {
516	.write = snd_soc_flat_cache_write,
517	.sync = snd_soc_flat_cache_sync
518	},
519 {
520 .id = SND_SOC_RBTREE_COMPRESSION,
521 .name = "rbtree",
522 .init = snd_soc_rbtree_cache_init,
523 .exit = snd_soc_rbtree_cache_exit,
524 .read = snd_soc_rbtree_cache_read,
525 .write = snd_soc_rbtree_cache_write,
526 .sync = snd_soc_rbtree_cache_sync
527 }
528};
529
530int snd_soc_cache_init(struct snd_soc_codec *codec)