aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/video/tegra/host/nvhost_acm.c
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/video/tegra/host/nvhost_acm.c
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/video/tegra/host/nvhost_acm.c')
-rw-r--r--drivers/video/tegra/host/nvhost_acm.c467
1 files changed, 467 insertions, 0 deletions
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c
new file mode 100644
index 00000000000..318f209651a
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_acm.c
@@ -0,0 +1,467 @@
1/*
2 * drivers/video/tegra/host/nvhost_acm.c
3 *
4 * Tegra Graphics Host Automatic Clock Management
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include "nvhost_acm.h"
22#include "dev.h"
23#include <linux/slab.h>
24#include <linux/string.h>
25#include <linux/sched.h>
26#include <linux/err.h>
27#include <linux/device.h>
28#include <linux/delay.h>
29#include <linux/platform_device.h>
30#include <mach/powergate.h>
31#include <mach/clk.h>
32#include <mach/hardware.h>
33
34#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ)
35#define POWERGATE_DELAY 10
36#define MAX_DEVID_LENGTH 16
37
/* Serializes access to every device's client_list and its entries. */
DEFINE_MUTEX(client_list_lock);

/* One clock-rate request, linked into a device's client_list. */
struct nvhost_module_client {
	struct list_head node;				/* list linkage */
	unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];	/* requested rate per clock */
	void *priv;					/* opaque client identity key */
};
45
/* Power-gate partition @id if it is currently powered; -1 means "none". */
static void do_powergate_locked(int id)
{
	if (id == -1)
		return;

	if (tegra_powergate_is_powered(id))
		tegra_powergate_partition(id);
}
51
/* Remove power gating from partition @id; -1 means "none". */
static void do_unpowergate_locked(int id)
{
	if (id == -1)
		return;

	tegra_unpowergate_partition(id);
}
57
/*
 * nvhost_module_reset - hard-reset a host1x client module.
 * @dev: module to reset
 *
 * Asserts module and memory-client reset for the module's (up to two)
 * powergate partitions, waits POWERGATE_DELAY us, then releases the
 * resets in the reverse per-partition order.  Serialized against power
 * state transitions by dev->lock.  A powergate id of -1 means "no
 * partition" and is skipped.
 */
void nvhost_module_reset(struct nvhost_device *dev)
{
	dev_dbg(&dev->dev,
		"%s: asserting %s module reset (id %d, id2 %d)\n",
		__func__, dev->name,
		dev->powergate_ids[0], dev->powergate_ids[1]);

	mutex_lock(&dev->lock);

	/* assert module and mc client reset */
	if (dev->powergate_ids[0] != -1) {
		tegra_powergate_mc_disable(dev->powergate_ids[0]);
		tegra_periph_reset_assert(dev->clk[0]);
		tegra_powergate_mc_flush(dev->powergate_ids[0]);
	}
	if (dev->powergate_ids[1] != -1) {
		tegra_powergate_mc_disable(dev->powergate_ids[1]);
		tegra_periph_reset_assert(dev->clk[1]);
		tegra_powergate_mc_flush(dev->powergate_ids[1]);
	}

	/* let the asserted resets settle before deasserting */
	udelay(POWERGATE_DELAY);

	/* deassert reset */
	if (dev->powergate_ids[0] != -1) {
		tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
		tegra_periph_reset_deassert(dev->clk[0]);
		tegra_powergate_mc_enable(dev->powergate_ids[0]);
	}
	if (dev->powergate_ids[1] != -1) {
		tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
		tegra_periph_reset_deassert(dev->clk[1]);
		tegra_powergate_mc_enable(dev->powergate_ids[1]);
	}

	mutex_unlock(&dev->lock);

	dev_dbg(&dev->dev, "%s: module %s out of reset\n",
		__func__, dev->name);
}
98
/*
 * Move @dev to the CLOCKGATED state.  Caller holds dev->lock.
 *
 * From RUNNING: disable all module clocks and drop the busy reference
 * held on the parent (host1x) module.
 * From POWERGATED (only when the module supports power gating): ungate
 * both partitions; clocks stay off.
 */
static void to_state_clockgated_locked(struct nvhost_device *dev)
{
	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
		int i;
		for (i = 0; i < dev->num_clks; i++)
			clk_disable(dev->clk[i]);
		if (dev->dev.parent)
			nvhost_module_idle(to_nvhost_device(dev->dev.parent));
	} else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
		&& dev->can_powergate) {
		do_unpowergate_locked(dev->powergate_ids[0]);
		do_unpowergate_locked(dev->powergate_ids[1]);
	}
	dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
}
114
/*
 * Move @dev to the RUNNING state.  Caller holds dev->lock.
 *
 * A POWERGATED module is first stepped to CLOCKGATED (ungates power).
 * From CLOCKGATED: take a busy reference on the parent (host1x), enable
 * every module clock, and — when the module came all the way up from
 * POWERGATED — run the optional ->finalize_poweron() hook with clocks on.
 */
static void to_state_running_locked(struct nvhost_device *dev)
{
	int prev_state = dev->powerstate;
	if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
		to_state_clockgated_locked(dev);
	if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
		int i;

		if (dev->dev.parent)
			nvhost_module_busy(to_nvhost_device(dev->dev.parent));

		for (i = 0; i < dev->num_clks; i++) {
			int err = clk_enable(dev->clk[i]);
			BUG_ON(err);
		}

		if (prev_state == NVHOST_POWER_STATE_POWERGATED
			&& dev->finalize_poweron)
			dev->finalize_poweron(dev);
	}
	dev->powerstate = NVHOST_POWER_STATE_RUNNING;
}
137
/* This gets called from powergate_handler() and from module suspend.
 * Module suspend is done for all modules, runtime power gating only
 * for modules with can_powergate set.
 *
 * Caller holds dev->lock.  Returns 0 on success, or the error from
 * ->prepare_poweroff() — in which case the module is left RUNNING
 * (the clocks were enabled for the hook and are not re-gated here).
 */
static int to_state_powergated_locked(struct nvhost_device *dev)
{
	int err = 0;

	if (dev->prepare_poweroff
		&& dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
		/* Clock needs to be on in prepare_poweroff */
		to_state_running_locked(dev);
		err = dev->prepare_poweroff(dev);
		if (err)
			return err;
	}

	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
		to_state_clockgated_locked(dev);

	if (dev->can_powergate) {
		do_powergate_locked(dev->powergate_ids[0]);
		do_powergate_locked(dev->powergate_ids[1]);
	}

	dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
	return 0;
}
166
167static void schedule_powergating_locked(struct nvhost_device *dev)
168{
169 if (dev->can_powergate)
170 schedule_delayed_work(&dev->powerstate_down,
171 msecs_to_jiffies(dev->powergate_delay));
172}
173
174static void schedule_clockgating_locked(struct nvhost_device *dev)
175{
176 schedule_delayed_work(&dev->powerstate_down,
177 msecs_to_jiffies(dev->clockgate_delay));
178}
179
/*
 * nvhost_module_busy - take a busy reference on @dev, powering it up.
 *
 * Runs the optional ->busy() hook first (outside dev->lock), cancels
 * any pending delayed power-down, bumps the refcount, and drives the
 * module to RUNNING if it is not already powered.
 */
void nvhost_module_busy(struct nvhost_device *dev)
{
	/* notify the device before taking the lock */
	if (dev->busy)
		dev->busy(dev);

	mutex_lock(&dev->lock);
	/* a new reference makes any scheduled gating stale */
	cancel_delayed_work(&dev->powerstate_down);

	dev->refcount++;
	if (dev->refcount > 0 && !nvhost_module_powered(dev))
		to_state_running_locked(dev);
	mutex_unlock(&dev->lock);
}
193
194static void powerstate_down_handler(struct work_struct *work)
195{
196 struct nvhost_device *dev;
197
198 dev = container_of(to_delayed_work(work),
199 struct nvhost_device,
200 powerstate_down);
201
202 mutex_lock(&dev->lock);
203 if (dev->refcount == 0) {
204 switch (dev->powerstate) {
205 case NVHOST_POWER_STATE_RUNNING:
206 to_state_clockgated_locked(dev);
207 schedule_powergating_locked(dev);
208 break;
209 case NVHOST_POWER_STATE_CLOCKGATED:
210 if (to_state_powergated_locked(dev))
211 schedule_powergating_locked(dev);
212 break;
213 default:
214 break;
215 }
216 }
217 mutex_unlock(&dev->lock);
218}
219
220
/*
 * nvhost_module_idle_mult - drop @refs busy references from @dev.
 *
 * When the count reaches zero, schedule clock gating (if the module is
 * still powered) and then — with dev->lock released — wake idle waiters
 * and run the optional ->idle() hook.
 */
void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
{
	bool kick = false;

	mutex_lock(&dev->lock);
	dev->refcount -= refs;
	if (dev->refcount == 0) {
		if (nvhost_module_powered(dev))
			schedule_clockgating_locked(dev);
		kick = true;
	}
	mutex_unlock(&dev->lock);

	/* wake-ups and callbacks run without dev->lock held */
	if (kick) {
		wake_up(&dev->idle_wq);

		if (dev->idle)
			dev->idle(dev);
	}
}
241
242int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
243 int index)
244{
245 struct clk *c;
246
247 c = dev->clk[index];
248 if (IS_ERR_OR_NULL(c))
249 return -EINVAL;
250
251 /* Need to enable client to get correct rate */
252 nvhost_module_busy(dev);
253 *rate = clk_get_rate(c);
254 nvhost_module_idle(dev);
255 return 0;
256
257}
258
259static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
260{
261 unsigned long rate = 0;
262 struct nvhost_module_client *m;
263
264 if (!dev->clk[index])
265 return -EINVAL;
266
267 list_for_each_entry(m, &dev->client_list, node) {
268 rate = max(m->rate[index], rate);
269 }
270 if (!rate)
271 rate = clk_round_rate(dev->clk[index],
272 dev->clocks[index].default_rate);
273
274 return clk_set_rate(dev->clk[index], rate);
275}
276
/*
 * nvhost_module_set_rate - record client @priv's requested rate.
 *
 * Stores the rounded @rate into the matching client entry for EVERY
 * clock of the module, then recomputes each clock's rate as the max
 * across clients.  Returns the first failing clk_set_rate() result,
 * or 0.
 *
 * NOTE(review): the @index parameter is never used here — the request
 * is applied to all clock slots, and all clocks are re-evaluated.
 * Confirm against callers whether a per-index update was intended.
 * An unknown @priv is silently ignored (rates are still re-evaluated).
 */
int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
	unsigned long rate, int index)
{
	struct nvhost_module_client *m;
	int i, ret = 0;

	mutex_lock(&client_list_lock);
	list_for_each_entry(m, &dev->client_list, node) {
		if (m->priv == priv) {
			for (i = 0; i < dev->num_clks; i++)
				m->rate[i] = clk_round_rate(dev->clk[i], rate);
			break;
		}
	}

	for (i = 0; i < dev->num_clks; i++) {
		ret = nvhost_module_update_rate(dev, i);
		if (ret < 0)
			break;
	}
	mutex_unlock(&client_list_lock);
	return ret;

}
301
302int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
303{
304 int i;
305 unsigned long rate;
306 struct nvhost_module_client *client;
307
308 client = kzalloc(sizeof(*client), GFP_KERNEL);
309 if (!client)
310 return -ENOMEM;
311
312 INIT_LIST_HEAD(&client->node);
313 client->priv = priv;
314
315 for (i = 0; i < dev->num_clks; i++) {
316 rate = clk_round_rate(dev->clk[i],
317 dev->clocks[i].default_rate);
318 client->rate[i] = rate;
319 }
320 mutex_lock(&client_list_lock);
321 list_add_tail(&client->node, &dev->client_list);
322 mutex_unlock(&client_list_lock);
323 return 0;
324}
325
326void nvhost_module_remove_client(struct nvhost_device *dev, void *priv)
327{
328 int i;
329 struct nvhost_module_client *m;
330
331 mutex_lock(&client_list_lock);
332 list_for_each_entry(m, &dev->client_list, node) {
333 if (priv == m->priv) {
334 list_del(&m->node);
335 break;
336 }
337 }
338 if (m) {
339 kfree(m);
340 for (i = 0; i < dev->num_clks; i++)
341 nvhost_module_update_rate(dev, i);
342 }
343 mutex_unlock(&client_list_lock);
344}
345
346int nvhost_module_init(struct nvhost_device *dev)
347{
348 int i = 0;
349
350 /* initialize clocks to known state */
351 INIT_LIST_HEAD(&dev->client_list);
352 while (dev->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) {
353 char devname[MAX_DEVID_LENGTH];
354 long rate = dev->clocks[i].default_rate;
355 struct clk *c;
356
357 snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name);
358 c = clk_get_sys(devname, dev->clocks[i].name);
359 BUG_ON(IS_ERR_OR_NULL(c));
360
361 rate = clk_round_rate(c, rate);
362 clk_enable(c);
363 clk_set_rate(c, rate);
364 clk_disable(c);
365 dev->clk[i] = c;
366 i++;
367 }
368 dev->num_clks = i;
369
370 mutex_init(&dev->lock);
371 init_waitqueue_head(&dev->idle_wq);
372 INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler);
373
374 /* power gate units that we can power gate */
375 if (dev->can_powergate) {
376 do_powergate_locked(dev->powergate_ids[0]);
377 do_powergate_locked(dev->powergate_ids[1]);
378 dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
379 } else {
380 do_unpowergate_locked(dev->powergate_ids[0]);
381 do_unpowergate_locked(dev->powergate_ids[1]);
382 dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
383 }
384
385 return 0;
386}
387
388static int is_module_idle(struct nvhost_device *dev)
389{
390 int count;
391 mutex_lock(&dev->lock);
392 count = dev->refcount;
393 mutex_unlock(&dev->lock);
394 return (count == 0);
395}
396
397static void debug_not_idle(struct nvhost_master *host)
398{
399 int i;
400 bool lock_released = true;
401
402 for (i = 0; i < host->nb_channels; i++) {
403 struct nvhost_device *dev = host->channels[i].dev;
404 mutex_lock(&dev->lock);
405 if (dev->name)
406 dev_warn(&host->dev->dev,
407 "tegra_grhost: %s: refcnt %d\n", dev->name,
408 dev->refcount);
409 mutex_unlock(&dev->lock);
410 }
411
412 for (i = 0; i < host->syncpt.nb_mlocks; i++) {
413 int c = atomic_read(&host->syncpt.lock_counts[i]);
414 if (c) {
415 dev_warn(&host->dev->dev,
416 "tegra_grhost: lock id %d: refcnt %d\n",
417 i, c);
418 lock_released = false;
419 }
420 }
421 if (lock_released)
422 dev_dbg(&host->dev->dev, "tegra_grhost: all locks released\n");
423}
424
/*
 * nvhost_module_suspend - wait for @dev to idle, then power-gate it.
 * @system_suspend: true for a full system suspend (enables the
 *                  "not idle" diagnostics dump).
 *
 * Waits up to ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT for the refcount to
 * reach zero; returns -EBUSY on timeout.  On success, cancels pending
 * gating work, forces the POWERGATED state and runs the optional
 * ->suspend() hook.  Returns 0 on success.
 */
int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend)
{
	int ret;
	struct nvhost_master *host = nvhost_get_host(dev);

	if (system_suspend && !is_module_idle(dev))
		debug_not_idle(host);

	/* wait_event_timeout() returns 0 only when the wait timed out */
	ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
			ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
	if (ret == 0) {
		dev_info(&dev->dev, "%s prevented suspend\n",
				dev->name);
		return -EBUSY;
	}

	if (system_suspend)
		dev_dbg(&dev->dev, "tegra_grhost: entered idle\n");

	mutex_lock(&dev->lock);
	cancel_delayed_work(&dev->powerstate_down);
	to_state_powergated_locked(dev);
	mutex_unlock(&dev->lock);

	if (dev->suspend)
		dev->suspend(dev);

	return 0;
}
454
455void nvhost_module_deinit(struct nvhost_device *dev)
456{
457 int i;
458
459 if (dev->deinit)
460 dev->deinit(dev);
461
462 nvhost_module_suspend(dev, false);
463 for (i = 0; i < dev->num_clks; i++)
464 clk_put(dev->clk[i]);
465 dev->powerstate = NVHOST_POWER_STATE_DEINIT;
466}
467