aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/video/tegra/host/host1x/host1x_syncpt.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/video/tegra/host/host1x/host1x_syncpt.c')
-rw-r--r--drivers/video/tegra/host/host1x/host1x_syncpt.c248
1 files changed, 248 insertions, 0 deletions
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.c b/drivers/video/tegra/host/host1x/host1x_syncpt.c
new file mode 100644
index 00000000000..b0fd9970aaa
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.c
@@ -0,0 +1,248 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_syncpt.c
3 *
4 * Tegra Graphics Host Syncpoints for HOST1X
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/nvhost_ioctl.h>
22#include "nvhost_syncpt.h"
23#include "dev.h"
24#include "host1x_syncpt.h"
25#include "host1x_hardware.h"
26
27/**
28 * Write the current syncpoint value back to hw.
29 */
30static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
31{
32 struct nvhost_master *dev = syncpt_to_dev(sp);
33 int min = nvhost_syncpt_read_min(sp, id);
34 writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
35}
36
37/**
38 * Write the current waitbase value back to hw.
39 */
40static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
41{
42 struct nvhost_master *dev = syncpt_to_dev(sp);
43 writel(sp->base_val[id],
44 dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
45}
46
47/**
48 * Read waitbase value from hw.
49 */
50static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
51{
52 struct nvhost_master *dev = syncpt_to_dev(sp);
53 sp->base_val[id] = readl(dev->sync_aperture +
54 (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
55}
56
/**
 * Updates the last value read from hardware.
 * (was nvhost_syncpt_update_min)
 *
 * Reads the live syncpoint value from hw and publishes it into the
 * cached min_val with a lock-free cmpxchg loop, so concurrent callers
 * never move the cache backwards.  Returns the live value read.
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	/* Retry until the cached min we swap out is the one we read:
	 * if another thread updated min_val in between, re-read both. */
	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	/* min should never overtake the software max; a failure here
	 * indicates a missed max update or hw/sw state divergence. */
	if (!nvhost_syncpt_check_max(sp, id, live))
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"%s failed: id=%u\n",
			__func__,
			id);

	return live;
}
80
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	BUG_ON(!nvhost_module_powered(dev->dev));
	/* For host-managed syncpoints, refuse to increment past the
	 * software max: that would desynchronize hw and sw state. */
	if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	/* CPU_INCR is a bitmask register: bit N increments syncpt N */
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	wmb();
}
99
/* check for old WAITs to be removed (avoiding a wrap) */
/*
 * Walk @num_waitchk entries in @wait and NULL out any WAIT_SYNCPT
 * host method whose threshold is already satisfied, by patching the
 * command stream in place via @nvmap.  @waitchk_mask selects which
 * syncpoint ids need a fresh min read first.  Returns 0 on success
 * or the first nvmap_patch_word() error.
 */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
				 struct nvmap_client *nvmap,
				 u32 waitchk_mask,
				 struct nvhost_waitchk *wait,
				 int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		if (nvhost_syncpt_is_expired(sp,
					wait->syncpt_id, wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
			    wait->syncpt_id,
			    syncpt_op(sp).name(sp, wait->syncpt_id),
			    wait->thresh,
			    nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			/* stop on the first patch failure; remaining
			 * entries are left unmodified for the caller */
			if (err)
				break;
		}

		wait++;
		num_waitchk--;
	}
	return err;
}
155
156
/* Human-readable names indexed by syncpoint id; empty strings mark
 * ids that are reserved or unused on this chip. */
static const char *s_syncpt_names[32] = {
	"gfx_host",
	"", "", "", "", "", "", "",
	"disp0_a", "disp1_a", "avp_0",
	"csi_vi_0", "csi_vi_1",
	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
	"2d_0", "2d_1",
	"disp0_b", "disp1_b",
	"3d",
	"mpe",
	"disp0_c", "disp1_c",
	"vblank0", "vblank1",
	"mpe_ebm_eof", "mpe_wr_safe",
	"2d_tinyblt",
	"dsi"
};
173
/* Return the human-readable name for syncpoint @id (may be ""). */
static const char *t20_syncpt_name(struct nvhost_syncpt *s, u32 id)
{
	BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
	return s_syncpt_names[id];
}
179
180static void t20_syncpt_debug(struct nvhost_syncpt *sp)
181{
182 u32 i;
183 for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
184 u32 max = nvhost_syncpt_read_max(sp, i);
185 u32 min = nvhost_syncpt_update_min(sp, i);
186 if (!max && !min)
187 continue;
188 dev_info(&syncpt_to_dev(sp)->dev->dev,
189 "id %d (%s) min %d max %d\n",
190 i, syncpt_op(sp).name(sp, i),
191 min, max);
192
193 }
194
195 for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++) {
196 u32 base_val;
197 t20_syncpt_read_wait_base(sp, i);
198 base_val = sp->base_val[i];
199 if (base_val)
200 dev_info(&syncpt_to_dev(sp)->dev->dev,
201 "waitbase id %d val %d\n",
202 i, base_val);
203
204 }
205}
206
207static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
208 unsigned int idx)
209{
210 void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
211 /* mlock registers returns 0 when the lock is aquired.
212 * writing 0 clears the lock. */
213 return !!readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
214}
215
216static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
217 unsigned int idx)
218{
219 void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
220
221 writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
222}
223
/*
 * Wire up the t20/host1x syncpoint implementation: locate the sync
 * register aperture, install the chip-specific op table, and record
 * the hw resource counts.  Always returns 0.
 */
int host1x_init_syncpt_support(struct nvhost_master *host)
{

	/* sync registers live inside channel 0's register window */
	host->sync_aperture = host->aperture +
		(NV_HOST1X_CHANNEL0_BASE +
			HOST1X_CHANNEL_SYNC_REG_BASE);

	/* chip-specific syncpoint operations */
	host->op.syncpt.reset = t20_syncpt_reset;
	host->op.syncpt.reset_wait_base = t20_syncpt_reset_wait_base;
	host->op.syncpt.read_wait_base = t20_syncpt_read_wait_base;
	host->op.syncpt.update_min = t20_syncpt_update_min;
	host->op.syncpt.cpu_incr = t20_syncpt_cpu_incr;
	host->op.syncpt.wait_check = t20_syncpt_wait_check;
	host->op.syncpt.debug = t20_syncpt_debug;
	host->op.syncpt.name = t20_syncpt_name;
	host->op.syncpt.mutex_try_lock = syncpt_mutex_try_lock;
	host->op.syncpt.mutex_unlock = syncpt_mutex_unlock;

	/* hw resource counts for this chip */
	host->syncpt.nb_pts = NV_HOST1X_SYNCPT_NB_PTS;
	host->syncpt.nb_bases = NV_HOST1X_SYNCPT_NB_BASES;
	host->syncpt.client_managed = NVSYNCPTS_CLIENT_MANAGED;
	host->syncpt.nb_mlocks = NV_HOST1X_SYNC_MLOCK_NUM;

	return 0;
}