Diffstat (limited to 'drivers/video/tegra/host/nvhost_intr.c'):
-rw-r--r--  drivers/video/tegra/host/nvhost_intr.c  428
1 file changed, 428 insertions, 0 deletions
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c
new file mode 100644
index 00000000000..7c4bdc7bafb
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_intr.c
@@ -0,0 +1,428 @@
/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <trace/events/nvhost.h>

/*** Wait list management ***/

struct nvhost_waitlist {
	struct list_head list;		/* entry in the syncpt's wait queue */
	struct kref refcount;
	u32 thresh;			/* syncpt value that completes the wait */
	enum nvhost_intr_action action;
	atomic_t state;			/* enum waitlist_state */
	void *data;			/* action-specific payload */
	int count;			/* coalesced submit-complete count */
};

enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};
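
/*
 * Waiter lifecycle: a waiter normally goes WLS_PENDING -> WLS_REMOVED
 * (taken off the wait queue by remove_completed_waiters()) and then
 * WLS_REMOVED -> WLS_HANDLED once its action handler has run.  If
 * nvhost_intr_put_ref() cancels it first, it instead goes WLS_PENDING ->
 * WLS_CANCELLED, and the interrupt path advances it straight to
 * WLS_HANDLED and frees it without running the action.
 */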

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

/**
 * Add a waiter to a waiter queue, sorted by threshold.
 * Returns true if it was added at the head of the queue.
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
				struct list_head *queue)
{
	struct nvhost_waitlist *pos;
	u32 thresh = waiter->thresh;

	/* signed difference makes the comparison safe across wraparound */
	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/**
 * Run through the waiter queue for a single sync point ID and gather
 * all completed waiters into lists, grouped by action.
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct nvhost_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		/* the queue is sorted, so stop at the first incomplete waiter */
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups for the same channel */
		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
			&& !list_empty(dest)) {
			prev = list_entry(dest->prev,
					struct nvhost_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else {
			list_move_tail(&waiter->list, dest);
		}
	}
}
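
/**
 * Reprogram the hardware threshold to the first (lowest) waiter still
 * queued on this sync point and re-enable its interrupt.
 */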
void reset_threshold_interrupt(struct nvhost_intr *intr,
			       struct list_head *head,
			       unsigned int id)
{
	u32 thresh = list_first_entry(head,
				struct nvhost_waitlist, list)->thresh;
	BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
		 intr_op(intr).enable_syncpt_intr));

	intr_op(intr).set_syncpt_threshold(intr, id, thresh);
	intr_op(intr).enable_syncpt_intr(intr, id);
}

static void action_submit_complete(struct nvhost_waitlist *waiter)
{
	struct nvhost_channel *channel = waiter->data;
	int nr_completed = waiter->count;

	/* Add nr_completed to trace */
	trace_nvhost_channel_submit_complete(channel->dev->name,
			nr_completed, waiter->thresh);

	nvhost_cdma_update(&channel->cdma);
	nvhost_module_idle_mult(channel->dev, nr_completed);
}

static void action_ctxsave(struct nvhost_waitlist *waiter)
{
	struct nvhost_hwctx *hwctx = waiter->data;
	struct nvhost_channel *channel = hwctx->channel;

	if (channel->ctxhandler->save_service)
		channel->ctxhandler->save_service(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

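/* Dispatch table indexed by enum nvhost_intr_action; order must match the enum. */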
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_ctxsave,
	action_wakeup,
	action_wakeup_interruptible,
};

static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct nvhost_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/**
 * Remove and handle all waiters that have completed for the given syncpt.
 * Returns nonzero if the wait list is empty afterwards.
 */
static int process_wait_list(struct nvhost_intr *intr,
			struct nvhost_intr_syncpt *syncpt,
			u32 threshold)
{
	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->lock);

	remove_completed_waiters(&syncpt->wait_head, threshold, completed);

	empty = list_empty(&syncpt->wait_head);
	if (!empty)
		reset_threshold_interrupt(intr, &syncpt->wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->lock);

	/* run the handlers outside the spinlock */
	run_handlers(completed);

	return empty;
}

/*** host syncpt interrupt service functions ***/

/**
 * Sync point threshold interrupt service thread function.
 * Handles sync point threshold triggers, in thread context.
 */
irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
	struct nvhost_master *dev = intr_to_dev(intr);

	(void)process_wait_list(intr, syncpt,
			nvhost_syncpt_update_min(&dev->syncpt, id));

	return IRQ_HANDLED;
}

/**
 * Free a sync point's irq.  The syncpt interrupt should be disabled first.
 */
static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	if (syncpt->irq_requested) {
		free_irq(syncpt->irq, syncpt);
		syncpt->irq_requested = 0;
	}
}

/*** host general interrupt service functions ***/

/*** Main API ***/

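/**
 * nvhost_intr_add_action() - schedule an action for a sync point threshold
 * @intr:	interrupt manager
 * @id:		sync point ID to watch
 * @thresh:	sync point value that triggers the action
 * @action:	what to do when the threshold is reached
 * @data:	action-specific payload (channel, hwctx or wait queue)
 * @_waiter:	waiter preallocated with nvhost_intr_alloc_waiter()
 * @ref:	if non-NULL, receives a reference that the caller must later
 *		drop with nvhost_intr_put_ref()
 *
 * Queues the waiter on the sync point's wait list, lazily requesting the
 * sync point's irq on first use, and reprograms the hardware threshold if
 * the new waiter lands at the head of the queue.
 *
 * Illustrative call site (names assumed, not taken from this file):
 *
 *	DECLARE_WAIT_QUEUE_HEAD(wq);
 *	void *waiter = nvhost_intr_alloc_waiter();
 *	err = nvhost_intr_add_action(intr, id, thresh,
 *			NVHOST_INTR_ACTION_WAKEUP, &wq, waiter, NULL);
 */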
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
			enum nvhost_intr_action action, void *data,
			void *_waiter,
			void **ref)
{
	struct nvhost_waitlist *waiter = _waiter;
	struct nvhost_intr_syncpt *syncpt;
	int queue_was_empty;
	int err;

	BUG_ON(waiter == NULL);

	BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
		 intr_op(intr).enable_syncpt_intr));

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
	syncpt = intr->syncpt + id;

	spin_lock(&syncpt->lock);

	/* lazily request irq for this sync point */
	if (!syncpt->irq_requested) {
		spin_unlock(&syncpt->lock);

		mutex_lock(&intr->mutex);
		BUG_ON(!(intr_op(intr).request_syncpt_irq));
		err = intr_op(intr).request_syncpt_irq(syncpt);
		mutex_unlock(&intr->mutex);

		if (err) {
			kfree(waiter);
			return err;
		}

		spin_lock(&syncpt->lock);
	}

	queue_was_empty = list_empty(&syncpt->wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
		/* added at head of list - new threshold value */
		intr_op(intr).set_syncpt_threshold(intr, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			intr_op(intr).enable_syncpt_intr(intr, id);
	}

	spin_unlock(&syncpt->lock);

	if (ref)
		*ref = waiter;
	return 0;
}

void *nvhost_intr_alloc_waiter(void)
{
	return kzalloc(sizeof(struct nvhost_waitlist),
			GFP_KERNEL|__GFP_REPEAT);
}

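/**
 * nvhost_intr_put_ref() - drop a waiter reference taken by
 * nvhost_intr_add_action()
 *
 * Marks a still-pending waiter WLS_CANCELLED so the interrupt path frees
 * it without running its action.  If the waiter has already been removed
 * from the queue but not yet handled (WLS_REMOVED), yield until the
 * handler has finished with it.
 */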
void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
	struct nvhost_waitlist *waiter = ref;

	while (atomic_cmpxchg(&waiter->state,
				WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
		schedule();

	kref_put(&waiter->refcount, waiter_release);
}

/*** Init & shutdown ***/

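/**
 * nvhost_intr_init() - set up per-syncpt interrupt bookkeeping
 *
 * @irq_gen is the host general irq; @irq_sync is the first of nb_pts
 * consecutive per-syncpt irqs.  The irqs themselves are requested lazily,
 * on first use of each sync point.
 */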
int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;
	struct nvhost_master *host =
		container_of(intr, struct nvhost_master, intr);
	u32 nb_pts = host->syncpt.nb_pts;

	mutex_init(&intr->mutex);
	intr->host_general_irq = irq_gen;
	intr->host_general_irq_requested = false;

	for (id = 0, syncpt = intr->syncpt;
	     id < nb_pts;
	     ++id, ++syncpt) {
		syncpt->intr = &host->intr;
		syncpt->id = id;
		syncpt->irq = irq_sync + id;
		syncpt->irq_requested = 0;
		spin_lock_init(&syncpt->lock);
		INIT_LIST_HEAD(&syncpt->wait_head);
		snprintf(syncpt->thresh_irq_name,
			 sizeof(syncpt->thresh_irq_name),
			 "host_sp_%02d", id);
	}

	return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
	nvhost_intr_stop(intr);
}

void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
	BUG_ON(!(intr_op(intr).init_host_sync &&
		 intr_op(intr).set_host_clocks_per_usec &&
		 intr_op(intr).request_host_general_irq));

	mutex_lock(&intr->mutex);

	intr_op(intr).init_host_sync(intr);
	/* convert the clock rate to clocks per microsecond, rounding up */
	intr_op(intr).set_host_clocks_per_usec(intr,
			(hz + 1000000 - 1)/1000000);

	intr_op(intr).request_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}

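/**
 * nvhost_intr_stop() - disable syncpt interrupts and tear down waiters
 *
 * Reaps waiters that were cancelled but never handled, then frees each
 * sync point's irq and the host general irq.  Any waiter still queued
 * at this point is a bug.
 */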
void nvhost_intr_stop(struct nvhost_intr *intr)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;
	u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;

	BUG_ON(!(intr_op(intr).disable_all_syncpt_intrs &&
		 intr_op(intr).free_host_general_irq));

	mutex_lock(&intr->mutex);

	intr_op(intr).disable_all_syncpt_intrs(intr);

	for (id = 0, syncpt = intr->syncpt;
	     id < nb_pts;
	     ++id, ++syncpt) {
		struct nvhost_waitlist *waiter, *next;

		/* release waiters that were cancelled but never handled */
		list_for_each_entry_safe(waiter, next,
				&syncpt->wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
					WLS_CANCELLED, WLS_HANDLED)
					== WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt->wait_head)) { /* output diagnostics */
			printk(KERN_DEBUG "%s id=%d\n", __func__, id);
			BUG_ON(1);
		}

		free_syncpt_irq(syncpt);
	}

	intr_op(intr).free_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}