author     Terje Bergstrom <tbergstrom@nvidia.com>            2013-03-22 10:34:02 -0400
committer  Thierry Reding <thierry.reding@avionic-design.de>  2013-04-22 06:32:42 -0400
commit     7ede0b0bf3e2595d40d6195b6fe4c4dcef438830 (patch)
tree       eb59a94a3892bd41ae810b1726f7e7ae9de96812
parent     754716874389ccbea5ee03174df8ad9e72e41880 (diff)
gpu: host1x: Add syncpoint wait and interrupts
Add support for sync point interrupts and sync point wait. Sync point
wait uses interrupts for unblocking waits.

Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Thierry Reding <thierry.reding@avionic-design.de>
Tested-by: Thierry Reding <thierry.reding@avionic-design.de>
Tested-by: Erik Faye-Lund <kusmabite@gmail.com>
Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>
-rw-r--r--  drivers/gpu/host1x/Makefile                   1
-rw-r--r--  drivers/gpu/host1x/dev.c                     12
-rw-r--r--  drivers/gpu/host1x/dev.h                     51
-rw-r--r--  drivers/gpu/host1x/hw/host1x01.c              2
-rw-r--r--  drivers/gpu/host1x/hw/hw_host1x01_sync.h     42
-rw-r--r--  drivers/gpu/host1x/hw/intr_hw.c             143
-rw-r--r--  drivers/gpu/host1x/intr.c                   328
-rw-r--r--  drivers/gpu/host1x/intr.h                    96
-rw-r--r--  drivers/gpu/host1x/syncpt.c                 159
-rw-r--r--  drivers/gpu/host1x/syncpt.h                  12
10 files changed, 846 insertions(+), 0 deletions(-)
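The wait path added by this patch (host1x_syncpt_wait() in syncpt.c, declared in syncpt.h) is what host1x client drivers are expected to call after submitting work that increments a sync point. Below is a minimal usage sketch; the client and its example_submit_and_wait() helper are invented for illustration, and only host1x_syncpt_incr_max() and host1x_syncpt_wait() are real interfaces from this series.

/*
 * Hypothetical illustration only: how a host1x client might use the wait
 * API added by this patch. The submission step is not part of this patch.
 */
#include <linux/kernel.h>
#include <linux/errno.h>

static int example_submit_and_wait(struct host1x_syncpt *sp, long timeout)
{
	u32 fence;
	int err;

	/* Reserve one increment; the submitted job is expected to perform it. */
	fence = host1x_syncpt_incr_max(sp, 1);

	/* ... push buffer submission would go here ... */

	/* Sleep until the sync point reaches the fence value or we time out. */
	err = host1x_syncpt_wait(sp, fence, timeout, NULL);
	if (err == -EAGAIN)
		pr_debug("sync point did not reach %u in time\n", fence);

	return err;
}

Per the implementation below, a zero timeout returns -EAGAIN without sleeping, and a negative timeout is clamped to LONG_MAX.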
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 363e6ab31517..5ef47ffce674 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -3,6 +3,7 @@ ccflags-y = -Idrivers/gpu/host1x
 host1x-y = \
 	syncpt.o \
 	dev.o \
+	intr.o \
 	hw/host1x01.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 0d6002cb67c1..b967f6e8df55 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -28,6 +28,7 @@
 #include <trace/events/host1x.h>
 
 #include "dev.h"
+#include "intr.h"
 #include "hw/host1x01.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
@@ -123,13 +124,24 @@ static int host1x_probe(struct platform_device *pdev)
 		return err;
 	}
 
+	err = host1x_intr_init(host, syncpt_irq);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize interrupts\n");
+		goto fail_deinit_syncpt;
+	}
+
 	return 0;
+
+fail_deinit_syncpt:
+	host1x_syncpt_deinit(host);
+	return err;
 }
 
 static int __exit host1x_remove(struct platform_device *pdev)
 {
 	struct host1x *host = platform_get_drvdata(pdev);
 
+	host1x_intr_deinit(host);
 	host1x_syncpt_deinit(host);
 	clk_disable_unprepare(host->clk);
 
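The probe path brings up interrupts only after sync points and unwinds in reverse order on failure, with host1x_remove() mirroring that order. A generic sketch of this unwind idiom, with invented foo_* names standing in for the syncpt/intr init pairs:

#include <linux/errno.h>

/* Placeholder sub-init functions, standing in for syncpt/intr setup. */
static int foo_init_a(void) { return 0; }
static void foo_deinit_a(void) { }
static int foo_init_b(void) { return -ENOMEM; /* pretend it fails */ }

static int foo_probe(void)
{
	int err;

	err = foo_init_a();
	if (err)
		return err;

	err = foo_init_b();		/* like host1x_intr_init() above */
	if (err)
		goto fail_deinit_a;	/* unwind only what already succeeded */

	return 0;

fail_deinit_a:
	foo_deinit_a();			/* like host1x_syncpt_deinit() */
	return err;
}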
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index eaf602657f76..caf9cc62eb17 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -21,6 +21,7 @@
 #include <linux/device.h>
 
 #include "syncpt.h"
+#include "intr.h"
 
 struct host1x_syncpt;
 
@@ -33,6 +34,17 @@ struct host1x_syncpt_ops {
 	int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
 };
 
+struct host1x_intr_ops {
+	int (*init_host_sync)(struct host1x *host, u32 cpm,
+		void (*syncpt_thresh_work)(struct work_struct *work));
+	void (*set_syncpt_threshold)(
+		struct host1x *host, u32 id, u32 thresh);
+	void (*enable_syncpt_intr)(struct host1x *host, u32 id);
+	void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+	void (*disable_all_syncpt_intrs)(struct host1x *host);
+	int (*free_syncpt_irq)(struct host1x *host);
+};
+
 struct host1x_info {
 	int nb_channels; /* host1x: num channels supported */
 	int nb_pts; /* host1x: num syncpoints supported */
@@ -50,7 +62,13 @@ struct host1x {
 	struct device *dev;
 	struct clk *clk;
 
+	struct mutex intr_mutex;
+	struct workqueue_struct *intr_wq;
+	int intr_syncpt_irq;
+
 	const struct host1x_syncpt_ops *syncpt_op;
+	const struct host1x_intr_ops *intr_op;
+
 };
 
 void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -93,4 +111,37 @@ static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
 	return host->syncpt_op->patch_wait(sp, patch_addr);
 }
 
+static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
+			void (*syncpt_thresh_work)(struct work_struct *))
+{
+	return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
+}
+
+static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
+			u32 id, u32 thresh)
+{
+	host->intr_op->set_syncpt_threshold(host, id, thresh);
+}
+
+static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
+			u32 id)
+{
+	host->intr_op->enable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
+			u32 id)
+{
+	host->intr_op->disable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+	host->intr_op->disable_all_syncpt_intrs(host);
+}
+
+static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
+{
+	return host->intr_op->free_syncpt_irq(host);
+}
 #endif
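The new host1x_intr_ops table mirrors host1x_syncpt_ops: the generic code in intr.c only ever calls through the host1x_hw_intr_*() wrappers above, so a later hardware revision could supply its own backend without touching the common layer. A hypothetical sketch follows; the host1x02_* names are invented for illustration and are not part of this patch.

/* Invented example backend (not in this patch). */
static int host1x02_intr_init_host_sync(struct host1x *host, u32 cpm,
			void (*work)(struct work_struct *))
{
	/* program generation-specific registers here */
	return 0;
}

static const struct host1x_intr_ops host1x02_intr_ops = {
	.init_host_sync = host1x02_intr_init_host_sync,
	/* remaining callbacks omitted for brevity */
};

int host1x02_init(struct host1x *host)
{
	host->intr_op = &host1x02_intr_ops;	/* core code needs no changes */
	return 0;
}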
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index 612b4574c4b6..f5c35fa66d05 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -21,6 +21,7 @@
 #include "hw/host1x01_hardware.h"
 
 /* include code */
+#include "hw/intr_hw.c"
 #include "hw/syncpt_hw.c"
 
 #include "dev.h"
@@ -28,6 +29,7 @@
 int host1x01_init(struct host1x *host)
 {
 	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
 
 	return 0;
 }
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
index 3af258b46e62..eea0bb06052a 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_sync.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -59,6 +59,48 @@ static inline u32 host1x_sync_syncpt_r(unsigned int id)
 }
 #define HOST1X_SYNC_SYNCPT(id) \
 	host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+	return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+	host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+	return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+	host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+	return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+	host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+	host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+	host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+	host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+	return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+	host1x_sync_syncpt_int_thresh_r(id)
 static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
 {
 	return 0x600 + id * REGISTER_STRIDE;
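The THRESH_* register families above are banked 32 sync points per 32-bit register, which is why intr_hw.c addresses them with BIT_WORD()/BIT_MASK(). A worked example for sync point 37, assuming REGISTER_STRIDE is 4 and a 32-bit BITS_PER_LONG as on the 32-bit Tegra parts this code targets (both are assumptions for the example, not values taken from this hunk):

/* Standalone illustration of the id -> (register, bit) mapping. */
#include <stdio.h>

#define BITS_PER_REG       32	/* per-register banking width */
#define REGISTER_STRIDE_EX 4	/* assumed stride, see note above */

int main(void)
{
	unsigned int id = 37;
	unsigned int word = id / BITS_PER_REG;		/* BIT_WORD(id) -> 1 */
	unsigned int mask = 1u << (id % BITS_PER_REG);	/* BIT_MASK(id) -> bit 5 */

	/* host1x_sync_syncpt_thresh_int_enable_cpu0_r(word) */
	unsigned int reg = 0x68 + word * REGISTER_STRIDE_EX;	/* -> 0x6c */

	printf("syncpt %u: write 0x%08x to register 0x%02x\n", id, mask, reg);
	return 0;
}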
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644
index 000000000000..b592eef1efcb
--- /dev/null
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/irq.h>
+
+#include "intr.h"
+#include "dev.h"
+
+/*
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
+{
+	unsigned int id = syncpt->id;
+	struct host1x *host = syncpt->host;
+
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+
+	queue_work(host->intr_wq, &syncpt->intr.work);
+}
+
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+	struct host1x *host = dev_id;
+	unsigned long reg;
+	int i, id;
+
+	for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) {
+		reg = host1x_sync_readl(host,
+			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+		for_each_set_bit(id, &reg, BITS_PER_LONG) {
+			struct host1x_syncpt *syncpt =
+				host->syncpt + (i * BITS_PER_LONG + id);
+			host1x_intr_syncpt_handle(syncpt);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+	u32 i;
+
+	for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) {
+		host1x_sync_writel(host, 0xffffffffu,
+			HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
+		host1x_sync_writel(host, 0xffffffffu,
+			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+	}
+}
+
+static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+	void (*syncpt_thresh_work)(struct work_struct *))
+{
+	int i, err;
+
+	host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+	for (i = 0; i < host->info->nb_pts; i++)
+		INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
+
+	err = devm_request_irq(host->dev, host->intr_syncpt_irq,
+			       syncpt_thresh_isr, IRQF_SHARED,
+			       "host1x_syncpt", host);
+	if (IS_ERR_VALUE(err)) {
+		WARN_ON(1);
+		return err;
+	}
+
+	/* disable the ip_busy_timeout. this prevents write drops */
+	host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+	/*
+	 * increase the auto-ack timeout to the maximum value. 2d will hang
+	 * otherwise on Tegra2.
+	 */
+	host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+	/* update host clocks per usec */
+	host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+
+	return 0;
+}
+
+static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
+	u32 id, u32 thresh)
+{
+	host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
+}
+
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+{
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
+}
+
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+{
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+}
+
+static int _host1x_free_syncpt_irq(struct host1x *host)
+{
+	devm_free_irq(host->dev, host->intr_syncpt_irq, host);
+	flush_workqueue(host->intr_wq);
+	return 0;
+}
+
+static const struct host1x_intr_ops host1x_intr_ops = {
+	.init_host_sync = _host1x_intr_init_host_sync,
+	.set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
+	.enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
+	.disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
+	.disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
+	.free_syncpt_irq = _host1x_free_syncpt_irq,
+};
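syncpt_thresh_isr() deliberately does the minimum in hard-IRQ context: mask the source, ack the latched status, and queue per-syncpoint work; the wait-list walking happens later in syncpt_thresh_work() (intr.c below). The same top-half/bottom-half split could also be expressed with a threaded IRQ; the sketch below is only an illustration of the structure, not what the patch does, and the example_* names are invented.

#include <linux/interrupt.h>

static irqreturn_t example_hardirq(int irq, void *data)
{
	/* mask + ack in the hard IRQ handler, as syncpt_thresh_isr() does */
	return IRQ_WAKE_THREAD;			/* defer the real work */
}

static irqreturn_t example_thread(int irq, void *data)
{
	/* walk the wait list and wake sleepers, like syncpt_thresh_work() */
	return IRQ_HANDLED;
}

static int example_setup(unsigned int irq, void *data)
{
	return request_threaded_irq(irq, example_hardirq, example_thread,
				    IRQF_SHARED, "example_syncpt", data);
}

A workqueue with one work item per sync point, as the patch uses, presumably lets independent sync points be processed concurrently rather than serialized on a single IRQ thread.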
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
new file mode 100644
index 000000000000..b1b5a80e3125
--- /dev/null
+++ b/drivers/gpu/host1x/intr.c
@@ -0,0 +1,328 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include "dev.h"
+#include "intr.h"
+
+/* Wait list management */
+
+enum waitlist_state {
+	WLS_PENDING,
+	WLS_REMOVED,
+	WLS_CANCELLED,
+	WLS_HANDLED
+};
+
+static void waiter_release(struct kref *kref)
+{
+	kfree(container_of(kref, struct host1x_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
+				struct list_head *queue)
+{
+	struct host1x_waitlist *pos;
+	u32 thresh = waiter->thresh;
+
+	list_for_each_entry_reverse(pos, queue, list)
+		if ((s32)(pos->thresh - thresh) <= 0) {
+			list_add(&waiter->list, &pos->list);
+			return false;
+		}
+
+	list_add(&waiter->list, queue);
+	return true;
+}
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into lists by actions
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+	struct list_head *dest;
+	struct host1x_waitlist *waiter, *next;
+
+	list_for_each_entry_safe(waiter, next, head, list) {
+		if ((s32)(waiter->thresh - sync) > 0)
+			break;
+
+		dest = completed + waiter->action;
+
+		/* PENDING->REMOVED or CANCELLED->HANDLED */
+		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+			list_del(&waiter->list);
+			kref_put(&waiter->refcount, waiter_release);
+		} else
+			list_move_tail(&waiter->list, dest);
+	}
+}
+
+static void reset_threshold_interrupt(struct host1x *host,
+				      struct list_head *head,
+				      unsigned int id)
+{
+	u32 thresh =
+		list_first_entry(head, struct host1x_waitlist, list)->thresh;
+
+	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+	host1x_hw_intr_enable_syncpt_intr(host, id);
+}
+
+static void action_wakeup(struct host1x_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+	wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+	wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct host1x_waitlist *waiter);
+
+static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+	action_wakeup,
+	action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+	struct list_head *head = completed;
+	int i;
+
+	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
+		action_handler handler = action_handlers[i];
+		struct host1x_waitlist *waiter, *next;
+
+		list_for_each_entry_safe(waiter, next, head, list) {
+			list_del(&waiter->list);
+			handler(waiter);
+			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
+				WLS_REMOVED);
+			kref_put(&waiter->refcount, waiter_release);
+		}
+	}
+}
+
+/*
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct host1x *host,
+			     struct host1x_syncpt *syncpt,
+			     u32 threshold)
+{
+	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
+	unsigned int i;
+	int empty;
+
+	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
+		INIT_LIST_HEAD(completed + i);
+
+	spin_lock(&syncpt->intr.lock);
+
+	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
+				 completed);
+
+	empty = list_empty(&syncpt->intr.wait_head);
+	if (empty)
+		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
+	else
+		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
+					  syncpt->id);
+
+	spin_unlock(&syncpt->intr.lock);
+
+	run_handlers(completed);
+
+	return empty;
+}
+
+/*
+ * Sync point threshold interrupt service thread function
+ * Handles sync point threshold triggers, in thread context
+ */
+
+static void syncpt_thresh_work(struct work_struct *work)
+{
+	struct host1x_syncpt_intr *syncpt_intr =
+		container_of(work, struct host1x_syncpt_intr, work);
+	struct host1x_syncpt *syncpt =
+		container_of(syncpt_intr, struct host1x_syncpt, intr);
+	unsigned int id = syncpt->id;
+	struct host1x *host = syncpt->host;
+
+	(void)process_wait_list(host, syncpt,
+				host1x_syncpt_load(host->syncpt + id));
+}
+
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+			   enum host1x_intr_action action, void *data,
+			   struct host1x_waitlist *waiter, void **ref)
+{
+	struct host1x_syncpt *syncpt;
+	int queue_was_empty;
+
+	if (waiter == NULL) {
+		pr_warn("%s: NULL waiter\n", __func__);
+		return -EINVAL;
+	}
+
+	/* initialize a new waiter */
+	INIT_LIST_HEAD(&waiter->list);
+	kref_init(&waiter->refcount);
+	if (ref)
+		kref_get(&waiter->refcount);
+	waiter->thresh = thresh;
+	waiter->action = action;
+	atomic_set(&waiter->state, WLS_PENDING);
+	waiter->data = data;
+	waiter->count = 1;
+
+	syncpt = host->syncpt + id;
+
+	spin_lock(&syncpt->intr.lock);
+
+	queue_was_empty = list_empty(&syncpt->intr.wait_head);
+
+	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
+		/* added at head of list - new threshold value */
+		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+
+		/* added as first waiter - enable interrupt */
+		if (queue_was_empty)
+			host1x_hw_intr_enable_syncpt_intr(host, id);
+	}
+
+	spin_unlock(&syncpt->intr.lock);
+
+	if (ref)
+		*ref = waiter;
+	return 0;
+}
+
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+{
+	struct host1x_waitlist *waiter = ref;
+	struct host1x_syncpt *syncpt;
+
+	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
+	       WLS_REMOVED)
+		schedule();
+
+	syncpt = host->syncpt + id;
+	(void)process_wait_list(host, syncpt,
+				host1x_syncpt_load(host->syncpt + id));
+
+	kref_put(&waiter->refcount, waiter_release);
+}
+
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
+{
+	unsigned int id;
+	u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+	mutex_init(&host->intr_mutex);
+	host->intr_syncpt_irq = irq_sync;
+	host->intr_wq = create_workqueue("host_syncpt");
+	if (!host->intr_wq)
+		return -ENOMEM;
+
+	for (id = 0; id < nb_pts; ++id) {
+		struct host1x_syncpt *syncpt = host->syncpt + id;
+
+		spin_lock_init(&syncpt->intr.lock);
+		INIT_LIST_HEAD(&syncpt->intr.wait_head);
+		snprintf(syncpt->intr.thresh_irq_name,
+			 sizeof(syncpt->intr.thresh_irq_name),
+			 "host1x_sp_%02d", id);
+	}
+
+	host1x_intr_start(host);
+
+	return 0;
+}
+
+void host1x_intr_deinit(struct host1x *host)
+{
+	host1x_intr_stop(host);
+	destroy_workqueue(host->intr_wq);
+}
+
+void host1x_intr_start(struct host1x *host)
+{
+	u32 hz = clk_get_rate(host->clk);
+	int err;
+
+	mutex_lock(&host->intr_mutex);
+	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
+					    syncpt_thresh_work);
+	if (err) {
+		mutex_unlock(&host->intr_mutex);
+		return;
+	}
+	mutex_unlock(&host->intr_mutex);
+}
+
+void host1x_intr_stop(struct host1x *host)
+{
+	unsigned int id;
+	struct host1x_syncpt *syncpt = host->syncpt;
+	u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+	mutex_lock(&host->intr_mutex);
+
+	host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+	for (id = 0; id < nb_pts; ++id) {
+		struct host1x_waitlist *waiter, *next;
+
+		list_for_each_entry_safe(waiter, next,
+			&syncpt[id].intr.wait_head, list) {
+			if (atomic_cmpxchg(&waiter->state,
+			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
+				list_del(&waiter->list);
+				kref_put(&waiter->refcount, waiter_release);
+			}
+		}
+
+		if (!list_empty(&syncpt[id].intr.wait_head)) {
+			/* output diagnostics */
+			mutex_unlock(&host->intr_mutex);
+			pr_warn("%s cannot stop syncpt intr id=%d\n",
+				__func__, id);
+			return;
+		}
+	}
+
+	host1x_hw_intr_free_syncpt_irq(host);
+
+	mutex_unlock(&host->intr_mutex);
+}
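remove_completed_waiters() leans on the ordering of enum waitlist_state: a single atomic_inc_return() turns WLS_PENDING into WLS_REMOVED (the waiter is still wanted and its handler will run) and WLS_CANCELLED into WLS_HANDLED (the waiter was abandoned by host1x_intr_put_ref() and can be freed on the spot). A tiny standalone check of that arithmetic:

/* Illustration only: mirrors the enum ordering used in intr.c above. */
#include <assert.h>

enum waitlist_state { WLS_PENDING, WLS_REMOVED, WLS_CANCELLED, WLS_HANDLED };

int main(void)
{
	int state;

	/* Normal completion: the waiter is still pending. */
	state = WLS_PENDING;
	assert(++state == WLS_REMOVED);	/* moved to the completed lists */

	/* Racing cancellation: host1x_intr_put_ref() already marked it. */
	state = WLS_CANCELLED;
	assert(++state == WLS_HANDLED);	/* dropped here, handler never runs */

	return 0;
}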
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
new file mode 100644
index 000000000000..a3f06abe94bb
--- /dev/null
+++ b/drivers/gpu/host1x/intr.h
@@ -0,0 +1,96 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_INTR_H
+#define __HOST1X_INTR_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct host1x;
+
+enum host1x_intr_action {
+	/*
+	 * Wake up a task.
+	 * 'data' points to a wait_queue_head_t
+	 */
+	HOST1X_INTR_ACTION_WAKEUP,
+
+	/*
+	 * Wake up an interruptible task.
+	 * 'data' points to a wait_queue_head_t
+	 */
+	HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+	HOST1X_INTR_ACTION_COUNT
+};
+
+struct host1x_syncpt_intr {
+	spinlock_t lock;
+	struct list_head wait_head;
+	char thresh_irq_name[12];
+	struct work_struct work;
+};
+
+struct host1x_waitlist {
+	struct list_head list;
+	struct kref refcount;
+	u32 thresh;
+	enum host1x_intr_action action;
+	atomic_t state;
+	void *data;
+	int count;
+};
+
+/*
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter structure - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking API.
+ */
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+			   enum host1x_intr_action action, void *data,
+			   struct host1x_waitlist *waiter, void **ref);
+
+/*
+ * Unreference an action submitted to host1x_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from host1x_intr_add_action()
+ */
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+
+/* Initialize host1x sync point interrupt */
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
+
+/* Deinitialize host1x sync point interrupt */
+void host1x_intr_deinit(struct host1x *host);
+
+/* Enable host1x sync point interrupt */
+void host1x_intr_start(struct host1x *host);
+
+/* Disable host1x sync point interrupt */
+void host1x_intr_stop(struct host1x *host);
+
+irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
+#endif
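host1x_syncpt_wait() in syncpt.c below is the in-tree consumer of this interface; condensed to its essentials, the calling convention looks roughly like this. Sketch only: error handling is trimmed, and example_wait() is an invented name, not part of the patch.

#include <linux/slab.h>
#include <linux/wait.h>

static int example_wait(struct host1x *host, struct host1x_syncpt *sp,
			u32 thresh, long timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct host1x_waitlist *waiter;
	void *ref;
	int err;

	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);	/* ownership passes below */
	if (!waiter)
		return -ENOMEM;

	err = host1x_intr_add_action(host, sp->id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		return err;

	/* sleep until the threshold interrupt wakes us, or the timeout expires */
	wait_event_interruptible_timeout(wq,
			host1x_syncpt_is_expired(sp, thresh), timeout);

	host1x_intr_put_ref(host, sp->id, ref);	/* required: ref was non-NULL */
	return 0;
}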
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 30385f62daee..07fad1412bab 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -24,6 +24,10 @@
 
 #include "syncpt.h"
 #include "dev.h"
+#include "intr.h"
+
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+#define MAX_STUCK_CHECK_COUNT 15
 
 static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
 						  struct device *dev,
@@ -141,6 +145,161 @@ void host1x_syncpt_incr(struct host1x_syncpt *sp)
 	host1x_syncpt_cpu_incr(sp);
 }
 
+/*
+ * Update the sync point from hardware, and return true if the syncpoint is
+ * expired, false if we may need to wait
+ */
+static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+	host1x_hw_syncpt_load(sp->host, sp);
+	return host1x_syncpt_is_expired(sp, thresh);
+}
+
+/*
+ * Main entrypoint for syncpoint value waits.
+ */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+		       u32 *value)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	void *ref;
+	struct host1x_waitlist *waiter;
+	int err = 0, check_count = 0;
+	u32 val;
+
+	if (value)
+		*value = 0;
+
+	/* first check cache */
+	if (host1x_syncpt_is_expired(sp, thresh)) {
+		if (value)
+			*value = host1x_syncpt_load(sp);
+		return 0;
+	}
+
+	/* try to read from register */
+	val = host1x_hw_syncpt_load(sp->host, sp);
+	if (host1x_syncpt_is_expired(sp, thresh)) {
+		if (value)
+			*value = val;
+		goto done;
+	}
+
+	if (!timeout) {
+		err = -EAGAIN;
+		goto done;
+	}
+
+	/* allocate a waiter */
+	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+	if (!waiter) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	/* schedule a wakeup when the syncpoint value is reached */
+	err = host1x_intr_add_action(sp->host, sp->id, thresh,
+				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+				     &wq, waiter, &ref);
+	if (err)
+		goto done;
+
+	err = -EAGAIN;
+	/* Caller-specified timeout may be impractically low */
+	if (timeout < 0)
+		timeout = LONG_MAX;
+
+	/* wait for the syncpoint, or timeout, or signal */
+	while (timeout) {
+		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
+		int remain = wait_event_interruptible_timeout(wq,
+				syncpt_load_min_is_expired(sp, thresh),
+				check);
+		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
+			if (value)
+				*value = host1x_syncpt_load(sp);
+			err = 0;
+			break;
+		}
+		if (remain < 0) {
+			err = remain;
+			break;
+		}
+		timeout -= check;
+		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
+			dev_warn(sp->host->dev,
+				 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+				 current->comm, sp->id, sp->name,
+				 thresh, timeout);
+			check_count++;
+		}
+	}
+	host1x_intr_put_ref(sp->host, sp->id, ref);
+
+done:
+	return err;
+}
+EXPORT_SYMBOL(host1x_syncpt_wait);
+
+/*
+ * Returns true if syncpoint is expired, false if we may need to wait
+ */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+	u32 current_val;
+	u32 future_val;
+	smp_rmb();
+	current_val = (u32)atomic_read(&sp->min_val);
+	future_val = (u32)atomic_read(&sp->max_val);
+
+	/* Note the use of unsigned arithmetic here (mod 1<<32).
+	 *
+	 * c = current_val = min_val = the current value of the syncpoint.
+	 * t = thresh = the value we are checking
+	 * f = future_val = max_val = the value c will reach when all
+	 *     outstanding increments have completed.
+	 *
+	 * Note that c always chases f until it reaches f.
+	 *
+	 * Dtf = (f - t)
+	 * Dtc = (c - t)
+	 *
+	 * Consider all cases:
+	 *
+	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
+	 *	B) .....c.....f..t..	Dtf > Dtc	expired
+	 *	C) ..t..c.....f.....	Dtf > Dtc	expired (Dct very large)
+	 *
+	 * Any case where f==c: always expired (for any t).	Dtf == Dcf
+	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
+	 * Any case where t==f!=c: always wait.			Dtf < Dtc (because Dtf==0,
+	 *							Dtc!=0)
+	 *
+	 * Other cases:
+	 *
+	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
+	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
+	 *	A) .....f..t..c.....	Dtf > Dtc	expired
+	 *
+	 * So:
+	 *	Dtf >= Dtc implies EXPIRED	(return true)
+	 *	Dtf <  Dtc implies WAIT		(return false)
+	 *
+	 * Note: If t is expired then we *cannot* wait on it. We would wait
+	 * forever (hang the system).
+	 *
+	 * Note: do NOT get clever and remove the -thresh from both sides. It
+	 * is NOT the same.
+	 *
+	 * If the future value is zero, we have a client managed sync point.
+	 * In that case we do a direct comparison.
+	 */
+	if (!host1x_syncpt_client_managed(sp))
+		return future_val - thresh >= current_val - thresh;
+	else
+		return (s32)(current_val - thresh) >= 0;
+}
+
 int host1x_syncpt_init(struct host1x *host)
 {
 	struct host1x_syncpt *syncpt;
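For a host-managed sync point, the expiry test above reduces to the unsigned comparison future_val - thresh >= current_val - thresh, evaluated mod 2^32. A small standalone program with invented values, including a wraparound case, to make the rule concrete:

/* Worked example of the Dtf >= Dtc rule from host1x_syncpt_is_expired(). */
#include <stdint.h>
#include <stdio.h>

static int is_expired(uint32_t current_val, uint32_t future_val, uint32_t thresh)
{
	/* same comparison as the driver, kept in 32-bit unsigned arithmetic */
	return (uint32_t)(future_val - thresh) >= (uint32_t)(current_val - thresh);
}

int main(void)
{
	/* c=10 < t=15 <= f=20: increments still outstanding, must wait */
	printf("%d\n", is_expired(10, 20, 15));		/* prints 0 */

	/* t=15 <= c=20: threshold already passed, expired */
	printf("%d\n", is_expired(20, 25, 15));		/* prints 1 */

	/* wraparound: counter passed t=0xfffffffe and wrapped to c=5 */
	printf("%d\n", is_expired(5, 8, 0xfffffffeu));	/* prints 1 */

	return 0;
}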
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index efa2b6e659d6..17c1616de100 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -23,6 +23,8 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+#include "intr.h"
+
 struct host1x;
 
 struct host1x_syncpt {
@@ -34,6 +36,9 @@ struct host1x_syncpt {
 	int client_managed;
 	struct host1x *host;
 	struct device *dev;
+
+	/* interrupt data */
+	struct host1x_syncpt_intr intr;
 };
 
 /* Initialize sync point array */
@@ -113,6 +118,9 @@ void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
 /* Load current value from hardware to the shadow register. */
 u32 host1x_syncpt_load(struct host1x_syncpt *sp);
 
+/* Check if the given syncpoint value has already passed */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
+
 /* Save host1x sync point state into shadow registers. */
 void host1x_syncpt_save(struct host1x *host);
 
@@ -128,6 +136,10 @@ void host1x_syncpt_incr(struct host1x_syncpt *sp);
 /* Indicate future operations by incrementing the sync point max. */
 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
 
+/* Wait until sync point reaches a threshold value, or a timeout. */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
+		       long timeout, u32 *value);
+
 /* Check if sync point id is valid. */
 static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 {