path: root/drivers/gpu/drm/nouveau/nouveau_fence.c
author     Ben Skeggs <bskeggs@redhat.com>    2009-12-11 04:24:15 -0500
committer  Dave Airlie <airlied@redhat.com>   2009-12-11 06:29:34 -0500
commit     6ee738610f41b59733f63718f0bdbcba7d3a3f12 (patch)
tree       eccb9f07671998c50a1bc606a54cd6f82ba43e0a /drivers/gpu/drm/nouveau/nouveau_fence.c
parent     d1ede145cea25c5b6d2ebb19b167af14e374bb45 (diff)
drm/nouveau: Add DRM driver for NVIDIA GPUs
This adds a drm/kms staging, non-API-stable driver for GPUs from NVIDIA.

This driver is a KMS-based driver and requires a compatible nouveau userspace libdrm and nouveau X.org driver.

This driver requires firmware files not available in this kernel tree; interested parties can find them via the nouveau project git archive.

This driver is reverse engineered, and is in no way supported by NVIDIA.

Support for nearly the complete range of NVIDIA hardware from nv04 to g80 (nv50) is available, and the KMS driver should support driving nearly all output types (DisplayPort is still under development), along with supporting suspend/resume.

This work is all from the upstream nouveau project, found at nouveau.freedesktop.org. The original authors list from the nouveau git tree is:

Anssi Hannula <anssi.hannula@iki.fi>
Ben Skeggs <bskeggs@redhat.com>
Francisco Jerez <currojerez@riseup.net>
Maarten Maathuis <madman2003@gmail.com>
Marcin Kościelnicki <koriakin@0x04.net>
Matthew Garrett <mjg@redhat.com>
Matt Parnell <mparnell@gmail.com>
Patrice Mandin <patmandin@gmail.com>
Pekka Paalanen <pq@iki.fi>
Xavier Chantry <shiningxc@gmail.com>

along with project founder Stephane Marchesin <marchesin@icps.u-strasbg.fr>.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_fence.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_fence.c   262
1 file changed, 262 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 000000000000..0cff7eb3690a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"

#define USE_REFCNT (dev_priv->card_type >= NV_10)

struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;

	uint32_t sequence;
	bool signalled;
};

static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	kfree(fence);
}

void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;
	uint32_t sequence;

	if (USE_REFCNT)
		sequence = nvchan_rd32(chan, 0x48);
	else
		sequence = chan->fence.last_sequence_irq;

	if (chan->fence.sequence_ack == sequence)
		return;
	chan->fence.sequence_ack = sequence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);
		kref_put(&fence->refcount, nouveau_fence_del);

		if (sequence == chan->fence.sequence_ack)
			break;
	}
}

int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	fence->channel = chan;

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref((void *)&fence);
	*pfence = fence;
	return ret;
}

struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? fence->channel : NULL;
}

int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
	struct nouveau_channel *chan = fence->channel;
	unsigned long flags;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		spin_lock_irqsave(&chan->fence.lock, flags);
		nouveau_fence_update(chan);
		spin_unlock_irqrestore(&chan->fence.lock, flags);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock_irqsave(&chan->fence.lock, flags);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock_irqrestore(&chan->fence.lock, flags);

	BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}

void
nouveau_fence_unref(void **sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(*sync_obj);

	if (fence)
		kref_put(&fence->refcount, nouveau_fence_del);
	*sync_obj = NULL;
}

void *
nouveau_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);

	kref_get(&fence->refcount);
	return sync_obj;
}

bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;
	unsigned long flags;

	if (fence->signalled)
		return true;

	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);
	return fence->signalled;
}

int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	int ret = 0;

	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

	while (1) {
		if (nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		if (lazy)
			schedule_timeout(1);

		if (intr && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}

int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

void
nouveau_fence_handler(struct drm_device *dev, int channel)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;

	if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
		chan = dev_priv->fifos[channel];

	if (chan) {
		spin_lock_irq(&chan->fence.lock);
		nouveau_fence_update(chan);
		spin_unlock_irq(&chan->fence.lock);
	}
}

int
nouveau_fence_init(struct nouveau_channel *chan)
{
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);
	return 0;
}

void
nouveau_fence_fini(struct nouveau_channel *chan)
{
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		fence->signalled = true;
		list_del(&fence->entry);
		kref_put(&fence->refcount, nouveau_fence_del);
	}
}

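
For reference, below is a minimal, illustrative sketch (not part of the patch) of how a caller inside the driver might use the interface this file adds: allocate a fence on a channel, emit it, block until the GPU passes it, then drop the reference. It assumes a valid, already-initialised struct nouveau_channel obtained elsewhere in the driver, and the helper name example_sync_channel is hypothetical.

/*
 * Hypothetical usage sketch -- not part of the patch.  Assumes "chan" is a
 * valid struct nouveau_channel set up elsewhere in the driver.
 */
static int example_sync_channel(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	/* Allocate a fence and emit it to the channel right away (emit == true). */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	/* Sleep (lazy == true) until the fence signals; allow signal interruption. */
	ret = nouveau_fence_wait(fence, NULL, true, true);

	/* Drop our reference; nouveau_fence_unref() also clears the pointer. */
	nouveau_fence_unref((void **)&fence);
	return ret;
}

As implemented above, nouveau_fence_wait() returns 0 once the fence has signalled, -EBUSY after the 3-second timeout, or -ERESTART if a signal is pending and intr is true.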