Diffstat (limited to 'drivers/gpu')
 61 files changed, 2025 insertions(+), 2121 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 78c6649407e6..d52ef8419fd2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -64,6 +64,6 @@ u64 nvif_device_time(struct nvif_device *);
 #include <engine/gr.h>
 #include <engine/sw.h>
 
-#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
+#define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvkm_gr(nvxx_device(a))
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
index dfa24d233321..4a77fdaa8b90 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
@@ -45,7 +45,4 @@ int _nvkm_engctx_init(struct nvkm_object *);
 int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
 #define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
 #define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
-
-struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
-void nvkm_engctx_put(struct nvkm_object *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 8c67d755e5c1..9d9c0e779f3f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -42,6 +42,7 @@ struct nvkm_engine_func {
                 int (*sclass)(struct nvkm_oclass *, int index);
         } fifo;
 
+        const struct nvkm_object_func *cclass;
         struct nvkm_sclass sclass[];
 };
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
index 88e8bb17a280..539278916d23 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
@@ -4,7 +4,6 @@
 struct nvkm_object;
 
 struct nvkm_handle {
-        struct nvkm_namedb *namedb;
         struct list_head node;
 
         struct list_head head;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
deleted file mode 100644
index 16337f69b113..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __NVKM_NAMEDB_H__
-#define __NVKM_NAMEDB_H__
-#include <core/parent.h>
-struct nvkm_handle;
-
-struct nvkm_namedb {
-        struct nvkm_parent parent;
-        rwlock_t lock;
-        struct list_head list;
-};
-
-static inline struct nvkm_namedb *
-nv_namedb(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-        BUG_ON(!nv_iclass(obj, NV_NAMEDB_CLASS));
-#endif
-        return obj;
-}
-
-#define nvkm_namedb_create(p,e,c,v,s,m,d) \
-        nvkm_namedb_create_((p), (e), (c), (v), (s), (m), \
-                            sizeof(**d), (void **)d)
-#define nvkm_namedb_init(p) \
-        nvkm_parent_init(&(p)->parent)
-#define nvkm_namedb_fini(p,s) \
-        nvkm_parent_fini(&(p)->parent, (s))
-#define nvkm_namedb_destroy(p) \
-        nvkm_parent_destroy(&(p)->parent)
-
-int nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
-                        struct nvkm_oclass *, u32 pclass,
-                        struct nvkm_oclass *, u64 engcls,
-                        int size, void **);
-
-int _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
-                      struct nvkm_oclass *, void *, u32,
-                      struct nvkm_object **);
-#define _nvkm_namedb_dtor _nvkm_parent_dtor
-#define _nvkm_namedb_init _nvkm_parent_init
-#define _nvkm_namedb_fini _nvkm_parent_fini
-
-int nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
-                       struct nvkm_handle *);
-void nvkm_namedb_remove(struct nvkm_handle *);
-
-struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
-struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, s32);
-struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
-struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
-void nvkm_namedb_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index ac97072dcfef..d9e494ba5033 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,25 +1,39 @@
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
-#include <core/namedb.h>
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <core/engine.h>
+#include <core/event.h>
+
+#define NVKM_FIFO_CHID_NR 4096
+
+struct nvkm_fifo_engn {
+        struct nvkm_object *object;
+        int refcount;
+        int usecount;
+};
 
 struct nvkm_fifo_chan {
-        struct nvkm_namedb namedb;
-        struct nvkm_gpuobj *pushgpu;
+        const struct nvkm_fifo_chan_func *func;
+        struct nvkm_fifo *fifo;
+        u64 engines;
+        struct nvkm_object object;
+
+        struct list_head head;
+        u16 chid;
+        struct nvkm_gpuobj *inst;
+        struct nvkm_gpuobj *push;
+        struct nvkm_vm *vm;
         void __iomem *user;
         u64 addr;
         u32 size;
-        u16 chid;
-        u64 inst;
+
+        struct nvkm_fifo_engn engn[NVDEV_SUBDEV_NR];
 };
 
-static inline struct nvkm_fifo_chan *
-nvkm_fifo_chan(void *obj)
-{
-        return (void *)nv_namedb(obj);
-}
+extern const struct nvkm_object_func nvkm_fifo_chan_func;
 
 #include <core/gpuobj.h>
-
 struct nvkm_fifo_base {
         struct nvkm_gpuobj gpuobj;
 };
@@ -39,25 +53,27 @@ struct nvkm_fifo_base {
 #define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
 #define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
 
-#include <core/engine.h>
-#include <core/event.h>
-
 struct nvkm_fifo {
         struct nvkm_engine engine;
+        const struct nvkm_fifo_func *func;
 
         struct nvkm_event cevent; /* channel creation event */
         struct nvkm_event uevent; /* async user trigger */
 
-        struct nvkm_object **channel;
+        DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
+        int nr;
+        struct list_head chan;
         spinlock_t lock;
-        u16 min;
-        u16 max;
 
-        int (*chid)(struct nvkm_fifo *, struct nvkm_object *);
         void (*pause)(struct nvkm_fifo *, unsigned long *);
         void (*start)(struct nvkm_fifo *, unsigned long *);
 };
 
+struct nvkm_fifo_func {
+        void *(*dtor)(struct nvkm_fifo *);
+        const struct nvkm_fifo_chan_oclass *chan[];
+};
+
 void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
                         struct nvkm_fifo_chan **);
 struct nvkm_fifo_chan *
@@ -65,12 +81,6 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
 
-static inline struct nvkm_fifo *
-nvkm_fifo(void *obj)
-{
-        return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
-}
-
 #define nvkm_fifo_create(o,e,c,fc,lc,d) \
         nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
 #define nvkm_fifo_init(p) \
@@ -82,8 +92,6 @@ int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
                       struct nvkm_oclass *, int min, int max,
                       int size, void **);
 void nvkm_fifo_destroy(struct nvkm_fifo *);
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
 
 #define _nvkm_fifo_init _nvkm_engine_init
 #define _nvkm_fifo_fini _nvkm_engine_fini
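
[Example] The checked-cast helpers (the nv_namedb()-based nvkm_fifo_chan() and the engine-walking nvkm_fifo()) become plain container_of() macros over embedded structs. A minimal sketch of that pattern with stand-in types (not the kernel definitions):

    #include <stddef.h>

    struct nvkm_object { int unused; };         /* stand-in base type */

    struct nvkm_fifo_chan {
            int chid;
            struct nvkm_object object;          /* embedded base object */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)

    /* recover the channel from a pointer to its embedded base object */
    static int chan_chid(struct nvkm_object *object)
    {
            return nvkm_fifo_chan(object)->chid;
    }

Unlike the old helper, this performs no runtime class check; it is only valid when the caller already knows the object is embedded in a channel.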
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 4d7ad6d3fbd4..412c5be5a9ca 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 
-	priv->base.contexts = fifo->max + 1;
+	priv->base.contexts = fifo->nr;
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index e56c8eb9b054..d3932d59ff09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -8,7 +8,6 @@ nvkm-y += nvkm/core/handle.o
 nvkm-y += nvkm/core/ioctl.o
 nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
 nvkm-y += nvkm/core/notify.o
 nvkm-y += nvkm/core/object.o
 nvkm-y += nvkm/core/oproxy.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
index be640fd24f77..bd13facc53d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
@@ -124,58 +124,12 @@ nvkm_engctx_destroy(struct nvkm_engctx *engctx)
 int
 nvkm_engctx_init(struct nvkm_engctx *engctx)
 {
-        struct nvkm_object *object = nv_object(engctx);
-        struct nvkm_subdev *subdev = nv_subdev(object->engine);
-        struct nvkm_object *parent;
-        struct nvkm_subdev *pardev;
-        int ret;
-
-        ret = nvkm_gpuobj_init(&engctx->gpuobj);
-        if (ret)
-                return ret;
-
-        parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-        pardev = nv_subdev(parent->engine);
-        if (nv_parent(parent)->context_attach) {
-                mutex_lock(&pardev->mutex);
-                ret = nv_parent(parent)->context_attach(parent, object);
-                mutex_unlock(&pardev->mutex);
-        }
-
-        if (ret) {
-                nvkm_error(pardev, "failed to attach %s context, %d\n",
-                           nvkm_subdev_name[subdev->index], ret);
-                return ret;
-        }
-
-        nvkm_trace(pardev, "attached %s context\n", nvkm_subdev_name[subdev->index]);
-        return 0;
+        return nvkm_gpuobj_init(&engctx->gpuobj);
 }
 
 int
 nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
 {
-        struct nvkm_object *object = nv_object(engctx);
-        struct nvkm_subdev *subdev = nv_subdev(object->engine);
-        struct nvkm_object *parent;
-        struct nvkm_subdev *pardev;
-        int ret = 0;
-
-        parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-        pardev = nv_subdev(parent->engine);
-        if (nv_parent(parent)->context_detach) {
-                mutex_lock(&pardev->mutex);
-                ret = nv_parent(parent)->context_detach(parent, suspend, object);
-                mutex_unlock(&pardev->mutex);
-        }
-
-        if (ret) {
-                nvkm_error(pardev, "failed to detach %s context, %d\n",
-                           nvkm_subdev_name[subdev->index], ret);
-                return ret;
-        }
-
-        nvkm_trace(pardev, "detached %s context\n", nvkm_subdev_name[subdev->index]);
         return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
 }
 
@@ -210,30 +164,3 @@ _nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
 {
         return nvkm_engctx_fini(nv_engctx(object), suspend);
 }
-
-struct nvkm_object *
-nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
-{
-        struct nvkm_engctx *engctx;
-        unsigned long flags;
-
-        spin_lock_irqsave(&engine->lock, flags);
-        list_for_each_entry(engctx, &engine->contexts, head) {
-                if (engctx->addr == addr) {
-                        engctx->save = flags;
-                        return nv_object(engctx);
-                }
-        }
-        spin_unlock_irqrestore(&engine->lock, flags);
-        return NULL;
-}
-
-void
-nvkm_engctx_put(struct nvkm_object *object)
-{
-        if (object) {
-                struct nvkm_engine *engine = nv_engine(object->engine);
-                struct nvkm_engctx *engctx = nv_engctx(object);
-                spin_unlock_irqrestore(&engine->lock, engctx->save);
-        }
-}
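
[Example] The mutex-guarded context attach/detach dropped from the generic engctx code reappears in fifo/chan.c (see the chan.c hunks below), where a per-engine use count ensures only the first user attaches and only the last detaches. The guard idiom in isolation, with stand-in hooks:

    struct engn { int usecount; };

    static int attach(void) { return 0; }       /* stand-in attach hook */
    static int detach(void) { return 0; }       /* stand-in detach hook */

    static int engn_init(struct engn *engn)
    {
            if (engn->usecount++)               /* only the first user attaches */
                    return 0;
            return attach();
    }

    static int engn_fini(struct engn *engn)
    {
            if (--engn->usecount)               /* only the last user detaches */
                    return 0;
            return detach();
    }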
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 54b46037f4ba..e056f7afc35c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -231,6 +231,8 @@ nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
         nvkm_object_destroy(&gpuobj->object);
 }
 
+#include <engine/fifo.h>
+
 int
 nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                     struct nvkm_oclass *oclass, u32 pclass,
@@ -240,11 +242,19 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
         struct nvkm_device *device = nv_device(parent);
         struct nvkm_gpuobj *pargpu = NULL;
         struct nvkm_gpuobj *gpuobj;
+        struct nvkm_object *object = objgpu;
         const bool zero = (flags & NVOBJ_FLAG_ZERO_ALLOC);
         int ret;
 
         *pobject = NULL;
 
+        while (object && object->func != &nvkm_fifo_chan_func)
+                object = object->parent;
+
+        if (object) {
+                struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+                pargpu = chan->inst;
+        } else
         if (objgpu) {
                 while ((objgpu = nv_pclass(objgpu, NV_GPUOBJ_CLASS))) {
                         if (nv_gpuobj(objgpu)->heap.block_size)
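
[Example] Instead of testing class bits, the new code identifies the enclosing channel by walking object->parent until it finds an object whose func pointer is nvkm_fifo_chan_func. The walk in isolation, with stand-in types:

    struct nvkm_object_func;                    /* opaque */

    struct nvkm_object {
            const struct nvkm_object_func *func;
            struct nvkm_object *parent;
    };

    extern const struct nvkm_object_func nvkm_fifo_chan_func;

    /* return the enclosing channel's base object, or NULL if none */
    static struct nvkm_object *find_chan(struct nvkm_object *object)
    {
            while (object && object->func != &nvkm_fifo_chan_func)
                    object = object->parent;
            return object;
    }

Comparing a singleton func pointer is cheap and needs no per-class magic numbers, which is what lets the NV_NAMEDB_CLASS machinery go away.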
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
index 2b52a655309b..a74ee1c29f8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
@@ -23,7 +23,7 @@
  */
 #include <core/handle.h>
 #include <core/client.h>
-#include <core/namedb.h>
+#include <core/parent.h>
 
 #define hprintk(h,l,f,a...) do { \
         struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
@@ -98,14 +98,9 @@ int
 nvkm_handle_create(struct nvkm_handle *parent, u32 _handle,
                    struct nvkm_object *object, struct nvkm_handle **phandle)
 {
-        struct nvkm_object *namedb;
         struct nvkm_handle *handle;
         int ret;
 
-        namedb = parent ? parent->object : NULL;
-        while (namedb && !nv_iclass(namedb, NV_NAMEDB_CLASS))
-                namedb = namedb->parent;
-
         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
         if (!handle)
                 return -ENOMEM;
@@ -118,15 +113,6 @@ nvkm_handle_create(struct nvkm_handle *parent, u32 _handle,
         handle->parent = parent;
         nvkm_object_ref(object, &handle->object);
 
-        if (namedb) {
-                ret = nvkm_namedb_insert(nv_namedb(namedb), _handle,
-                                         object, handle);
-                if (ret) {
-                        kfree(handle);
-                        return ret;
-                }
-        }
-
         if (parent) {
                 if (nv_iclass(parent->object, NV_PARENT_CLASS) &&
                     nv_parent(parent->object)->object_attach) {
@@ -168,40 +154,6 @@ nvkm_handle_destroy(struct nvkm_handle *handle)
         }
 
         hprintk(handle, TRACE, "destroy completed\n");
-        nvkm_namedb_remove(handle);
+        nvkm_object_ref(NULL, &handle->object);
         kfree(handle);
 }
-
-struct nvkm_handle *
-nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
-{
-        struct nvkm_namedb *namedb;
-        if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-                return nvkm_namedb_get_class(namedb, oclass);
-        return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
-{
-        struct nvkm_namedb *namedb;
-        if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-                return nvkm_namedb_get_vinst(namedb, vinst);
-        return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
-{
-        struct nvkm_namedb *namedb;
-        if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-                return nvkm_namedb_get_cinst(namedb, cinst);
-        return NULL;
-}
-
-void
-nvkm_handle_put(struct nvkm_handle *handle)
-{
-        if (handle)
-                nvkm_namedb_put(handle);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 04f1bc2d0f8e..28f9fa289e80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -25,7 +25,7 @@
 #include <core/client.h>
 #include <core/engine.h>
 #include <core/handle.h>
-#include <core/namedb.h>
+#include <core/parent.h>
 
 #include <nvif/unpack.h>
 #include <nvif/ioctl.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
deleted file mode 100644
index 9be1ce967034..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/namedb.h>
-#include <core/gpuobj.h>
-#include <core/handle.h>
-
-static struct nvkm_handle *
-nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
-{
-        struct nvkm_handle *handle;
-
-        list_for_each_entry(handle, &namedb->list, node) {
-                if (handle->name == name)
-                        return handle;
-        }
-
-        return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, s32 oclass)
-{
-        struct nvkm_handle *handle;
-
-        list_for_each_entry(handle, &namedb->list, node) {
-                if (nv_mclass(handle->object) == oclass)
-                        return handle;
-        }
-
-        return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-        struct nvkm_handle *handle;
-
-        list_for_each_entry(handle, &namedb->list, node) {
-                if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-                        if (nv_gpuobj(handle->object)->addr == vinst)
-                                return handle;
-                }
-        }
-
-        return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-        struct nvkm_handle *handle;
-
-        list_for_each_entry(handle, &namedb->list, node) {
-                if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-                        if (nv_gpuobj(handle->object)->node &&
-                            nv_gpuobj(handle->object)->node->offset == cinst)
-                                return handle;
-                }
-        }
-
-        return NULL;
-}
-
-int
-nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
-                   struct nvkm_object *object,
-                   struct nvkm_handle *handle)
-{
-        int ret = -EEXIST;
-        write_lock_irq(&namedb->lock);
-        if (!nvkm_namedb_lookup(namedb, name)) {
-                nvkm_object_ref(object, &handle->object);
-                handle->namedb = namedb;
-                list_add(&handle->node, &namedb->list);
-                ret = 0;
-        }
-        write_unlock_irq(&namedb->lock);
-        return ret;
-}
-
-void
-nvkm_namedb_remove(struct nvkm_handle *handle)
-{
-        struct nvkm_namedb *namedb = handle->namedb;
-        struct nvkm_object *object = handle->object;
-        if (handle->namedb) {
-                write_lock_irq(&namedb->lock);
-                list_del(&handle->node);
-                write_unlock_irq(&namedb->lock);
-        }
-        nvkm_object_ref(NULL, &object);
-}
-
-struct nvkm_handle *
-nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
-{
-        struct nvkm_handle *handle;
-        read_lock(&namedb->lock);
-        handle = nvkm_namedb_lookup(namedb, name);
-        if (handle == NULL)
-                read_unlock(&namedb->lock);
-        return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_class(struct nvkm_namedb *namedb, s32 oclass)
-{
-        struct nvkm_handle *handle;
-        read_lock(&namedb->lock);
-        handle = nvkm_namedb_lookup_class(namedb, oclass);
-        if (handle == NULL)
-                read_unlock(&namedb->lock);
-        return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-        struct nvkm_handle *handle;
-        read_lock(&namedb->lock);
-        handle = nvkm_namedb_lookup_vinst(namedb, vinst);
-        if (handle == NULL)
-                read_unlock(&namedb->lock);
-        return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-        struct nvkm_handle *handle;
-        read_lock(&namedb->lock);
-        handle = nvkm_namedb_lookup_cinst(namedb, cinst);
-        if (handle == NULL)
-                read_unlock(&namedb->lock);
-        return handle;
-}
-
-void
-nvkm_namedb_put(struct nvkm_handle *handle)
-{
-        if (handle)
-                read_unlock(&handle->namedb->lock);
-}
-
-int
-nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-                    struct nvkm_oclass *oclass, u32 pclass,
-                    struct nvkm_oclass *sclass, u64 engcls,
-                    int length, void **pobject)
-{
-        struct nvkm_namedb *namedb;
-        int ret;
-
-        ret = nvkm_parent_create_(parent, engine, oclass, pclass |
-                                  NV_NAMEDB_CLASS, sclass, engcls,
-                                  length, pobject);
-        namedb = *pobject;
-        if (ret)
-                return ret;
-
-        rwlock_init(&namedb->lock);
-        INIT_LIST_HEAD(&namedb->list);
-        return 0;
-}
-
-int
-_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                  struct nvkm_oclass *oclass, void *data, u32 size,
-                  struct nvkm_object **pobject)
-{
-        struct nvkm_namedb *object;
-        int ret;
-
-        ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
-        *pobject = nv_object(object);
-        if (ret)
-                return ret;
-
-        return 0;
-}
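
[Example] Worth noting about the deleted code: nvkm_namedb_get*() returned with the rwlock still read-held on success, and nvkm_namedb_put() dropped it, so a looked-up handle stayed valid until released. The same convention reduced to a pthreads sketch (lookup() is an assumed helper, not a real API):

    #include <pthread.h>

    struct namedb {
            pthread_rwlock_t lock;
            /* ... list of handles ... */
    };

    extern void *lookup(struct namedb *db, unsigned name);  /* assumed helper */

    /* on success the read lock is still held; caller must call put() */
    static void *get(struct namedb *db, unsigned name)
    {
            void *handle;
            pthread_rwlock_rdlock(&db->lock);
            handle = lookup(db, name);
            if (handle == NULL)
                    pthread_rwlock_unlock(&db->lock);
            return handle;
    }

    static void put(struct namedb *db, void *handle)
    {
            if (handle)
                    pthread_rwlock_unlock(&db->lock);
    }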
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index a632570f20e1..1a15b8d6fece 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -85,7 +85,8 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
         nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
                            "subc %d mthd %04x data %08x\n", ssta,
                    en ? en->name : "", chan ? chan->chid : -1,
-                   chan ? chan->inst : 0, nvkm_client_name(chan),
+                   chan ? chan->inst->addr : 0,
+                   chan ? chan->object.client->name : "unknown",
                    subc, mthd, data);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 62cdd1e50a95..74bea4397bf4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -121,9 +121,10 @@ g84_cipher_intr(struct nvkm_subdev *subdev)
         if (stat) {
                 nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
                 nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] "
-                           "mthd %04x data %08x\n",
-                           stat, msg, chan ? chan->chid : -1, (u64)inst << 12,
-                           nvkm_client_name(chan), mthd, data);
+                           "mthd %04x data %08x\n", stat, msg,
+                           chan ? chan->chid : -1, (u64)inst << 12,
+                           chan ? chan->object.client->name : "unknown",
+                           mthd, data);
         }
         nvkm_fifo_chan_put(fifo, flags, &chan);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
index 2d9b81fb83a2..62d3fb66d0ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object);
+#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
 #include "priv.h"
 
 #include <core/client.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index 57f6eca078ef..1a377201949c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -24,6 +24,7 @@
 #include "priv.h"
 
 #include <core/client.h>
+#include <engine/fifo.h>
 
 #include <nvif/class.h>
 
@@ -88,11 +89,19 @@ nvkm_dma_oclass_base = {
         .ctor = nvkm_dma_oclass_new,
 };
 
+static int
+nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+                         struct nvkm_object **pobject)
+{
+        return nvkm_dma_oclass_new(oclass->engine->subdev.device,
+                                   oclass, data, size, pobject);
+}
+
 static const struct nvkm_sclass
 nvkm_dma_sclass[] = {
-        { 0, 0, NV_DMA_FROM_MEMORY },
-        { 0, 0, NV_DMA_TO_MEMORY },
-        { 0, 0, NV_DMA_IN_MEMORY },
+        { 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+        { 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+        { 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
 };
 
 static int
@@ -110,89 +119,21 @@ nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
         return count;
 }
 
-static const struct nvkm_engine_func
-nvkm_dma = {
-        .base.sclass = nvkm_dma_oclass_base_get,
-};
-
-#include <core/gpuobj.h>
-
-static struct nvkm_oclass empty = {
-        .ofuncs = &(struct nvkm_ofuncs) {
-                .dtor = nvkm_object_destroy,
-                .init = _nvkm_object_init,
-                .fini = _nvkm_object_fini,
-        },
-};
-
 static int
-nvkm_dmaobj_compat_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                        struct nvkm_oclass *oclass, void *data, u32 size,
-                        struct nvkm_object **pobject)
-{
-        struct nvkm_oclass hack = {
-                .base.oclass = oclass->handle,
-                .client = nvkm_client(parent),
-                .parent = parent,
-                .engine = nv_engine(engine),
-        };
-        struct nvkm_dma *dma = (void *)engine;
-        struct nvkm_dma_impl *impl = (void *)dma->engine.subdev.object.oclass;
-        struct nvkm_dmaobj *dmaobj = NULL;
-        struct nvkm_gpuobj *gpuobj;
-        int ret;
-
-        ret = impl->class_new(dma, &hack, data, size, &dmaobj);
-        if (dmaobj)
-                *pobject = &dmaobj->object;
-        if (ret)
-                return ret;
-
-        gpuobj = (void *)nv_pclass(parent, NV_GPUOBJ_CLASS);
-
-        ret = dmaobj->func->bind(dmaobj, gpuobj, 16, &gpuobj);
-        nvkm_object_ref(NULL, pobject);
-        if (ret)
-                return ret;
-
-        ret = nvkm_object_create(parent, engine, &empty, 0, pobject);
-        if (ret)
-                return ret;
-
-        gpuobj->object.parent = *pobject;
-        gpuobj->object.engine = &dma->engine;
-        gpuobj->object.oclass = oclass;
-        gpuobj->object.pclass = NV_GPUOBJ_CLASS;
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-        gpuobj->object._magic = NVKM_OBJECT_MAGIC;
-#endif
-        *pobject = &gpuobj->object;
-        return 0;
-}
-
-static void
-nvkm_dmaobj_compat_dtor(struct nvkm_object *object)
+nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
 {
-        struct nvkm_object *parent = object->parent;
-        struct nvkm_gpuobj *gpuobj = (void *)object;
-        nvkm_gpuobj_del(&gpuobj);
-        nvkm_object_ref(NULL, &parent);
+        const int count = ARRAY_SIZE(nvkm_dma_sclass);
+        if (index < count) {
+                oclass->base = nvkm_dma_sclass[index];
+                return index;
+        }
+        return count;
 }
 
-static struct nvkm_ofuncs
-nvkm_dmaobj_compat_ofuncs = {
-        .ctor = nvkm_dmaobj_compat_ctor,
-        .dtor = nvkm_dmaobj_compat_dtor,
-        .init = _nvkm_object_init,
-        .fini = _nvkm_object_fini,
-};
-
-static struct nvkm_oclass
-nvkm_dma_compat_sclass[] = {
-        { NV_DMA_FROM_MEMORY, &nvkm_dmaobj_compat_ofuncs },
-        { NV_DMA_TO_MEMORY, &nvkm_dmaobj_compat_ofuncs },
-        { NV_DMA_IN_MEMORY, &nvkm_dmaobj_compat_ofuncs },
-        {}
+static const struct nvkm_engine_func
+nvkm_dma = {
+        .base.sclass = nvkm_dma_oclass_base_get,
+        .fifo.sclass = nvkm_dma_oclass_fifo_get,
 };
 
 int
@@ -209,7 +150,6 @@ _nvkm_dma_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        dmaeng->engine.sclass = nvkm_dma_compat_sclass;
         dmaeng->engine.func = &nvkm_dma;
         return 0;
 }
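
[Example] Both sclass-get callbacks share one convention: on a hit, copy entry index into the caller's oclass and return the index; once index runs past the table, return the entry count so the caller can subtract it and continue with the next engine. The bare pattern with stand-in types (class values are examples only):

    struct sclass { int oclass; };

    static const struct sclass sclasses[] = {
            { 0x0002 },     /* e.g. NV_DMA_FROM_MEMORY */
            { 0x0003 },     /* e.g. NV_DMA_TO_MEMORY */
            { 0x003d },     /* e.g. NV_DMA_IN_MEMORY */
    };

    /* returns the matched index, or the table size when index is exhausted */
    static int sclass_get(struct sclass *out, int index)
    {
            const int count = sizeof(sclasses) / sizeof(sclasses[0]);
            if (index < count) {
                    *out = sclasses[index];
                    return index;
            }
            return count;
    }

This is what lets nvkm_fifo_chan_child_get() in chan.c walk several engines with a single running index.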
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 510de3c2d2e4..b693127d80e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -44,12 +44,13 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
 {
+        struct nvkm_fifo_chan *chan;
         unsigned long flags;
-        int i;
         spin_lock_irqsave(&fifo->lock, flags);
-        for (i = fifo->min; i < fifo->max; i++) {
-                struct nvkm_fifo_chan *chan = (void *)fifo->channel[i];
-                if (chan && chan->inst == inst) {
+        list_for_each_entry(chan, &fifo->chan, head) {
+                if (chan->inst->addr == inst) {
+                        list_del(&chan->head);
+                        list_add(&chan->head, &fifo->chan);
                         *rflags = flags;
                         return chan;
                 }
@@ -61,46 +62,22 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
 {
+        struct nvkm_fifo_chan *chan;
         unsigned long flags;
         spin_lock_irqsave(&fifo->lock, flags);
-        if (fifo->channel[chid]) {
-                *rflags = flags;
-                return (void *)fifo->channel[chid];
+        list_for_each_entry(chan, &fifo->chan, head) {
+                if (chan->chid == chid) {
+                        list_del(&chan->head);
+                        list_add(&chan->head, &fifo->chan);
+                        *rflags = flags;
+                        return chan;
+                }
         }
         spin_unlock_irqrestore(&fifo->lock, flags);
         return NULL;
 }
 
 static int
-nvkm_fifo_chid(struct nvkm_fifo *fifo, struct nvkm_object *object)
-{
-        int engidx = nv_hclass(fifo) & 0xff;
-
-        while (object && object->parent) {
-                if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
-                    (nv_hclass(object->parent) & 0xff) == engidx)
-                        return nvkm_fifo_chan(object)->chid;
-                object = object->parent;
-        }
-
-        return -1;
-}
-
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
-{
-        struct nvkm_fifo_chan *chan = NULL;
-        unsigned long flags;
-
-        spin_lock_irqsave(&fifo->lock, flags);
-        if (chid >= fifo->min && chid <= fifo->max)
-                chan = (void *)fifo->channel[chid];
-        spin_unlock_irqrestore(&fifo->lock, flags);
-
-        return nvkm_client_name(chan);
-}
-
-static int
 nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
                      struct nvkm_notify *notify)
 {
@@ -144,21 +121,62 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
         nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
 }
 
+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+                    const struct nvkm_oclass *oclass, void *data, u32 size,
+                    struct nvkm_object **pobject)
+{
+        const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+        struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+        return sclass->ctor(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+        .ctor = nvkm_fifo_class_new,
+};
+
+static int
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+                    const struct nvkm_device_oclass **class)
+{
+        struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+        const struct nvkm_fifo_chan_oclass *sclass;
+        int c = 0;
+
+        while ((sclass = fifo->func->chan[c])) {
+                if (c++ == index) {
+                        oclass->base = sclass->base;
+                        oclass->engn = sclass;
+                        *class = &nvkm_fifo_class;
+                        return 0;
+                }
+        }
+
+        return c;
+}
+
 void
 nvkm_fifo_destroy(struct nvkm_fifo *fifo)
 {
-        kfree(fifo->channel);
         nvkm_event_fini(&fifo->uevent);
         nvkm_event_fini(&fifo->cevent);
         nvkm_engine_destroy(&fifo->engine);
 }
 
+static const struct nvkm_engine_func
+nvkm_fifo_func = {
+        .base.sclass = nvkm_fifo_class_get,
+};
+
 int
 nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                   struct nvkm_oclass *oclass,
                   int min, int max, int length, void **pobject)
 {
         struct nvkm_fifo *fifo;
+        int nr = max + 1;
+        int cnt = nr - min;
         int ret;
 
         ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
@@ -167,17 +185,21 @@ nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        fifo->min = min;
-        fifo->max = max;
-        fifo->channel = kzalloc(sizeof(*fifo->channel) * (max + 1), GFP_KERNEL);
-        if (!fifo->channel)
-                return -ENOMEM;
+        fifo->engine.func = &nvkm_fifo_func;
+        INIT_LIST_HEAD(&fifo->chan);
+
+        fifo->nr = nr;
+        if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR)) {
+                fifo->nr = NVKM_FIFO_CHID_NR;
+                cnt = fifo->nr - min;
+        }
+        bitmap_fill(fifo->mask, NVKM_FIFO_CHID_NR);
+        bitmap_clear(fifo->mask, min, cnt);
 
         ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
         if (ret)
                 return ret;
 
-        fifo->chid = nvkm_fifo_chid;
         spin_lock_init(&fifo->lock);
         return 0;
 }
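
[Example] nvkm_fifo_create_() now seeds a channel-ID bitmap instead of allocating a pointer array: every bit starts set, then the [min, min + cnt) range is cleared, so a free channel ID is simply a zero bit and allocation is find-first-zero plus set. A self-contained sketch of that allocator (the kernel itself uses the bitmap_*() helpers; this is an illustration, not the driver code):

    #include <limits.h>

    #define CHID_NR 4096
    #define BPL     (sizeof(unsigned long) * CHAR_BIT)      /* bits per long */

    static unsigned long mask[CHID_NR / BPL];       /* set bit == unavailable */

    /* bitmap_fill() then bitmap_clear(min, cnt): only [min, min+cnt) usable */
    static void chid_init(unsigned min, unsigned cnt)
    {
            unsigned i;
            for (i = 0; i < CHID_NR / BPL; i++)
                    mask[i] = ~0UL;
            for (i = min; i < min + cnt; i++)
                    mask[i / BPL] &= ~(1UL << (i % BPL));
    }

    /* find-first-zero-bit + set; -1 when every channel is busy */
    static int chid_alloc(void)
    {
            unsigned i;
            for (i = 0; i < CHID_NR; i++) {
                    if (!(mask[i / BPL] & (1UL << (i % BPL)))) {
                            mask[i / BPL] |= 1UL << (i % BPL);
                            return (int)i;
                    }
            }
            return -1;
    }

Live channels then sit on fifo->chan, and nvkm_fifo_chan_dtor() (below) clears the bit and unlinks the channel under fifo->lock.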
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index cc401ae1d6a5..2735c2df2218 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -24,139 +24,472 @@
 #include "chan.h"
 
 #include <core/client.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
 #include <engine/dma.h>
 
-#include <nvif/class.h>
+struct nvkm_fifo_chan_object {
+        struct nvkm_oproxy oproxy;
+        struct nvkm_fifo_chan *chan;
+        int hash;
+};
 
-int
-_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
-                        struct nvkm_event **event)
+static int
+nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
+{
+        struct nvkm_fifo_chan_object *object =
+                container_of(base, typeof(*object), oproxy);
+        struct nvkm_engine *engine = object->oproxy.object->engine;
+        struct nvkm_fifo_chan *chan = object->chan;
+        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+        const char *name = nvkm_subdev_name[engine->subdev.index];
+        int ret = 0;
+
+        if (--engn->usecount)
+                return 0;
+
+        if (chan->func->engine_fini) {
+                ret = chan->func->engine_fini(chan, engine, suspend);
+                if (ret) {
+                        nvif_error(&chan->object,
+                                   "detach %s failed, %d\n", name, ret);
+                        return ret;
+                }
+        }
+
+        if (engn->object) {
+                ret = nvkm_object_fini(engn->object, suspend);
+                if (ret && suspend)
+                        return ret;
+        }
+
+        nvif_trace(&chan->object, "detached %s\n", name);
+        return ret;
+}
+
+static int
+nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
+{
+        struct nvkm_fifo_chan_object *object =
+                container_of(base, typeof(*object), oproxy);
+        struct nvkm_engine *engine = object->oproxy.object->engine;
+        struct nvkm_fifo_chan *chan = object->chan;
+        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+        const char *name = nvkm_subdev_name[engine->subdev.index];
+        int ret;
+
+        if (engn->usecount++)
+                return 0;
+
+        if (engn->object) {
+                ret = nvkm_object_init(engn->object);
+                if (ret)
+                        return ret;
+        }
+
+        if (chan->func->engine_init) {
+                ret = chan->func->engine_init(chan, engine);
+                if (ret) {
+                        nvif_error(&chan->object,
+                                   "attach %s failed, %d\n", name, ret);
+                        return ret;
+                }
+        }
+
+        nvif_trace(&chan->object, "attached %s\n", name);
+        return 0;
+}
+
+static void
+nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
+{
+        struct nvkm_fifo_chan_object *object =
+                container_of(base, typeof(*object), oproxy);
+        struct nvkm_engine *engine = object->oproxy.base.engine;
+        struct nvkm_fifo_chan *chan = object->chan;
+        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+
+        if (chan->func->object_dtor)
+                chan->func->object_dtor(chan, object->hash);
+
+        if (!--engn->refcount) {
+                if (chan->func->engine_dtor)
+                        chan->func->engine_dtor(chan, engine);
+                nvkm_object_ref(NULL, &engn->object);
+                if (chan->vm)
+                        atomic_dec(&chan->vm->engref[engine->subdev.index]);
+        }
+}
+
+static const struct nvkm_oproxy_func
+nvkm_fifo_chan_child_func = {
+        .dtor[0] = nvkm_fifo_chan_child_del,
+        .init[0] = nvkm_fifo_chan_child_init,
+        .fini[0] = nvkm_fifo_chan_child_fini,
+};
+
+static int
+nvkm_fifo_chan_child_old(const struct nvkm_oclass *oclass,
+                         void *data, u32 size, struct nvkm_object **pobject)
 {
-        struct nvkm_fifo *fifo = (void *)object->engine;
-        switch (type) {
-        case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
-                if (nv_mclass(object) >= G82_CHANNEL_DMA) {
-                        *event = &fifo->uevent;
-                        return 0;
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+        struct nvkm_object *parent = &chan->object;
+        struct nvkm_engine *engine = oclass->engine;
+        struct nvkm_oclass *eclass = (void *)oclass->priv;
+        struct nvkm_object *engctx = NULL;
+        struct nvkm_fifo_chan_object *object;
+        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+        int ret;
+
+        if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+                return -ENOMEM;
+        nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+        *pobject = &object->oproxy.base;
+        object->chan = chan;
+
+        if (!engn->refcount++) {
+                if (chan->vm)
+                        atomic_inc(&chan->vm->engref[engine->subdev.index]);
+                if (engine->cclass && !engn->object) {
+                        ret = nvkm_object_old(parent, &engine->subdev.object,
+                                              engine->cclass, NULL, 0,
+                                              &engn->object);
+                        if (ret) {
+                                nvkm_engine_unref(&engine);
+                                return ret;
+                        }
+                } else {
+                        nvkm_object_ref(parent, &engn->object);
                 }
-                break;
-        default:
-                break;
+
+                if (chan->func->engine_ctor) {
+                        ret = chan->func->engine_ctor(chan, engine,
+                                                      engn->object);
+                        if (ret)
+                                return ret;
+                }
+        }
+        nvkm_object_ref(engn->object, &engctx);
+
+        ret = nvkm_object_old(engctx, &engine->subdev.object, eclass,
+                              data, size, &object->oproxy.object);
+        nvkm_object_ref(NULL, &engctx);
+        if (ret)
+                return ret;
+
+        object->oproxy.object->handle = oclass->handle;
+
+        if (chan->func->object_ctor) {
+                object->hash =
+                        chan->func->object_ctor(chan, object->oproxy.object);
+                if (object->hash < 0)
+                        return object->hash;
         }
+
+        return 0;
+}
+
+static int
+nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+                         struct nvkm_object **pobject)
+{
+        struct nvkm_engine *engine = oclass->engine;
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+        struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+        struct nvkm_fifo_chan_object *object;
+        int ret = 0;
+
+        if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+                return -ENOMEM;
+        nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+        object->chan = chan;
+        *pobject = &object->oproxy.base;
+
+        if (!engn->refcount++) {
+                struct nvkm_oclass cclass = {
+                        .client = oclass->client,
+                        .engine = oclass->engine,
+                };
+
+                if (chan->vm)
+                        atomic_inc(&chan->vm->engref[engine->subdev.index]);
+
+                if (engine->func->fifo.cclass) {
+                        ret = engine->func->fifo.cclass(chan, &cclass,
+                                                        &engn->object);
+                } else
+                if (engine->func->cclass) {
+                        ret = nvkm_object_new_(engine->func->cclass, &cclass,
+                                               NULL, 0, &engn->object);
+                }
+                if (ret)
+                        return ret;
+
+                if (chan->func->engine_ctor) {
+                        ret = chan->func->engine_ctor(chan, oclass->engine,
+                                                      engn->object);
+                        if (ret)
+                                return ret;
+                }
+        }
+
+        ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+                                        .base = oclass->base,
+                                        .engn = oclass->engn,
+                                        .handle = oclass->handle,
+                                        .object = oclass->object,
+                                        .client = oclass->client,
+                                        .parent = engn->object ?
+                                                  engn->object :
+                                                  oclass->parent,
+                                        .engine = engine,
+                                }, data, size, &object->oproxy.object);
+        if (ret)
+                return ret;
+
+        if (chan->func->object_ctor) {
+                object->hash =
+                        chan->func->object_ctor(chan, object->oproxy.object);
+                if (object->hash < 0)
+                        return object->hash;
+        }
+
+        return 0;
+}
+
+static int
+nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
+                         struct nvkm_oclass *oclass)
+{
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+        struct nvkm_fifo *fifo = chan->fifo;
+        struct nvkm_device *device = fifo->engine.subdev.device;
+        struct nvkm_engine *engine;
+        u64 mask = chan->engines;
+        int ret, i, c;
+
+        for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
+                if ((engine = nvkm_device_engine(device, i)) &&
+                    !engine->func) {
+                        struct nvkm_oclass *sclass = engine->sclass;
+                        int c = 0;
+                        while (sclass && sclass->ofuncs) {
+                                if (c++ == index) {
+                                        oclass->base.oclass = sclass->handle;
+                                        oclass->base.minver = -2;
+                                        oclass->base.maxver = -2;
+                                        oclass->ctor = nvkm_fifo_chan_child_old;
+                                        oclass->priv = sclass;
+                                        oclass->engine = engine;
+                                        return 0;
+                                }
+                                sclass++;
+                        }
+                        index -= c;
+                        continue;
+                }
+
+                if (!(engine = nvkm_device_engine(device, i)))
+                        continue;
+                oclass->engine = engine;
+                oclass->base.oclass = 0;
+
+                if (engine->func->fifo.sclass) {
+                        ret = engine->func->fifo.sclass(oclass, index);
+                        if (oclass->base.oclass) {
+                                if (!oclass->base.ctor)
+                                        oclass->base.ctor = nvkm_object_new;
+                                oclass->ctor = nvkm_fifo_chan_child_new;
+                                return 0;
+                        }
+
+                        index -= ret;
+                        continue;
+                }
+
+                while (engine->func->sclass[c].oclass) {
+                        if (c++ == index) {
+                                oclass->base = engine->func->sclass[index];
+                                if (!oclass->base.ctor)
+                                        oclass->base.ctor = nvkm_object_new;
+                                oclass->ctor = nvkm_fifo_chan_child_new;
+                                return 0;
+                        }
+                }
+                index -= c;
+        }
+
         return -EINVAL;
 }
 
-int
-_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
+static int
+nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
+                    struct nvkm_event **pevent)
 {
-        struct nvkm_fifo_chan *chan = (void *)object;
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+        if (chan->func->ntfy)
+                return chan->func->ntfy(chan, type, pevent);
+        return -ENODEV;
+}
+
+static int
+nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
         *addr = chan->addr;
         *size = chan->size;
         return 0;
 }
 
-u32
-_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
 {
-        struct nvkm_fifo_chan *chan = (void *)object;
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
         if (unlikely(!chan->user)) {
                 chan->user = ioremap(chan->addr, chan->size);
-                if (WARN_ON_ONCE(chan->user == NULL))
-                        return 0;
+                if (!chan->user)
+                        return -ENOMEM;
         }
-        return ioread32_native(chan->user + addr);
+        if (unlikely(addr + 4 > chan->size))
+                return -EINVAL;
+        *data = ioread32_native(chan->user + addr);
+        return 0;
 }
 
-void
-_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
-        struct nvkm_fifo_chan *chan = (void *)object;
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
         if (unlikely(!chan->user)) {
                 chan->user = ioremap(chan->addr, chan->size);
-                if (WARN_ON_ONCE(chan->user == NULL))
-                        return;
+                if (!chan->user)
+                        return -ENOMEM;
         }
+        if (unlikely(addr + 4 > chan->size))
+                return -EINVAL;
         iowrite32_native(data, chan->user + addr);
+        return 0;
+}
+
+static int
+nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+{
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+        chan->func->fini(chan);
+        return 0;
 }
 
-void
-nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
+static int
+nvkm_fifo_chan_init(struct nvkm_object *object)
 {
-        struct nvkm_fifo *fifo = (void *)nv_object(chan)->engine;
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+        chan->func->init(chan);
+        return 0;
+}
+
+static void *
+nvkm_fifo_chan_dtor(struct nvkm_object *object)
+{
+        struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+        struct nvkm_fifo *fifo = chan->fifo;
+        void *data = chan->func->dtor(chan);
         unsigned long flags;
 
+        spin_lock_irqsave(&fifo->lock, flags);
+        if (!list_empty(&chan->head)) {
+                __clear_bit(chan->chid, fifo->mask);
+                list_del(&chan->head);
+        }
+        spin_unlock_irqrestore(&fifo->lock, flags);
+
         if (chan->user)
                 iounmap(chan->user);
 
-        spin_lock_irqsave(&fifo->lock, flags);
-        fifo->channel[chan->chid] = NULL;
-        spin_unlock_irqrestore(&fifo->lock, flags);
+        nvkm_vm_ref(NULL, &chan->vm, NULL);
 
-        nvkm_gpuobj_del(&chan->pushgpu);
-        nvkm_namedb_destroy(&chan->namedb);
+        nvkm_gpuobj_del(&chan->push);
+        nvkm_gpuobj_del(&chan->inst);
+        return data;
 }
 
-void
-_nvkm_fifo_channel_dtor(struct nvkm_object *object)
+const struct nvkm_object_func
+nvkm_fifo_chan_func = {
101 | { | 417 | .dtor = nvkm_fifo_chan_dtor, |
102 | struct nvkm_fifo_chan *chan = (void *)object; | 418 | .init = nvkm_fifo_chan_init, |
103 | nvkm_fifo_channel_destroy(chan); | 419 | .fini = nvkm_fifo_chan_fini, |
104 | } | 420 | .ntfy = nvkm_fifo_chan_ntfy, |
421 | .map = nvkm_fifo_chan_map, | ||
422 | .rd32 = nvkm_fifo_chan_rd32, | ||
423 | .wr32 = nvkm_fifo_chan_wr32, | ||
424 | .sclass = nvkm_fifo_chan_child_get, | ||
425 | }; | ||
105 | 426 | ||
106 | int | 427 | int |
107 | nvkm_fifo_channel_create_(struct nvkm_object *parent, | 428 | nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func, |
108 | struct nvkm_object *engine, | 429 | struct nvkm_fifo *fifo, u32 size, u32 align, bool zero, |
109 | struct nvkm_oclass *oclass, | 430 | u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user, |
110 | int bar, u32 addr, u32 size, u64 pushbuf, | 431 | const struct nvkm_oclass *oclass, |
111 | u64 engmask, int len, void **ptr) | 432 | struct nvkm_fifo_chan *chan) |
112 | { | 433 | { |
113 | struct nvkm_client *client = nvkm_client(parent); | 434 | struct nvkm_client *client = oclass->client; |
114 | struct nvkm_fifo *fifo = (void *)engine; | 435 | struct nvkm_device *device = fifo->engine.subdev.device; |
115 | struct nvkm_fifo_base *base = (void *)parent; | 436 | struct nvkm_mmu *mmu = device->mmu; |
116 | struct nvkm_fifo_chan *chan; | ||
117 | struct nvkm_subdev *subdev = &fifo->engine.subdev; | ||
118 | struct nvkm_device *device = subdev->device; | ||
119 | struct nvkm_dmaobj *dmaobj; | 437 | struct nvkm_dmaobj *dmaobj; |
120 | unsigned long flags; | 438 | unsigned long flags; |
121 | int ret; | 439 | int ret; |
122 | 440 | ||
123 | /* create base object class */ | 441 | nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object); |
124 | ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL, | 442 | chan->func = func; |
125 | engmask, len, ptr); | 443 | chan->fifo = fifo; |
126 | chan = *ptr; | 444 | chan->engines = engines; |
445 | INIT_LIST_HEAD(&chan->head); | ||
446 | |||
447 | /* instance memory */ | ||
448 | ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst); | ||
127 | if (ret) | 449 | if (ret) |
128 | return ret; | 450 | return ret; |
129 | 451 | ||
130 | /* validate dma object representing push buffer */ | 452 | /* allocate push buffer ctxdma instance */ |
131 | if (pushbuf) { | 453 | if (push) { |
132 | dmaobj = nvkm_dma_search(device->dma, client, pushbuf); | 454 | dmaobj = nvkm_dma_search(device->dma, oclass->client, push); |
133 | if (!dmaobj) | 455 | if (!dmaobj) |
134 | return -ENOENT; | 456 | return -ENOENT; |
135 | 457 | ||
136 | ret = nvkm_object_bind(&dmaobj->object, &base->gpuobj, 16, | 458 | ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, |
137 | &chan->pushgpu); | 459 | &chan->push); |
138 | if (ret) | 460 | if (ret) |
139 | return ret; | 461 | return ret; |
140 | } | 462 | } |
141 | 463 | ||
142 | /* find a free fifo channel */ | 464 | /* channel address space */ |
143 | spin_lock_irqsave(&fifo->lock, flags); | 465 | if (!vm && mmu) { |
144 | for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) { | 466 | if (!client->vm || client->vm->mmu == mmu) { |
145 | if (!fifo->channel[chan->chid]) { | 467 | ret = nvkm_vm_ref(client->vm, &chan->vm, NULL); |
146 | fifo->channel[chan->chid] = nv_object(chan); | 468 | if (ret) |
147 | break; | 469 | return ret; |
470 | } else { | ||
471 | return -EINVAL; | ||
148 | } | 472 | } |
473 | } else { | ||
474 | return -ENOENT; | ||
149 | } | 475 | } |
150 | spin_unlock_irqrestore(&fifo->lock, flags); | ||
151 | 476 | ||
152 | if (chan->chid == fifo->max) { | 477 | /* allocate channel id */ |
153 | nvkm_error(subdev, "no free channels\n"); | 478 | spin_lock_irqsave(&fifo->lock, flags); |
479 | chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR); | ||
480 | if (chan->chid >= NVKM_FIFO_CHID_NR) { | ||
481 | spin_unlock_irqrestore(&fifo->lock, flags); | ||
154 | return -ENOSPC; | 482 | return -ENOSPC; |
155 | } | 483 | } |
484 | list_add(&chan->head, &fifo->chan); | ||
485 | __set_bit(chan->chid, fifo->mask); | ||
486 | spin_unlock_irqrestore(&fifo->lock, flags); | ||
156 | 487 | ||
488 | /* determine address of this channel's user registers */ | ||
157 | chan->addr = nv_device_resource_start(device, bar) + | 489 | chan->addr = nv_device_resource_start(device, bar) + |
158 | addr + size * chan->chid; | 490 | base + user * chan->chid; |
159 | chan->size = size; | 491 | chan->size = user; |
492 | |||
160 | nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0); | 493 | nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0); |
161 | return 0; | 494 | return 0; |
162 | } | 495 | } |
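The rewritten nvkm_fifo_chan_ctor() above replaces the old scan of a fifo->channel[] pointer array with a bitmap allocator: find_first_zero_bit() on fifo->mask under fifo->lock, -ENOSPC once NVKM_FIFO_CHID_NR ids are taken, and __clear_bit() plus list_del() in the destructor. A minimal userspace sketch of the same allocation pattern, assuming a pthread mutex in place of the spinlock; CHID_NR, chid_alloc() and chid_free() are invented names, not nvkm API:

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define CHID_NR       128
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long chid_mask[CHID_NR / BITS_PER_LONG];
static pthread_mutex_t chid_lock = PTHREAD_MUTEX_INITIALIZER;

/* find_first_zero_bit() analogue: take the lowest clear bit, or fail */
static int
chid_alloc(void)
{
	int chid = -1, i;

	pthread_mutex_lock(&chid_lock);
	for (i = 0; i < CHID_NR; i++) {
		unsigned long bit = 1UL << (i % BITS_PER_LONG);
		if (!(chid_mask[i / BITS_PER_LONG] & bit)) {
			chid_mask[i / BITS_PER_LONG] |= bit;
			chid = i;
			break;
		}
	}
	pthread_mutex_unlock(&chid_lock);
	return chid; /* -1 stands in for the patch's -ENOSPC */
}

static void
chid_free(int chid)
{
	pthread_mutex_lock(&chid_lock);
	chid_mask[chid / BITS_PER_LONG] &= ~(1UL << (chid % BITS_PER_LONG));
	pthread_mutex_unlock(&chid_lock);
}

int
main(void)
{
	int a = chid_alloc(), b = chid_alloc();
	printf("a=%d b=%d\n", a, b);    /* a=0 b=1 */
	chid_free(a);
	printf("c=%d\n", chid_alloc()); /* c=0: freed ids are reused */
	return 0;
}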
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h index 63209bc8856b..bfec12dbf492 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | |||
@@ -2,27 +2,31 @@ | |||
2 | #define __NVKM_FIFO_CHAN_H__ | 2 | #define __NVKM_FIFO_CHAN_H__ |
3 | #include "priv.h" | 3 | #include "priv.h" |
4 | 4 | ||
5 | #define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \ | 5 | struct nvkm_fifo_chan_func { |
6 | nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \ | 6 | void *(*dtor)(struct nvkm_fifo_chan *); |
7 | (m), sizeof(**d), (void **)d) | 7 | void (*init)(struct nvkm_fifo_chan *); |
8 | #define nvkm_fifo_channel_init(p) \ | 8 | void (*fini)(struct nvkm_fifo_chan *); |
9 | nvkm_namedb_init(&(p)->namedb) | 9 | int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **); |
10 | #define nvkm_fifo_channel_fini(p,s) \ | 10 | int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *, |
11 | nvkm_namedb_fini(&(p)->namedb, (s)) | 11 | struct nvkm_object *); |
12 | void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *); | ||
13 | int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *); | ||
14 | int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *, | ||
15 | bool suspend); | ||
16 | int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *); | ||
17 | void (*object_dtor)(struct nvkm_fifo_chan *, int); | ||
18 | }; | ||
12 | 19 | ||
13 | int nvkm_fifo_channel_create_(struct nvkm_object *, | 20 | int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *, |
14 | struct nvkm_object *, | 21 | u32 size, u32 align, bool zero, u64 vm, u64 push, |
15 | struct nvkm_oclass *, | 22 | u64 engines, int bar, u32 base, u32 user, |
16 | int bar, u32 addr, u32 size, u64 push, | 23 | const struct nvkm_oclass *, struct nvkm_fifo_chan *); |
17 | u64 engmask, int len, void **); | ||
18 | void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *); | ||
19 | 24 | ||
20 | #define _nvkm_fifo_channel_init _nvkm_namedb_init | 25 | struct nvkm_fifo_chan_oclass { |
21 | #define _nvkm_fifo_channel_fini _nvkm_namedb_fini | 26 | int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *, |
27 | void *data, u32 size, struct nvkm_object **); | ||
28 | struct nvkm_sclass base; | ||
29 | }; | ||
22 | 30 | ||
23 | void _nvkm_fifo_channel_dtor(struct nvkm_object *); | 31 | int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **); |
24 | int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *); | ||
25 | u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64); | ||
26 | void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32); | ||
27 | int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **); | ||
28 | #endif | 32 | #endif |
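chan.h now centres on nvkm_fifo_chan_func: one const table of function pointers per chipset implementation, shared by every channel it creates, with per-channel state kept in a struct that embeds the common base. A self-contained sketch of that dispatch pattern, with all names invented for illustration rather than taken from nvkm:

#include <stdio.h>

struct chan;

struct chan_func {                       /* cf. nvkm_fifo_chan_func */
	void (*init)(struct chan *);
	void (*fini)(struct chan *);
};

struct chan {
	const struct chan_func *func;    /* shared, chipset-wide table */
	int chid;                        /* per-channel state */
};

static void demo_init(struct chan *chan) { printf("init %d\n", chan->chid); }
static void demo_fini(struct chan *chan) { printf("fini %d\n", chan->chid); }

static const struct chan_func demo_func = {
	.init = demo_init,
	.fini = demo_fini,
};

int
main(void)
{
	struct chan chan = { .func = &demo_func, .chid = 3 };
	chan.func->init(&chan); /* generic code dispatches via the table */
	chan.func->fini(&chan);
	return 0;
}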
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c index f2b4a96f8794..a7e5dfae3833 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | |||
@@ -25,38 +25,86 @@ | |||
25 | 25 | ||
26 | #include <core/client.h> | 26 | #include <core/client.h> |
27 | #include <core/ramht.h> | 27 | #include <core/ramht.h> |
28 | #include <subdev/mmu.h> | ||
28 | #include <subdev/timer.h> | 29 | #include <subdev/timer.h> |
29 | 30 | ||
31 | #include <nvif/class.h> | ||
32 | |||
30 | int | 33 | int |
31 | g84_fifo_context_detach(struct nvkm_object *parent, bool suspend, | 34 | g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, |
32 | struct nvkm_object *object) | 35 | struct nvkm_event **pevent) |
33 | { | 36 | { |
34 | struct nv50_fifo *fifo = (void *)parent->engine; | 37 | switch (type) { |
35 | struct nv50_fifo_base *base = (void *)parent->parent; | 38 | case G82_CHANNEL_DMA_V0_NTFY_UEVENT: |
36 | struct nv50_fifo_chan *chan = (void *)parent; | 39 | *pevent = &chan->fifo->uevent; |
37 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 40 | return 0; |
38 | struct nvkm_device *device = subdev->device; | 41 | default: |
39 | u32 addr, save, engn; | 42 | break; |
40 | bool done; | 43 | } |
44 | return -EINVAL; | ||
45 | } | ||
41 | 46 | ||
42 | switch (nv_engidx(object->engine)) { | 47 | static int |
43 | case NVDEV_ENGINE_SW : return 0; | 48 | g84_fifo_chan_engine(struct nvkm_engine *engine) |
44 | case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break; | 49 | { |
50 | switch (engine->subdev.index) { | ||
51 | case NVDEV_ENGINE_GR : return 0; | ||
52 | case NVDEV_ENGINE_MPEG : | ||
53 | case NVDEV_ENGINE_MSPPP : return 1; | ||
54 | case NVDEV_ENGINE_CE0 : return 2; | ||
45 | case NVDEV_ENGINE_VP : | 55 | case NVDEV_ENGINE_VP : |
46 | case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break; | 56 | case NVDEV_ENGINE_MSPDEC: return 3; |
47 | case NVDEV_ENGINE_MSPPP : | 57 | case NVDEV_ENGINE_CIPHER: |
48 | case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break; | 58 | case NVDEV_ENGINE_SEC : return 4; |
49 | case NVDEV_ENGINE_BSP : | 59 | case NVDEV_ENGINE_BSP : |
50 | case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break; | 60 | case NVDEV_ENGINE_MSVLD : return 5; |
61 | default: | ||
62 | WARN_ON(1); | ||
63 | return 0; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | static int | ||
68 | g84_fifo_chan_engine_addr(struct nvkm_engine *engine) | ||
69 | { | ||
70 | switch (engine->subdev.index) { | ||
71 | case NVDEV_ENGINE_DMAOBJ: | ||
72 | case NVDEV_ENGINE_SW : return -1; | ||
73 | case NVDEV_ENGINE_GR : return 0x0020; | ||
74 | case NVDEV_ENGINE_VP : | ||
75 | case NVDEV_ENGINE_MSPDEC: return 0x0040; | ||
76 | case NVDEV_ENGINE_MPEG : | ||
77 | case NVDEV_ENGINE_MSPPP : return 0x0060; | ||
78 | case NVDEV_ENGINE_BSP : | ||
79 | case NVDEV_ENGINE_MSVLD : return 0x0080; | ||
51 | case NVDEV_ENGINE_CIPHER: | 80 | case NVDEV_ENGINE_CIPHER: |
52 | case NVDEV_ENGINE_SEC : engn = 4; addr = 0x00a0; break; | 81 | case NVDEV_ENGINE_SEC : return 0x00a0; |
53 | case NVDEV_ENGINE_CE0 : engn = 2; addr = 0x00c0; break; | 82 | case NVDEV_ENGINE_CE0 : return 0x00c0; |
54 | default: | 83 | default: |
55 | return -EINVAL; | 84 | WARN_ON(1); |
85 | return -1; | ||
56 | } | 86 | } |
87 | } | ||
57 | 88 | ||
89 | static int | ||
90 | g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base, | ||
91 | struct nvkm_engine *engine, bool suspend) | ||
92 | { | ||
93 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); | ||
94 | struct nv50_fifo *fifo = chan->fifo; | ||
95 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | ||
96 | struct nvkm_device *device = subdev->device; | ||
97 | u32 engn, save; | ||
98 | int offset; | ||
99 | bool done; | ||
100 | |||
101 | offset = g84_fifo_chan_engine_addr(engine); | ||
102 | if (offset < 0) | ||
103 | return 0; | ||
104 | |||
105 | engn = g84_fifo_chan_engine(engine); | ||
58 | save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn); | 106 | save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn); |
59 | nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12); | 107 | nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12); |
60 | done = nvkm_msec(device, 2000, | 108 | done = nvkm_msec(device, 2000, |
61 | if (nvkm_rd32(device, 0x0032fc) != 0xffffffff) | 109 | if (nvkm_rd32(device, 0x0032fc) != 0xffffffff) |
62 | break; | 110 | break; |
@@ -64,168 +112,179 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend, | |||
64 | nvkm_wr32(device, 0x002520, save); | 112 | nvkm_wr32(device, 0x002520, save); |
65 | if (!done) { | 113 | if (!done) { |
66 | nvkm_error(subdev, "channel %d [%s] unload timeout\n", | 114 | nvkm_error(subdev, "channel %d [%s] unload timeout\n", |
67 | chan->base.chid, nvkm_client_name(chan)); | 115 | chan->base.chid, chan->base.object.client->name); |
68 | if (suspend) | 116 | if (suspend) |
69 | return -EBUSY; | 117 | return -EBUSY; |
70 | } | 118 | } |
71 | 119 | ||
72 | nvkm_kmap(base->eng); | 120 | nvkm_kmap(chan->eng); |
73 | nvkm_wo32(base->eng, addr + 0x00, 0x00000000); | 121 | nvkm_wo32(chan->eng, offset + 0x00, 0x00000000); |
74 | nvkm_wo32(base->eng, addr + 0x04, 0x00000000); | 122 | nvkm_wo32(chan->eng, offset + 0x04, 0x00000000); |
75 | nvkm_wo32(base->eng, addr + 0x08, 0x00000000); | 123 | nvkm_wo32(chan->eng, offset + 0x08, 0x00000000); |
76 | nvkm_wo32(base->eng, addr + 0x0c, 0x00000000); | 124 | nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000); |
77 | nvkm_wo32(base->eng, addr + 0x10, 0x00000000); | 125 | nvkm_wo32(chan->eng, offset + 0x10, 0x00000000); |
78 | nvkm_wo32(base->eng, addr + 0x14, 0x00000000); | 126 | nvkm_wo32(chan->eng, offset + 0x14, 0x00000000); |
79 | nvkm_done(base->eng); | 127 | nvkm_done(chan->eng); |
80 | return 0; | 128 | return 0; |
81 | } | 129 | } |
82 | 130 | ||
83 | 131 | ||
84 | int | 132 | int |
85 | g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object) | 133 | g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base, |
134 | struct nvkm_engine *engine) | ||
86 | { | 135 | { |
87 | struct nv50_fifo_base *base = (void *)parent->parent; | 136 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
88 | struct nvkm_gpuobj *ectx = (void *)object; | 137 | struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index]; |
89 | u64 limit = ectx->addr + ectx->size - 1; | 138 | u64 limit, start; |
90 | u64 start = ectx->addr; | 139 | int offset; |
91 | u32 addr; | ||
92 | |||
93 | switch (nv_engidx(object->engine)) { | ||
94 | case NVDEV_ENGINE_SW : return 0; | ||
95 | case NVDEV_ENGINE_GR : addr = 0x0020; break; | ||
96 | case NVDEV_ENGINE_VP : | ||
97 | case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break; | ||
98 | case NVDEV_ENGINE_MSPPP : | ||
99 | case NVDEV_ENGINE_MPEG : addr = 0x0060; break; | ||
100 | case NVDEV_ENGINE_BSP : | ||
101 | case NVDEV_ENGINE_MSVLD : addr = 0x0080; break; | ||
102 | case NVDEV_ENGINE_CIPHER: | ||
103 | case NVDEV_ENGINE_SEC : addr = 0x00a0; break; | ||
104 | case NVDEV_ENGINE_CE0 : addr = 0x00c0; break; | ||
105 | default: | ||
106 | return -EINVAL; | ||
107 | } | ||
108 | 140 | ||
109 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; | 141 | offset = g84_fifo_chan_engine_addr(engine); |
110 | nvkm_kmap(base->eng); | 142 | if (offset < 0) |
111 | nvkm_wo32(base->eng, addr + 0x00, 0x00190000); | 143 | return 0; |
112 | nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit)); | 144 | limit = engn->addr + engn->size - 1; |
113 | nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start)); | 145 | start = engn->addr; |
114 | nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 | | 146 | |
115 | upper_32_bits(start)); | 147 | nvkm_kmap(chan->eng); |
116 | nvkm_wo32(base->eng, addr + 0x10, 0x00000000); | 148 | nvkm_wo32(chan->eng, offset + 0x00, 0x00190000); |
117 | nvkm_wo32(base->eng, addr + 0x14, 0x00000000); | 149 | nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit)); |
118 | nvkm_done(base->eng); | 150 | nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start)); |
151 | nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 | | ||
152 | upper_32_bits(start)); | ||
153 | nvkm_wo32(chan->eng, offset + 0x10, 0x00000000); | ||
154 | nvkm_wo32(chan->eng, offset + 0x14, 0x00000000); | ||
155 | nvkm_done(chan->eng); | ||
119 | return 0; | 156 | return 0; |
120 | } | 157 | } |
121 | 158 | ||
159 | static int | ||
160 | g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base, | ||
161 | struct nvkm_engine *engine, | ||
162 | struct nvkm_object *object) | ||
163 | { | ||
164 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); | ||
165 | int engn = engine->subdev.index; | ||
166 | |||
167 | if (g84_fifo_chan_engine_addr(engine) < 0) | ||
168 | return 0; | ||
169 | |||
170 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) { | ||
171 | chan->engn[engn] = nv_gpuobj(object); | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]); | ||
176 | } | ||
177 | |||
122 | int | 178 | int |
123 | g84_fifo_object_attach(struct nvkm_object *parent, | 179 | g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base, |
124 | struct nvkm_object *object, u32 handle) | 180 | struct nvkm_object *object) |
125 | { | 181 | { |
126 | struct nv50_fifo_chan *chan = (void *)parent; | 182 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
183 | u32 handle = object->handle; | ||
127 | u32 context; | 184 | u32 context; |
128 | 185 | ||
129 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) | 186 | switch (object->engine->subdev.index) { |
130 | context = nv_gpuobj(object)->node->offset >> 4; | 187 | case NVDEV_ENGINE_DMAOBJ: |
131 | else | 188 | case NVDEV_ENGINE_SW : context = 0x00000000; break; |
132 | context = 0x00000004; /* just non-zero */ | 189 | case NVDEV_ENGINE_GR : context = 0x00100000; break; |
133 | 190 | case NVDEV_ENGINE_MPEG : | |
134 | if (object->engine) { | 191 | case NVDEV_ENGINE_MSPPP : context = 0x00200000; break; |
135 | switch (nv_engidx(object->engine)) { | 192 | case NVDEV_ENGINE_ME : |
136 | case NVDEV_ENGINE_DMAOBJ: | 193 | case NVDEV_ENGINE_CE0 : context = 0x00300000; break; |
137 | case NVDEV_ENGINE_SW : context |= 0x00000000; break; | 194 | case NVDEV_ENGINE_VP : |
138 | case NVDEV_ENGINE_GR : context |= 0x00100000; break; | 195 | case NVDEV_ENGINE_MSPDEC: context = 0x00400000; break; |
139 | case NVDEV_ENGINE_MPEG : | 196 | case NVDEV_ENGINE_CIPHER: |
140 | case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break; | 197 | case NVDEV_ENGINE_SEC : |
141 | case NVDEV_ENGINE_ME : | 198 | case NVDEV_ENGINE_VIC : context = 0x00500000; break; |
142 | case NVDEV_ENGINE_CE0 : context |= 0x00300000; break; | 199 | case NVDEV_ENGINE_BSP : |
143 | case NVDEV_ENGINE_VP : | 200 | case NVDEV_ENGINE_MSVLD : context = 0x00600000; break; |
144 | case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break; | 201 | default: |
145 | case NVDEV_ENGINE_CIPHER: | 202 | WARN_ON(1); |
146 | case NVDEV_ENGINE_SEC : | 203 | return -EINVAL; |
147 | case NVDEV_ENGINE_VIC : context |= 0x00500000; break; | ||
148 | case NVDEV_ENGINE_BSP : | ||
149 | case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break; | ||
150 | default: | ||
151 | return -EINVAL; | ||
152 | } | ||
153 | } | 204 | } |
154 | 205 | ||
155 | return nvkm_ramht_insert(chan->ramht, NULL, 0, 0, handle, context); | 206 | return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context); |
156 | } | 207 | } |
157 | 208 | ||
158 | int | 209 | static void |
159 | g84_fifo_chan_init(struct nvkm_object *object) | 210 | g84_fifo_chan_init(struct nvkm_fifo_chan *base) |
160 | { | 211 | { |
161 | struct nv50_fifo *fifo = (void *)object->engine; | 212 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
162 | struct nv50_fifo_base *base = (void *)object->parent; | 213 | struct nv50_fifo *fifo = chan->fifo; |
163 | struct nv50_fifo_chan *chan = (void *)object; | ||
164 | struct nvkm_gpuobj *ramfc = base->ramfc; | ||
165 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 214 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
215 | u64 addr = chan->ramfc->addr >> 8; | ||
166 | u32 chid = chan->base.chid; | 216 | u32 chid = chan->base.chid; |
167 | int ret; | ||
168 | |||
169 | ret = nvkm_fifo_channel_init(&chan->base); | ||
170 | if (ret) | ||
171 | return ret; | ||
172 | 217 | ||
173 | nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8); | 218 | nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr); |
174 | nv50_fifo_runlist_update(fifo); | 219 | nv50_fifo_runlist_update(fifo); |
175 | return 0; | ||
176 | } | 220 | } |
177 | 221 | ||
178 | static int | 222 | static const struct nvkm_fifo_chan_func |
179 | g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 223 | g84_fifo_chan_func = { |
180 | struct nvkm_oclass *oclass, void *data, u32 size, | 224 | .dtor = nv50_fifo_chan_dtor, |
181 | struct nvkm_object **pobject) | 225 | .init = g84_fifo_chan_init, |
226 | .fini = nv50_fifo_chan_fini, | ||
227 | .ntfy = g84_fifo_chan_ntfy, | ||
228 | .engine_ctor = g84_fifo_chan_engine_ctor, | ||
229 | .engine_dtor = nv50_fifo_chan_engine_dtor, | ||
230 | .engine_init = g84_fifo_chan_engine_init, | ||
231 | .engine_fini = g84_fifo_chan_engine_fini, | ||
232 | .object_ctor = g84_fifo_chan_object_ctor, | ||
233 | .object_dtor = nv50_fifo_chan_object_dtor, | ||
234 | }; | ||
235 | |||
236 | int | ||
237 | g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push, | ||
238 | const struct nvkm_oclass *oclass, | ||
239 | struct nv50_fifo_chan *chan) | ||
182 | { | 240 | { |
183 | struct nvkm_device *device = nv_engine(engine)->subdev.device; | 241 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
184 | struct nv50_fifo_base *base; | ||
185 | int ret; | 242 | int ret; |
186 | 243 | ||
187 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000, | 244 | ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base, |
188 | 0x1000, NVOBJ_FLAG_HEAP, &base); | 245 | 0x10000, 0x1000, false, vm, push, |
189 | *pobject = nv_object(base); | 246 | (1ULL << NVDEV_ENGINE_BSP) | |
247 | (1ULL << NVDEV_ENGINE_CE0) | | ||
248 | (1ULL << NVDEV_ENGINE_CIPHER) | | ||
249 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | ||
250 | (1ULL << NVDEV_ENGINE_GR) | | ||
251 | (1ULL << NVDEV_ENGINE_ME) | | ||
252 | (1ULL << NVDEV_ENGINE_MPEG) | | ||
253 | (1ULL << NVDEV_ENGINE_MSPDEC) | | ||
254 | (1ULL << NVDEV_ENGINE_MSPPP) | | ||
255 | (1ULL << NVDEV_ENGINE_MSVLD) | | ||
256 | (1ULL << NVDEV_ENGINE_SEC) | | ||
257 | (1ULL << NVDEV_ENGINE_SW) | | ||
258 | (1ULL << NVDEV_ENGINE_VIC) | | ||
259 | (1ULL << NVDEV_ENGINE_VP), | ||
260 | 0, 0xc00000, 0x2000, oclass, &chan->base); | ||
261 | chan->fifo = fifo; | ||
190 | if (ret) | 262 | if (ret) |
191 | return ret; | 263 | return ret; |
192 | 264 | ||
193 | ret = nvkm_gpuobj_new(device, 0x0200, 0, true, &base->base.gpuobj, | 265 | ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst, |
194 | &base->eng); | 266 | &chan->eng); |
195 | if (ret) | 267 | if (ret) |
196 | return ret; | 268 | return ret; |
197 | 269 | ||
198 | ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj, | 270 | ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst, |
199 | &base->pgd); | 271 | &chan->pgd); |
200 | if (ret) | 272 | if (ret) |
201 | return ret; | 273 | return ret; |
202 | 274 | ||
203 | ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); | 275 | ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst, |
276 | &chan->cache); | ||
204 | if (ret) | 277 | if (ret) |
205 | return ret; | 278 | return ret; |
206 | 279 | ||
207 | ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, &base->base.gpuobj, | 280 | ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst, |
208 | &base->cache); | 281 | &chan->ramfc); |
209 | if (ret) | 282 | if (ret) |
210 | return ret; | 283 | return ret; |
211 | 284 | ||
212 | ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, &base->base.gpuobj, | 285 | ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht); |
213 | &base->ramfc); | ||
214 | if (ret) | 286 | if (ret) |
215 | return ret; | 287 | return ret; |
216 | 288 | ||
217 | return 0; | 289 | return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd); |
218 | } | 290 | } |
219 | |||
220 | struct nvkm_oclass | ||
221 | g84_fifo_cclass = { | ||
222 | .handle = NV_ENGCTX(FIFO, 0x84), | ||
223 | .ofuncs = &(struct nvkm_ofuncs) { | ||
224 | .ctor = g84_fifo_context_ctor, | ||
225 | .dtor = nv50_fifo_context_dtor, | ||
226 | .init = _nvkm_fifo_context_init, | ||
227 | .fini = _nvkm_fifo_context_fini, | ||
228 | .rd32 = _nvkm_fifo_context_rd32, | ||
229 | .wr32 = _nvkm_fifo_context_wr32, | ||
230 | }, | ||
231 | }; | ||
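g84_fifo_chan_engine_fini() above kicks the channel's context off the engine by writing the instance address to 0x0032fc, then polls that register until it leaves 0xffffffff, bounded at 2000ms by nvkm_msec(), and reports an unload timeout otherwise. A hedged userspace model of that poll-with-deadline shape; read_reg() and poll_reg() are stand-ins, and the simulated register "completes" after a few reads:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t
read_reg(void)
{
	/* simulated 0x0032fc: "hardware" acknowledges after three reads */
	static int reads;
	return ++reads > 3 ? 0 : 0xffffffff;
}

/* poll until the register leaves 0xffffffff or the deadline passes */
static bool
poll_reg(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (read_reg() != 0xffffffff)
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);
	return false; /* the "unload timeout" error path */
}

int
main(void)
{
	printf("done=%d\n", poll_reg(2000));
	return 0;
}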
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h index 99324222dade..413288597e04 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h | |||
@@ -1,23 +1,24 @@ | |||
1 | #ifndef __GF100_FIFO_CHAN_H__ | 1 | #ifndef __GF100_FIFO_CHAN_H__ |
2 | #define __GF100_FIFO_CHAN_H__ | 2 | #define __GF100_FIFO_CHAN_H__ |
3 | #define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base) | ||
3 | #include "chan.h" | 4 | #include "chan.h" |
4 | #include "gf100.h" | 5 | #include "gf100.h" |
5 | 6 | ||
6 | struct gf100_fifo_base { | 7 | struct gf100_fifo_chan { |
7 | struct nvkm_fifo_base base; | 8 | struct nvkm_fifo_chan base; |
9 | struct gf100_fifo *fifo; | ||
10 | |||
11 | struct list_head head; | ||
12 | bool killed; | ||
13 | |||
8 | struct nvkm_gpuobj *pgd; | 14 | struct nvkm_gpuobj *pgd; |
9 | struct nvkm_vm *vm; | 15 | struct nvkm_vm *vm; |
10 | }; | ||
11 | 16 | ||
12 | struct gf100_fifo_chan { | 17 | struct { |
13 | struct nvkm_fifo_chan base; | 18 | struct nvkm_gpuobj *inst; |
14 | enum { | 19 | struct nvkm_vma vma; |
15 | STOPPED, | 20 | } engn[NVDEV_SUBDEV_NR]; |
16 | RUNNING, | ||
17 | KILLED | ||
18 | } state; | ||
19 | }; | 21 | }; |
20 | 22 | ||
21 | extern struct nvkm_oclass gf100_fifo_cclass; | 23 | extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass; |
22 | extern struct nvkm_oclass gf100_fifo_sclass[]; | ||
23 | #endif | 24 | #endif |
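The reworked per-chipset headers recover their wrapper struct from a pointer to the embedded common base with container_of() macros such as gf100_fifo_chan(p). A small standalone model of that downcast; the container_of() here is a simplified equivalent of the kernel's, and both structs are invented:

#include <stddef.h>
#include <stdio.h>

/* simplified equivalent of the kernel macro */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int chid; };

struct wrapper {
	int extra;
	struct base base;    /* embedded common part */
};

#define wrapper_chan(p) container_of((p), struct wrapper, base)

int
main(void)
{
	struct wrapper w = { .extra = 7, .base = { .chid = 1 } };
	struct base *b = &w.base;           /* what a callback receives */
	printf("extra=%d\n", wrapper_chan(b)->extra); /* prints extra=7 */
	return 0;
}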
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h index 3490cb6d8bd3..2b9d8bfc7fd7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h | |||
@@ -1,27 +1,29 @@ | |||
1 | #ifndef __GK104_FIFO_CHAN_H__ | 1 | #ifndef __GK104_FIFO_CHAN_H__ |
2 | #define __GK104_FIFO_CHAN_H__ | 2 | #define __GK104_FIFO_CHAN_H__ |
3 | #define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base) | ||
3 | #include "chan.h" | 4 | #include "chan.h" |
4 | #include "gk104.h" | 5 | #include "gk104.h" |
5 | 6 | ||
6 | struct gk104_fifo_base { | 7 | struct gk104_fifo_chan { |
7 | struct nvkm_fifo_base base; | 8 | struct nvkm_fifo_chan base; |
9 | struct gk104_fifo *fifo; | ||
10 | int engine; | ||
11 | |||
12 | struct list_head head; | ||
13 | bool killed; | ||
14 | |||
8 | struct nvkm_gpuobj *pgd; | 15 | struct nvkm_gpuobj *pgd; |
9 | struct nvkm_vm *vm; | 16 | struct nvkm_vm *vm; |
10 | }; | ||
11 | 17 | ||
12 | struct gk104_fifo_chan { | 18 | struct { |
13 | struct nvkm_fifo_chan base; | 19 | struct nvkm_gpuobj *inst; |
14 | u32 engine; | 20 | struct nvkm_vma vma; |
15 | enum { | 21 | } engn[NVDEV_SUBDEV_NR]; |
16 | STOPPED, | ||
17 | RUNNING, | ||
18 | KILLED | ||
19 | } state; | ||
20 | }; | 22 | }; |
21 | 23 | ||
22 | extern struct nvkm_oclass gk104_fifo_cclass; | 24 | int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *, |
23 | extern struct nvkm_oclass gk104_fifo_sclass[]; | 25 | void *data, u32 size, struct nvkm_object **); |
24 | extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs; | ||
25 | 26 | ||
26 | extern struct nvkm_oclass gm204_fifo_sclass[]; | 27 | extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass; |
28 | extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass; | ||
27 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h index 028212df41bc..ac62a6404f87 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h | |||
@@ -1,24 +1,24 @@ | |||
1 | #ifndef __NV04_FIFO_CHAN_H__ | 1 | #ifndef __NV04_FIFO_CHAN_H__ |
2 | #define __NV04_FIFO_CHAN_H__ | 2 | #define __NV04_FIFO_CHAN_H__ |
3 | #define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base) | ||
3 | #include "chan.h" | 4 | #include "chan.h" |
4 | #include "nv04.h" | 5 | #include "nv04.h" |
5 | 6 | ||
6 | struct nv04_fifo_chan { | 7 | struct nv04_fifo_chan { |
7 | struct nvkm_fifo_chan base; | 8 | struct nvkm_fifo_chan base; |
8 | u32 subc[8]; | 9 | struct nv04_fifo *fifo; |
9 | u32 ramfc; | 10 | u32 ramfc; |
11 | struct nvkm_gpuobj *engn[NVDEV_SUBDEV_NR]; | ||
10 | }; | 12 | }; |
11 | 13 | ||
12 | int nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32); | 14 | extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func; |
13 | void nv04_fifo_object_detach(struct nvkm_object *, int); | 15 | void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *); |
16 | void nv04_fifo_dma_init(struct nvkm_fifo_chan *); | ||
17 | void nv04_fifo_dma_fini(struct nvkm_fifo_chan *); | ||
18 | void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int); | ||
14 | 19 | ||
15 | void nv04_fifo_chan_dtor(struct nvkm_object *); | 20 | extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass; |
16 | int nv04_fifo_chan_init(struct nvkm_object *); | 21 | extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass; |
17 | int nv04_fifo_chan_fini(struct nvkm_object *, bool suspend); | 22 | extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass; |
18 | 23 | extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass; | |
19 | extern struct nvkm_oclass nv04_fifo_cclass; | ||
20 | extern struct nvkm_oclass nv04_fifo_sclass[]; | ||
21 | extern struct nvkm_oclass nv10_fifo_sclass[]; | ||
22 | extern struct nvkm_oclass nv17_fifo_sclass[]; | ||
23 | extern struct nvkm_oclass nv40_fifo_sclass[]; | ||
24 | #endif | 24 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c index aeaba7b9bcae..2a25019ce0f4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c | |||
@@ -25,27 +25,37 @@ | |||
25 | 25 | ||
26 | #include <core/client.h> | 26 | #include <core/client.h> |
27 | #include <core/ramht.h> | 27 | #include <core/ramht.h> |
28 | #include <subdev/mmu.h> | ||
28 | #include <subdev/timer.h> | 29 | #include <subdev/timer.h> |
29 | 30 | ||
30 | int | 31 | static int |
31 | nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend, | 32 | nv50_fifo_chan_engine_addr(struct nvkm_engine *engine) |
32 | struct nvkm_object *object) | ||
33 | { | 33 | { |
34 | struct nv50_fifo *fifo = (void *)parent->engine; | 34 | switch (engine->subdev.index) { |
35 | struct nv50_fifo_base *base = (void *)parent->parent; | 35 | case NVDEV_ENGINE_DMAOBJ: |
36 | struct nv50_fifo_chan *chan = (void *)parent; | 36 | case NVDEV_ENGINE_SW : return -1; |
37 | case NVDEV_ENGINE_GR : return 0x0000; | ||
38 | case NVDEV_ENGINE_MPEG : return 0x0060; | ||
39 | default: | ||
40 | WARN_ON(1); | ||
41 | return -1; | ||
42 | } | ||
43 | } | ||
44 | |||
45 | static int | ||
46 | nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base, | ||
47 | struct nvkm_engine *engine, bool suspend) | ||
48 | { | ||
49 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); | ||
50 | struct nv50_fifo *fifo = chan->fifo; | ||
37 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 51 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
38 | struct nvkm_device *device = subdev->device; | 52 | struct nvkm_device *device = subdev->device; |
39 | u32 addr, me; | 53 | int offset, ret = 0; |
40 | int ret = 0; | 54 | u32 me; |
41 | 55 | ||
42 | switch (nv_engidx(object->engine)) { | 56 | offset = nv50_fifo_chan_engine_addr(engine); |
43 | case NVDEV_ENGINE_SW : return 0; | 57 | if (offset < 0) |
44 | case NVDEV_ENGINE_GR : addr = 0x0000; break; | 58 | return 0; |
45 | case NVDEV_ENGINE_MPEG : addr = 0x0060; break; | ||
46 | default: | ||
47 | return -EINVAL; | ||
48 | } | ||
49 | 59 | ||
50 | /* HW bug workaround: | 60 | /* HW bug workaround: |
51 | * | 61 | * |
@@ -62,101 +72,124 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend, | |||
62 | me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001); | 72 | me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001); |
63 | 73 | ||
64 | /* do the kickoff... */ | 74 | /* do the kickoff... */ |
65 | nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12); | 75 | nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12); |
66 | if (nvkm_msec(device, 2000, | 76 | if (nvkm_msec(device, 2000, |
67 | if (nvkm_rd32(device, 0x0032fc) != 0xffffffff) | 77 | if (nvkm_rd32(device, 0x0032fc) != 0xffffffff) |
68 | break; | 78 | break; |
69 | ) < 0) { | 79 | ) < 0) { |
70 | nvkm_error(subdev, "channel %d [%s] unload timeout\n", | 80 | nvkm_error(subdev, "channel %d [%s] unload timeout\n", |
71 | chan->base.chid, nvkm_client_name(chan)); | 81 | chan->base.chid, chan->base.object.client->name); |
72 | if (suspend) | 82 | if (suspend) |
73 | ret = -EBUSY; | 83 | ret = -EBUSY; |
74 | } | 84 | } |
75 | nvkm_wr32(device, 0x00b860, me); | 85 | nvkm_wr32(device, 0x00b860, me); |
76 | 86 | ||
77 | if (ret == 0) { | 87 | if (ret == 0) { |
78 | nvkm_kmap(base->eng); | 88 | nvkm_kmap(chan->eng); |
79 | nvkm_wo32(base->eng, addr + 0x00, 0x00000000); | 89 | nvkm_wo32(chan->eng, offset + 0x00, 0x00000000); |
80 | nvkm_wo32(base->eng, addr + 0x04, 0x00000000); | 90 | nvkm_wo32(chan->eng, offset + 0x04, 0x00000000); |
81 | nvkm_wo32(base->eng, addr + 0x08, 0x00000000); | 91 | nvkm_wo32(chan->eng, offset + 0x08, 0x00000000); |
82 | nvkm_wo32(base->eng, addr + 0x0c, 0x00000000); | 92 | nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000); |
83 | nvkm_wo32(base->eng, addr + 0x10, 0x00000000); | 93 | nvkm_wo32(chan->eng, offset + 0x10, 0x00000000); |
84 | nvkm_wo32(base->eng, addr + 0x14, 0x00000000); | 94 | nvkm_wo32(chan->eng, offset + 0x14, 0x00000000); |
85 | nvkm_done(base->eng); | 95 | nvkm_done(chan->eng); |
86 | } | 96 | } |
87 | 97 | ||
88 | return ret; | 98 | return ret; |
89 | } | 99 | } |
90 | 100 | ||
91 | int | 101 | static int |
92 | nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object) | 102 | nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base, |
103 | struct nvkm_engine *engine) | ||
93 | { | 104 | { |
94 | struct nv50_fifo_base *base = (void *)parent->parent; | 105 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
95 | struct nvkm_gpuobj *ectx = (void *)object; | 106 | struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index]; |
96 | u64 limit = ectx->addr + ectx->size - 1; | 107 | u64 limit, start; |
97 | u64 start = ectx->addr; | 108 | int offset; |
98 | u32 addr; | 109 | |
99 | 110 | offset = nv50_fifo_chan_engine_addr(engine); | |
100 | switch (nv_engidx(object->engine)) { | 111 | if (offset < 0) |
101 | case NVDEV_ENGINE_SW : return 0; | 112 | return 0; |
102 | case NVDEV_ENGINE_GR : addr = 0x0000; break; | 113 | limit = engn->addr + engn->size - 1; |
103 | case NVDEV_ENGINE_MPEG : addr = 0x0060; break; | 114 | start = engn->addr; |
104 | default: | 115 | |
105 | return -EINVAL; | 116 | nvkm_kmap(chan->eng); |
117 | nvkm_wo32(chan->eng, offset + 0x00, 0x00190000); | ||
118 | nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit)); | ||
119 | nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start)); | ||
120 | nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 | | ||
121 | upper_32_bits(start)); | ||
122 | nvkm_wo32(chan->eng, offset + 0x10, 0x00000000); | ||
123 | nvkm_wo32(chan->eng, offset + 0x14, 0x00000000); | ||
124 | nvkm_done(chan->eng); | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | void | ||
129 | nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base, | ||
130 | struct nvkm_engine *engine) | ||
131 | { | ||
132 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); | ||
133 | if (!chan->engn[engine->subdev.index] || | ||
134 | chan->engn[engine->subdev.index]->object.oclass) { | ||
135 | chan->engn[engine->subdev.index] = NULL; | ||
136 | return; | ||
106 | } | 137 | } |
138 | nvkm_gpuobj_del(&chan->engn[engine->subdev.index]); | ||
139 | } | ||
107 | 140 | ||
108 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; | 141 | static int |
109 | 142 | nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base, | |
110 | nvkm_kmap(base->eng); | 143 | struct nvkm_engine *engine, |
111 | nvkm_wo32(base->eng, addr + 0x00, 0x00190000); | 144 | struct nvkm_object *object) |
112 | nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit)); | 145 | { |
113 | nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start)); | 146 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
114 | nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 | | 147 | int engn = engine->subdev.index; |
115 | upper_32_bits(start)); | 148 | |
116 | nvkm_wo32(base->eng, addr + 0x10, 0x00000000); | 149 | if (nv50_fifo_chan_engine_addr(engine) < 0) |
117 | nvkm_wo32(base->eng, addr + 0x14, 0x00000000); | 150 | return 0; |
118 | nvkm_done(base->eng); | 151 | |
119 | return 0; | 152 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) { |
153 | chan->engn[engn] = nv_gpuobj(object); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]); | ||
120 | } | 158 | } |
121 | 159 | ||
122 | void | 160 | void |
123 | nv50_fifo_object_detach(struct nvkm_object *parent, int cookie) | 161 | nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie) |
124 | { | 162 | { |
125 | struct nv50_fifo_chan *chan = (void *)parent; | 163 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
126 | nvkm_ramht_remove(chan->ramht, cookie); | 164 | nvkm_ramht_remove(chan->ramht, cookie); |
127 | } | 165 | } |
128 | 166 | ||
129 | int | 167 | static int |
130 | nv50_fifo_object_attach(struct nvkm_object *parent, | 168 | nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base, |
131 | struct nvkm_object *object, u32 handle) | 169 | struct nvkm_object *object) |
132 | { | 170 | { |
133 | struct nv50_fifo_chan *chan = (void *)parent; | 171 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
172 | u32 handle = object->handle; | ||
134 | u32 context; | 173 | u32 context; |
135 | 174 | ||
136 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) | 175 | switch (object->engine->subdev.index) { |
137 | context = nv_gpuobj(object)->node->offset >> 4; | 176 | case NVDEV_ENGINE_DMAOBJ: |
138 | else | 177 | case NVDEV_ENGINE_SW : context = 0x00000000; break; |
139 | context = 0x00000004; /* just non-zero */ | 178 | case NVDEV_ENGINE_GR : context = 0x00100000; break; |
140 | 179 | case NVDEV_ENGINE_MPEG : context = 0x00200000; break; | |
141 | if (object->engine) { | 180 | default: |
142 | switch (nv_engidx(object->engine)) { | 181 | WARN_ON(1); |
143 | case NVDEV_ENGINE_DMAOBJ: | 182 | return -EINVAL; |
144 | case NVDEV_ENGINE_SW : context |= 0x00000000; break; | ||
145 | case NVDEV_ENGINE_GR : context |= 0x00100000; break; | ||
146 | case NVDEV_ENGINE_MPEG : context |= 0x00200000; break; | ||
147 | default: | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | } | 183 | } |
151 | 184 | ||
152 | return nvkm_ramht_insert(chan->ramht, NULL, 0, 0, handle, context); | 185 | return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context); |
153 | } | 186 | } |
154 | 187 | ||
155 | int | 188 | void |
156 | nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend) | 189 | nv50_fifo_chan_fini(struct nvkm_fifo_chan *base) |
157 | { | 190 | { |
158 | struct nv50_fifo *fifo = (void *)object->engine; | 191 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
159 | struct nv50_fifo_chan *chan = (void *)object; | 192 | struct nv50_fifo *fifo = chan->fifo; |
160 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 193 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
161 | u32 chid = chan->base.chid; | 194 | u32 chid = chan->base.chid; |
162 | 195 | ||
@@ -164,96 +197,84 @@ nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend) | |||
164 | nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000); | 197 | nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000); |
165 | nv50_fifo_runlist_update(fifo); | 198 | nv50_fifo_runlist_update(fifo); |
166 | nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000); | 199 | nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000); |
167 | |||
168 | return nvkm_fifo_channel_fini(&chan->base, suspend); | ||
169 | } | 200 | } |
170 | 201 | ||
171 | int | 202 | static void |
172 | nv50_fifo_chan_init(struct nvkm_object *object) | 203 | nv50_fifo_chan_init(struct nvkm_fifo_chan *base) |
173 | { | 204 | { |
174 | struct nv50_fifo *fifo = (void *)object->engine; | 205 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
175 | struct nv50_fifo_base *base = (void *)object->parent; | 206 | struct nv50_fifo *fifo = chan->fifo; |
176 | struct nv50_fifo_chan *chan = (void *)object; | ||
177 | struct nvkm_gpuobj *ramfc = base->ramfc; | ||
178 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 207 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
208 | u64 addr = chan->ramfc->addr >> 12; | ||
179 | u32 chid = chan->base.chid; | 209 | u32 chid = chan->base.chid; |
180 | int ret; | ||
181 | |||
182 | ret = nvkm_fifo_channel_init(&chan->base); | ||
183 | if (ret) | ||
184 | return ret; | ||
185 | 210 | ||
186 | nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12); | 211 | nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr); |
187 | nv50_fifo_runlist_update(fifo); | 212 | nv50_fifo_runlist_update(fifo); |
188 | return 0; | ||
189 | } | 213 | } |
190 | 214 | ||
191 | void | 215 | void * |
192 | nv50_fifo_chan_dtor(struct nvkm_object *object) | 216 | nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base) |
193 | { | 217 | { |
194 | struct nv50_fifo_chan *chan = (void *)object; | 218 | struct nv50_fifo_chan *chan = nv50_fifo_chan(base); |
219 | nvkm_vm_ref(NULL, &chan->vm, chan->pgd); | ||
195 | nvkm_ramht_del(&chan->ramht); | 220 | nvkm_ramht_del(&chan->ramht); |
196 | nvkm_fifo_channel_destroy(&chan->base); | 221 | nvkm_gpuobj_del(&chan->pgd); |
222 | nvkm_gpuobj_del(&chan->eng); | ||
223 | nvkm_gpuobj_del(&chan->cache); | ||
224 | nvkm_gpuobj_del(&chan->ramfc); | ||
225 | return chan; | ||
197 | } | 226 | } |
198 | 227 | ||
199 | void | 228 | static const struct nvkm_fifo_chan_func |
200 | nv50_fifo_context_dtor(struct nvkm_object *object) | 229 | nv50_fifo_chan_func = { |
201 | { | 230 | .dtor = nv50_fifo_chan_dtor, |
202 | struct nv50_fifo_base *base = (void *)object; | 231 | .init = nv50_fifo_chan_init, |
203 | nvkm_vm_ref(NULL, &base->vm, base->pgd); | 232 | .fini = nv50_fifo_chan_fini, |
204 | nvkm_gpuobj_del(&base->pgd); | 233 | .engine_ctor = nv50_fifo_chan_engine_ctor, |
205 | nvkm_gpuobj_del(&base->eng); | 234 | .engine_dtor = nv50_fifo_chan_engine_dtor, |
206 | nvkm_gpuobj_del(&base->ramfc); | 235 | .engine_init = nv50_fifo_chan_engine_init, |
207 | nvkm_gpuobj_del(&base->cache); | 236 | .engine_fini = nv50_fifo_chan_engine_fini, |
208 | nvkm_fifo_context_destroy(&base->base); | 237 | .object_ctor = nv50_fifo_chan_object_ctor, |
209 | } | 238 | .object_dtor = nv50_fifo_chan_object_dtor, |
239 | }; | ||
210 | 240 | ||
211 | static int | 241 | int |
212 | nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 242 | nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push, |
213 | struct nvkm_oclass *oclass, void *data, u32 size, | 243 | const struct nvkm_oclass *oclass, |
214 | struct nvkm_object **pobject) | 244 | struct nv50_fifo_chan *chan) |
215 | { | 245 | { |
216 | struct nvkm_device *device = nv_engine(engine)->subdev.device; | 246 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
217 | struct nv50_fifo_base *base; | ||
218 | int ret; | 247 | int ret; |
219 | 248 | ||
220 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000, | 249 | ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base, |
221 | 0x1000, NVOBJ_FLAG_HEAP, &base); | 250 | 0x10000, 0x1000, false, vm, push, |
222 | *pobject = nv_object(base); | 251 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
252 | (1ULL << NVDEV_ENGINE_SW) | | ||
253 | (1ULL << NVDEV_ENGINE_GR) | | ||
254 | (1ULL << NVDEV_ENGINE_MPEG), | ||
255 | 0, 0xc00000, 0x2000, oclass, &chan->base); | ||
256 | chan->fifo = fifo; | ||
223 | if (ret) | 257 | if (ret) |
224 | return ret; | 258 | return ret; |
225 | 259 | ||
226 | ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, &base->base.gpuobj, | 260 | ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst, |
227 | &base->ramfc); | 261 | &chan->ramfc); |
228 | if (ret) | 262 | if (ret) |
229 | return ret; | 263 | return ret; |
230 | 264 | ||
231 | ret = nvkm_gpuobj_new(device, 0x1200, 0, true, &base->base.gpuobj, | 265 | ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst, |
232 | &base->eng); | 266 | &chan->eng); |
233 | if (ret) | 267 | if (ret) |
234 | return ret; | 268 | return ret; |
235 | 269 | ||
236 | ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj, | 270 | ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst, |
237 | &base->pgd); | 271 | &chan->pgd); |
238 | if (ret) | 272 | if (ret) |
239 | return ret; | 273 | return ret; |
240 | 274 | ||
241 | ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); | 275 | ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht); |
242 | if (ret) | 276 | if (ret) |
243 | return ret; | 277 | return ret; |
244 | 278 | ||
245 | return 0; | 279 | return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd); |
246 | } | 280 | } |
247 | |||
248 | struct nvkm_oclass | ||
249 | nv50_fifo_cclass = { | ||
250 | .handle = NV_ENGCTX(FIFO, 0x50), | ||
251 | .ofuncs = &(struct nvkm_ofuncs) { | ||
252 | .ctor = nv50_fifo_context_ctor, | ||
253 | .dtor = nv50_fifo_context_dtor, | ||
254 | .init = _nvkm_fifo_context_init, | ||
255 | .fini = _nvkm_fifo_context_fini, | ||
256 | .rd32 = _nvkm_fifo_context_rd32, | ||
257 | .wr32 = _nvkm_fifo_context_wr32, | ||
258 | }, | ||
259 | }; | ||
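Both the nv50 and g84 engine_init paths above program an engine's context slot as six words: 0x00190000, the low 32 bits of limit and start, the high bits packed as upper_32_bits(limit) << 24 | upper_32_bits(start), then two zeros. A sketch of that packing with stand-in helper macros, using a made-up 40-bit address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int
main(void)
{
	uint64_t start = 0xab12345678ULL;     /* made-up 40-bit address */
	uint64_t limit = start + 0x10000 - 1; /* inclusive upper bound */
	uint32_t w[6] = {
		0x00190000,
		lower_32_bits(limit),
		lower_32_bits(start),
		upper_32_bits(limit) << 24 | upper_32_bits(start),
		0x00000000,
		0x00000000,
	};
	for (int i = 0; i < 6; i++)
		printf("+0x%02x: 0x%08" PRIx32 "\n", i * 4, w[i]);
	return 0;
}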
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h index c4f2f1ff4c9e..7ef6bc2e27ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h | |||
@@ -1,42 +1,35 @@ | |||
1 | #ifndef __NV50_FIFO_CHAN_H__ | 1 | #ifndef __NV50_FIFO_CHAN_H__ |
2 | #define __NV50_FIFO_CHAN_H__ | 2 | #define __NV50_FIFO_CHAN_H__ |
3 | #define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base) | ||
3 | #include "chan.h" | 4 | #include "chan.h" |
4 | #include "nv50.h" | 5 | #include "nv50.h" |
5 | 6 | ||
6 | struct nv50_fifo_base { | 7 | struct nv50_fifo_chan { |
7 | struct nvkm_fifo_base base; | 8 | struct nv50_fifo *fifo; |
9 | struct nvkm_fifo_chan base; | ||
10 | |||
8 | struct nvkm_gpuobj *ramfc; | 11 | struct nvkm_gpuobj *ramfc; |
9 | struct nvkm_gpuobj *cache; | 12 | struct nvkm_gpuobj *cache; |
10 | struct nvkm_gpuobj *eng; | 13 | struct nvkm_gpuobj *eng; |
11 | struct nvkm_gpuobj *pgd; | 14 | struct nvkm_gpuobj *pgd; |
15 | struct nvkm_ramht *ramht; | ||
12 | struct nvkm_vm *vm; | 16 | struct nvkm_vm *vm; |
13 | }; | ||
14 | 17 | ||
15 | struct nv50_fifo_chan { | 18 | struct nvkm_gpuobj *engn[NVDEV_SUBDEV_NR]; |
16 | struct nvkm_fifo_chan base; | ||
17 | u32 subc[8]; | ||
18 | struct nvkm_ramht *ramht; | ||
19 | }; | 19 | }; |
20 | 20 | ||
21 | extern struct nvkm_oclass nv50_fifo_cclass; | 21 | int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push, |
22 | extern struct nvkm_oclass nv50_fifo_sclass[]; | 22 | const struct nvkm_oclass *, struct nv50_fifo_chan *); |
23 | void nv50_fifo_context_dtor(struct nvkm_object *); | 23 | void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *); |
24 | void nv50_fifo_chan_dtor(struct nvkm_object *); | 24 | void nv50_fifo_chan_fini(struct nvkm_fifo_chan *); |
25 | int nv50_fifo_chan_init(struct nvkm_object *); | 25 | void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *); |
26 | int nv50_fifo_chan_fini(struct nvkm_object *, bool); | 26 | void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int); |
27 | int nv50_fifo_context_attach(struct nvkm_object *, struct nvkm_object *); | 27 | |
28 | int nv50_fifo_context_detach(struct nvkm_object *, bool, | 28 | int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push, |
29 | struct nvkm_object *); | 29 | const struct nvkm_oclass *, struct nv50_fifo_chan *); |
30 | int nv50_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32); | ||
31 | void nv50_fifo_object_detach(struct nvkm_object *, int); | ||
32 | extern struct nvkm_ofuncs nv50_fifo_ofuncs_ind; | ||
33 | 30 | ||
34 | extern struct nvkm_oclass g84_fifo_cclass; | 31 | extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass; |
35 | extern struct nvkm_oclass g84_fifo_sclass[]; | 32 | extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass; |
36 | int g84_fifo_chan_init(struct nvkm_object *); | 33 | extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass; |
37 | int g84_fifo_context_attach(struct nvkm_object *, struct nvkm_object *); | 34 | extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass; |
38 | int g84_fifo_context_detach(struct nvkm_object *, bool, | ||
39 | struct nvkm_object *); | ||
40 | int g84_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32); | ||
41 | extern struct nvkm_ofuncs g84_fifo_ofuncs_ind; | ||
42 | #endif | 35 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c index 2016a9884b38..a5ca52c7b74f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c | |||
@@ -30,15 +30,14 @@ | |||
30 | #include <nvif/unpack.h> | 30 | #include <nvif/unpack.h> |
31 | 31 | ||
32 | static int | 32 | static int |
33 | g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, | 33 | g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
34 | struct nvkm_oclass *oclass, void *data, u32 size, | 34 | void *data, u32 size, struct nvkm_object **pobject) |
35 | struct nvkm_object **pobject) | ||
36 | { | 35 | { |
36 | struct nvkm_object *parent = oclass->parent; | ||
37 | union { | 37 | union { |
38 | struct nv50_channel_dma_v0 v0; | 38 | struct nv50_channel_dma_v0 v0; |
39 | } *args = data; | 39 | } *args = data; |
40 | struct nvkm_device *device = parent->engine->subdev.device; | 40 | struct nv50_fifo *fifo = nv50_fifo(base); |
41 | struct nv50_fifo_base *base = (void *)parent; | ||
42 | struct nv50_fifo_chan *chan; | 41 | struct nv50_fifo_chan *chan; |
43 | int ret; | 42 | int ret; |
44 | 43 | ||
@@ -48,80 +47,47 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, | |||
48 | "pushbuf %llx offset %016llx\n", | 47 | "pushbuf %llx offset %016llx\n", |
49 | args->v0.version, args->v0.vm, args->v0.pushbuf, | 48 | args->v0.version, args->v0.vm, args->v0.pushbuf, |
50 | args->v0.offset); | 49 | args->v0.offset); |
51 | if (args->v0.vm) | 50 | if (!args->v0.pushbuf) |
52 | return -ENOENT; | 51 | return -EINVAL; |
53 | } else | 52 | } else |
54 | return ret; | 53 | return ret; |
55 | 54 | ||
56 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 55 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
57 | 0x2000, args->v0.pushbuf, | 56 | return -ENOMEM; |
58 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 57 | *pobject = &chan->base.object; |
59 | (1ULL << NVDEV_ENGINE_SW) | | ||
60 | (1ULL << NVDEV_ENGINE_GR) | | ||
61 | (1ULL << NVDEV_ENGINE_MPEG) | | ||
62 | (1ULL << NVDEV_ENGINE_ME) | | ||
63 | (1ULL << NVDEV_ENGINE_VP) | | ||
64 | (1ULL << NVDEV_ENGINE_CIPHER) | | ||
65 | (1ULL << NVDEV_ENGINE_SEC) | | ||
66 | (1ULL << NVDEV_ENGINE_BSP) | | ||
67 | (1ULL << NVDEV_ENGINE_MSVLD) | | ||
68 | (1ULL << NVDEV_ENGINE_MSPDEC) | | ||
69 | (1ULL << NVDEV_ENGINE_MSPPP) | | ||
70 | (1ULL << NVDEV_ENGINE_CE0) | | ||
71 | (1ULL << NVDEV_ENGINE_VIC), &chan); | ||
72 | *pobject = nv_object(chan); | ||
73 | if (ret) | ||
74 | return ret; | ||
75 | |||
76 | chan->base.inst = base->base.gpuobj.addr; | ||
77 | args->v0.chid = chan->base.chid; | ||
78 | 58 | ||
79 | ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, | 59 | ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf, |
80 | &chan->ramht); | 60 | oclass, chan); |
81 | if (ret) | 61 | if (ret) |
82 | return ret; | 62 | return ret; |
83 | 63 | ||
84 | nv_parent(chan)->context_attach = g84_fifo_context_attach; | 64 | args->v0.chid = chan->base.chid; |
85 | nv_parent(chan)->context_detach = g84_fifo_context_detach; | ||
86 | nv_parent(chan)->object_attach = g84_fifo_object_attach; | ||
87 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; | ||
88 | 65 | ||
89 | nvkm_kmap(base->ramfc); | 66 | nvkm_kmap(chan->ramfc); |
90 | nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); | 67 | nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); |
91 | nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); | 68 | nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); |
92 | nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); | 69 | nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); |
93 | nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); | 70 | nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); |
94 | nvkm_wo32(base->ramfc, 0x3c, 0x003f6078); | 71 | nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); |
95 | nvkm_wo32(base->ramfc, 0x44, 0x01003fff); | 72 | nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); |
96 | nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); | 73 | nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); |
97 | nvkm_wo32(base->ramfc, 0x4c, 0xffffffff); | 74 | nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); |
98 | nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); | 75 | nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); |
99 | nvkm_wo32(base->ramfc, 0x78, 0x00000000); | 76 | nvkm_wo32(chan->ramfc, 0x78, 0x00000000); |
100 | nvkm_wo32(base->ramfc, 0x7c, 0x30000001); | 77 | nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); |
101 | nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | | 78 | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | |
102 | (4 << 24) /* SEARCH_FULL */ | | 79 | (4 << 24) /* SEARCH_FULL */ | |
103 | (chan->ramht->gpuobj->node->offset >> 4)); | 80 | (chan->ramht->gpuobj->node->offset >> 4)); |
104 | nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10); | 81 | nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); |
105 | nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12); | 82 | nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12); |
106 | nvkm_done(base->ramfc); | 83 | nvkm_done(chan->ramfc); |
107 | return 0; | 84 | return 0; |
108 | } | 85 | } |
109 | 86 | ||
110 | static struct nvkm_ofuncs | 87 | const struct nvkm_fifo_chan_oclass |
111 | g84_fifo_ofuncs_dma = { | 88 | g84_fifo_dma_oclass = { |
112 | .ctor = g84_fifo_chan_ctor_dma, | 89 | .base.oclass = G82_CHANNEL_DMA, |
113 | .dtor = nv50_fifo_chan_dtor, | 90 | .base.minver = 0, |
114 | .init = g84_fifo_chan_init, | 91 | .base.maxver = 0, |
115 | .fini = nv50_fifo_chan_fini, | 92 | .ctor = g84_fifo_dma_new, |
116 | .map = _nvkm_fifo_channel_map, | ||
117 | .rd32 = _nvkm_fifo_channel_rd32, | ||
118 | .wr32 = _nvkm_fifo_channel_wr32, | ||
119 | .ntfy = _nvkm_fifo_channel_ntfy | ||
120 | }; | ||
121 | |||
122 | struct nvkm_oclass | ||
123 | g84_fifo_sclass[] = { | ||
124 | { G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma }, | ||
125 | { G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind }, | ||
126 | {} | ||
127 | }; | 93 | }; |
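The RAMFC word written at +0x80 above packs three fields of the hash-table descriptor: the RAMHT size as log2(entries) - 9 in the top bits, the search mode (4 = SEARCH_FULL) in bits 26:24, and the RAMHT offset in 16-byte units in the low bits. A minimal standalone sketch of that encoding; the helper name and sample values are illustrative, not from the patch:

#include <stdint.h>
#include <stdio.h>

/* Pack the RAMFC hash-table descriptor word, as written at +0x80 above:
 * bits 31:27 = log2(RAMHT entries) - 9, bits 26:24 = search mode
 * (4 = SEARCH_FULL), low bits = RAMHT offset in 16-byte units. */
static uint32_t ramht_desc(unsigned bits, uint32_t offset)
{
        return ((bits - 9u) << 27) | (4u << 24) | (offset >> 4);
}

int main(void)
{
        /* e.g. a 2^12-entry RAMHT at offset 0x10000 (sample values) */
        printf("0x%08x\n", ramht_desc(12, 0x10000));
        return 0;
}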
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c index 8cc87103a369..eafa87886643 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c | |||
@@ -31,74 +31,51 @@ | |||
31 | #include <nvif/class.h> | 31 | #include <nvif/class.h> |
32 | #include <nvif/unpack.h> | 32 | #include <nvif/unpack.h> |
33 | 33 | ||
34 | int | ||
35 | nv04_fifo_context_attach(struct nvkm_object *parent, | ||
36 | struct nvkm_object *object) | ||
37 | { | ||
38 | nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid; | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | void | 34 | void |
43 | nv04_fifo_object_detach(struct nvkm_object *parent, int cookie) | 35 | nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie) |
44 | { | 36 | { |
45 | struct nv04_fifo *fifo = (void *)parent->engine; | 37 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
46 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 38 | struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; |
47 | mutex_lock(&nv_subdev(fifo)->mutex); | ||
48 | nvkm_ramht_remove(imem->ramht, cookie); | 39 | nvkm_ramht_remove(imem->ramht, cookie); |
49 | mutex_unlock(&nv_subdev(fifo)->mutex); | ||
50 | } | 40 | } |
51 | 41 | ||
52 | int | 42 | static int |
53 | nv04_fifo_object_attach(struct nvkm_object *parent, | 43 | nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, |
54 | struct nvkm_object *object, u32 handle) | 44 | struct nvkm_object *object) |
55 | { | 45 | { |
56 | struct nv04_fifo *fifo = (void *)parent->engine; | 46 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
57 | struct nv04_fifo_chan *chan = (void *)parent; | 47 | struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; |
58 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 48 | u32 context = 0x80000000 | chan->base.chid << 24; |
59 | u32 context, chid = chan->base.chid; | 49 | u32 handle = object->handle; |
60 | int ret; | 50 | int hash; |
61 | 51 | ||
62 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) | 52 | switch (object->engine->subdev.index) { |
63 | context = nv_gpuobj(object)->addr >> 4; | 53 | case NVDEV_ENGINE_DMAOBJ: |
64 | else | 54 | case NVDEV_ENGINE_SW : context |= 0x00000000; break; |
65 | context = 0x00000004; /* just non-zero */ | 55 | case NVDEV_ENGINE_GR : context |= 0x00010000; break; |
66 | 56 | case NVDEV_ENGINE_MPEG : context |= 0x00020000; break; | |
67 | if (object->engine) { | 57 | default: |
68 | switch (nv_engidx(object->engine)) { | 58 | WARN_ON(1); |
69 | case NVDEV_ENGINE_DMAOBJ: | 59 | return -EINVAL; |
70 | case NVDEV_ENGINE_SW: | ||
71 | context |= 0x00000000; | ||
72 | break; | ||
73 | case NVDEV_ENGINE_GR: | ||
74 | context |= 0x00010000; | ||
75 | break; | ||
76 | case NVDEV_ENGINE_MPEG: | ||
77 | context |= 0x00020000; | ||
78 | break; | ||
79 | default: | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | } | 60 | } |
83 | 61 | ||
84 | context |= 0x80000000; /* valid */ | 62 | mutex_lock(&chan->fifo->base.engine.subdev.mutex); |
85 | context |= chid << 24; | 63 | hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4, |
86 | 64 | handle, context); | |
87 | mutex_lock(&nv_subdev(fifo)->mutex); | 65 | mutex_unlock(&chan->fifo->base.engine.subdev.mutex); |
88 | ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context); | 66 | return hash; |
89 | mutex_unlock(&nv_subdev(fifo)->mutex); | ||
90 | return ret; | ||
91 | } | 67 | } |
92 | 68 | ||
93 | int | 69 | void |
94 | nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) | 70 | nv04_fifo_dma_fini(struct nvkm_fifo_chan *base) |
95 | { | 71 | { |
96 | struct nv04_fifo *fifo = (void *)object->engine; | 72 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
97 | struct nv04_fifo_chan *chan = (void *)object; | 73 | struct nv04_fifo *fifo = chan->fifo; |
98 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 74 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
99 | struct nvkm_memory *fctx = device->imem->ramfc; | 75 | struct nvkm_memory *fctx = device->imem->ramfc; |
100 | struct ramfc_desc *c; | 76 | struct ramfc_desc *c; |
101 | unsigned long flags; | 77 | unsigned long flags; |
78 | u32 mask = fifo->base.nr - 1; | ||
102 | u32 data = chan->ramfc; | 79 | u32 data = chan->ramfc; |
103 | u32 chid; | 80 | u32 chid; |
104 | 81 | ||
@@ -107,7 +84,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) | |||
107 | nvkm_wr32(device, NV03_PFIFO_CACHES, 0); | 84 | nvkm_wr32(device, NV03_PFIFO_CACHES, 0); |
108 | 85 | ||
109 | /* if this channel is active, replace it with a null context */ | 86 | /* if this channel is active, replace it with a null context */ |
110 | chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max; | 87 | chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask; |
111 | if (chid == chan->base.chid) { | 88 | if (chid == chan->base.chid) { |
112 | nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); | 89 | nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); |
113 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0); | 90 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0); |
@@ -129,7 +106,7 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) | |||
129 | 106 | ||
130 | nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0); | 107 | nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0); |
131 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0); | 108 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0); |
132 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); | 109 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask); |
133 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); | 110 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); |
134 | nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); | 111 | nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); |
135 | } | 112 | } |
@@ -138,35 +115,26 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) | |||
138 | nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0); | 115 | nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0); |
139 | nvkm_wr32(device, NV03_PFIFO_CACHES, 1); | 116 | nvkm_wr32(device, NV03_PFIFO_CACHES, 1); |
140 | spin_unlock_irqrestore(&fifo->base.lock, flags); | 117 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
141 | |||
142 | return nvkm_fifo_channel_fini(&chan->base, suspend); | ||
143 | } | 118 | } |
144 | 119 | ||
145 | int | 120 | void |
146 | nv04_fifo_chan_init(struct nvkm_object *object) | 121 | nv04_fifo_dma_init(struct nvkm_fifo_chan *base) |
147 | { | 122 | { |
148 | struct nv04_fifo *fifo = (void *)object->engine; | 123 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
149 | struct nv04_fifo_chan *chan = (void *)object; | 124 | struct nv04_fifo *fifo = chan->fifo; |
150 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 125 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
151 | u32 mask = 1 << chan->base.chid; | 126 | u32 mask = 1 << chan->base.chid; |
152 | unsigned long flags; | 127 | unsigned long flags; |
153 | int ret; | ||
154 | |||
155 | ret = nvkm_fifo_channel_init(&chan->base); | ||
156 | if (ret) | ||
157 | return ret; | ||
158 | |||
159 | spin_lock_irqsave(&fifo->base.lock, flags); | 128 | spin_lock_irqsave(&fifo->base.lock, flags); |
160 | nvkm_mask(device, NV04_PFIFO_MODE, mask, mask); | 129 | nvkm_mask(device, NV04_PFIFO_MODE, mask, mask); |
161 | spin_unlock_irqrestore(&fifo->base.lock, flags); | 130 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
162 | return 0; | ||
163 | } | 131 | } |
164 | 132 | ||
165 | void | 133 | void * |
166 | nv04_fifo_chan_dtor(struct nvkm_object *object) | 134 | nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base) |
167 | { | 135 | { |
168 | struct nv04_fifo *fifo = (void *)object->engine; | 136 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
169 | struct nv04_fifo_chan *chan = (void *)object; | 137 | struct nv04_fifo *fifo = chan->fifo; |
170 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 138 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; |
171 | struct ramfc_desc *c = fifo->ramfc_desc; | 139 | struct ramfc_desc *c = fifo->ramfc_desc; |
172 | 140 | ||
@@ -175,22 +143,30 @@ nv04_fifo_chan_dtor(struct nvkm_object *object) | |||
175 | nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000); | 143 | nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000); |
176 | } while ((++c)->bits); | 144 | } while ((++c)->bits); |
177 | nvkm_done(imem->ramfc); | 145 | nvkm_done(imem->ramfc); |
178 | 146 | return chan; | |
179 | nvkm_fifo_channel_destroy(&chan->base); | ||
180 | } | 147 | } |
181 | 148 | ||
149 | const struct nvkm_fifo_chan_func | ||
150 | nv04_fifo_dma_func = { | ||
151 | .dtor = nv04_fifo_dma_dtor, | ||
152 | .init = nv04_fifo_dma_init, | ||
153 | .fini = nv04_fifo_dma_fini, | ||
154 | .object_ctor = nv04_fifo_dma_object_ctor, | ||
155 | .object_dtor = nv04_fifo_dma_object_dtor, | ||
156 | }; | ||
157 | |||
182 | static int | 158 | static int |
183 | nv04_fifo_chan_ctor(struct nvkm_object *parent, | 159 | nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
184 | struct nvkm_object *engine, | 160 | void *data, u32 size, struct nvkm_object **pobject) |
185 | struct nvkm_oclass *oclass, void *data, u32 size, | ||
186 | struct nvkm_object **pobject) | ||
187 | { | 161 | { |
162 | struct nvkm_object *parent = oclass->parent; | ||
188 | union { | 163 | union { |
189 | struct nv03_channel_dma_v0 v0; | 164 | struct nv03_channel_dma_v0 v0; |
190 | } *args = data; | 165 | } *args = data; |
191 | struct nv04_fifo *fifo = (void *)engine; | 166 | struct nv04_fifo *fifo = nv04_fifo(base); |
192 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 167 | struct nv04_fifo_chan *chan = NULL; |
193 | struct nv04_fifo_chan *chan; | 168 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
169 | struct nvkm_instmem *imem = device->imem; | ||
194 | int ret; | 170 | int ret; |
195 | 171 | ||
196 | nvif_ioctl(parent, "create channel dma size %d\n", size); | 172 | nvif_ioctl(parent, "create channel dma size %d\n", size); |
@@ -198,29 +174,32 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, | |||
198 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " | 174 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " |
199 | "offset %08x\n", args->v0.version, | 175 | "offset %08x\n", args->v0.version, |
200 | args->v0.pushbuf, args->v0.offset); | 176 | args->v0.pushbuf, args->v0.offset); |
177 | if (!args->v0.pushbuf) | ||
178 | return -EINVAL; | ||
201 | } else | 179 | } else |
202 | return ret; | 180 | return ret; |
203 | 181 | ||
204 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, | 182 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
205 | 0x10000, args->v0.pushbuf, | 183 | return -ENOMEM; |
206 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 184 | *pobject = &chan->base.object; |
207 | (1ULL << NVDEV_ENGINE_SW) | | 185 | |
208 | (1ULL << NVDEV_ENGINE_GR), &chan); | 186 | ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base, |
209 | *pobject = nv_object(chan); | 187 | 0x1000, 0x1000, false, 0, args->v0.pushbuf, |
188 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | ||
189 | (1ULL << NVDEV_ENGINE_GR) | | ||
190 | (1ULL << NVDEV_ENGINE_SW), | ||
191 | 0, 0x800000, 0x10000, oclass, &chan->base); | ||
192 | chan->fifo = fifo; | ||
210 | if (ret) | 193 | if (ret) |
211 | return ret; | 194 | return ret; |
212 | 195 | ||
213 | args->v0.chid = chan->base.chid; | 196 | args->v0.chid = chan->base.chid; |
214 | |||
215 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; | ||
216 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | ||
217 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; | ||
218 | chan->ramfc = chan->base.chid * 32; | 197 | chan->ramfc = chan->base.chid * 32; |
219 | 198 | ||
220 | nvkm_kmap(imem->ramfc); | 199 | nvkm_kmap(imem->ramfc); |
221 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); | 200 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); |
222 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); | 201 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); |
223 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); | 202 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4); |
224 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x10, | 203 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x10, |
225 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 204 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
226 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 205 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
@@ -232,51 +211,10 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, | |||
232 | return 0; | 211 | return 0; |
233 | } | 212 | } |
234 | 213 | ||
235 | static struct nvkm_ofuncs | 214 | const struct nvkm_fifo_chan_oclass |
236 | nv04_fifo_ofuncs = { | 215 | nv04_fifo_dma_oclass = { |
237 | .ctor = nv04_fifo_chan_ctor, | 216 | .base.oclass = NV03_CHANNEL_DMA, |
238 | .dtor = nv04_fifo_chan_dtor, | 217 | .base.minver = 0, |
239 | .init = nv04_fifo_chan_init, | 218 | .base.maxver = 0, |
240 | .fini = nv04_fifo_chan_fini, | 219 | .ctor = nv04_fifo_dma_new, |
241 | .map = _nvkm_fifo_channel_map, | ||
242 | .rd32 = _nvkm_fifo_channel_rd32, | ||
243 | .wr32 = _nvkm_fifo_channel_wr32, | ||
244 | .ntfy = _nvkm_fifo_channel_ntfy | ||
245 | }; | ||
246 | |||
247 | struct nvkm_oclass | ||
248 | nv04_fifo_sclass[] = { | ||
249 | { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs }, | ||
250 | {} | ||
251 | }; | ||
252 | |||
253 | int | ||
254 | nv04_fifo_context_ctor(struct nvkm_object *parent, | ||
255 | struct nvkm_object *engine, | ||
256 | struct nvkm_oclass *oclass, void *data, u32 size, | ||
257 | struct nvkm_object **pobject) | ||
258 | { | ||
259 | struct nv04_fifo_base *base; | ||
260 | int ret; | ||
261 | |||
262 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, | ||
263 | 0x1000, NVOBJ_FLAG_HEAP, &base); | ||
264 | *pobject = nv_object(base); | ||
265 | if (ret) | ||
266 | return ret; | ||
267 | |||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | struct nvkm_oclass | ||
272 | nv04_fifo_cclass = { | ||
273 | .handle = NV_ENGCTX(FIFO, 0x04), | ||
274 | .ofuncs = &(struct nvkm_ofuncs) { | ||
275 | .ctor = nv04_fifo_context_ctor, | ||
276 | .dtor = _nvkm_fifo_context_dtor, | ||
277 | .init = _nvkm_fifo_context_init, | ||
278 | .fini = _nvkm_fifo_context_fini, | ||
279 | .rd32 = _nvkm_fifo_context_rd32, | ||
280 | .wr32 = _nvkm_fifo_context_wr32, | ||
281 | }, | ||
282 | }; | 220 | }; |
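The new nv04_fifo_dma_object_ctor() above builds the RAMHT context word inline: bit 31 marks the entry valid, bits 30:24 carry the channel id, and bits 17:16 select the engine, replacing the old open-coded switch spread across object_attach. A small standalone model of that encoding; the engine enum and return convention are stand-ins, not the kernel's NVDEV_ENGINE_* indices:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the nv04 RAMHT context word: bit 31 = valid,
 * bits 30:24 = channel id, bits 17:16 = engine selector. */
enum engine { ENG_SW, ENG_GR, ENG_MPEG };

static int nv04_context(enum engine e, unsigned chid, uint32_t *ctx)
{
        uint32_t context = 0x80000000 | chid << 24;
        switch (e) {
        case ENG_SW  : context |= 0x00000000; break;
        case ENG_GR  : context |= 0x00010000; break;
        case ENG_MPEG: context |= 0x00020000; break;
        default:
                return -1; /* mirrors the WARN_ON(1)/-EINVAL path above */
        }
        *ctx = context;
        return 0;
}

int main(void)
{
        uint32_t ctx;
        if (nv04_context(ENG_GR, 3, &ctx) == 0)
                printf("0x%08x\n", ctx); /* prints 0x83010000 */
        return 0;
}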
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c index a542515e63f0..1ad16205305f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c | |||
@@ -31,17 +31,17 @@ | |||
31 | #include <nvif/unpack.h> | 31 | #include <nvif/unpack.h> |
32 | 32 | ||
33 | static int | 33 | static int |
34 | nv10_fifo_chan_ctor(struct nvkm_object *parent, | 34 | nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
35 | struct nvkm_object *engine, | 35 | void *data, u32 size, struct nvkm_object **pobject) |
36 | struct nvkm_oclass *oclass, void *data, u32 size, | ||
37 | struct nvkm_object **pobject) | ||
38 | { | 36 | { |
37 | struct nvkm_object *parent = oclass->parent; | ||
39 | union { | 38 | union { |
40 | struct nv03_channel_dma_v0 v0; | 39 | struct nv03_channel_dma_v0 v0; |
41 | } *args = data; | 40 | } *args = data; |
42 | struct nv04_fifo *fifo = (void *)engine; | 41 | struct nv04_fifo *fifo = nv04_fifo(base); |
43 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 42 | struct nv04_fifo_chan *chan = NULL; |
44 | struct nv04_fifo_chan *chan; | 43 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
44 | struct nvkm_instmem *imem = device->imem; | ||
45 | int ret; | 45 | int ret; |
46 | 46 | ||
47 | nvif_ioctl(parent, "create channel dma size %d\n", size); | 47 | nvif_ioctl(parent, "create channel dma size %d\n", size); |
@@ -49,29 +49,32 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent, | |||
49 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " | 49 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " |
50 | "offset %08x\n", args->v0.version, | 50 | "offset %08x\n", args->v0.version, |
51 | args->v0.pushbuf, args->v0.offset); | 51 | args->v0.pushbuf, args->v0.offset); |
52 | if (!args->v0.pushbuf) | ||
53 | return -EINVAL; | ||
52 | } else | 54 | } else |
53 | return ret; | 55 | return ret; |
54 | 56 | ||
55 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, | 57 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
56 | 0x10000, args->v0.pushbuf, | 58 | return -ENOMEM; |
57 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 59 | *pobject = &chan->base.object; |
58 | (1ULL << NVDEV_ENGINE_SW) | | 60 | |
59 | (1ULL << NVDEV_ENGINE_GR), &chan); | 61 | ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base, |
60 | *pobject = nv_object(chan); | 62 | 0x1000, 0x1000, false, 0, args->v0.pushbuf, |
63 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | ||
64 | (1ULL << NVDEV_ENGINE_GR) | | ||
65 | (1ULL << NVDEV_ENGINE_SW), | ||
66 | 0, 0x800000, 0x10000, oclass, &chan->base); | ||
67 | chan->fifo = fifo; | ||
61 | if (ret) | 68 | if (ret) |
62 | return ret; | 69 | return ret; |
63 | 70 | ||
64 | args->v0.chid = chan->base.chid; | 71 | args->v0.chid = chan->base.chid; |
65 | |||
66 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; | ||
67 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | ||
68 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; | ||
69 | chan->ramfc = chan->base.chid * 32; | 72 | chan->ramfc = chan->base.chid * 32; |
70 | 73 | ||
71 | nvkm_kmap(imem->ramfc); | 74 | nvkm_kmap(imem->ramfc); |
72 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); | 75 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); |
73 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); | 76 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); |
74 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); | 77 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4); |
75 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x14, | 78 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x14, |
76 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 79 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
77 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 80 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
@@ -83,20 +86,10 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent, | |||
83 | return 0; | 86 | return 0; |
84 | } | 87 | } |
85 | 88 | ||
86 | static struct nvkm_ofuncs | 89 | const struct nvkm_fifo_chan_oclass |
87 | nv10_fifo_ofuncs = { | 90 | nv10_fifo_dma_oclass = { |
88 | .ctor = nv10_fifo_chan_ctor, | 91 | .base.oclass = NV10_CHANNEL_DMA, |
89 | .dtor = nv04_fifo_chan_dtor, | 92 | .base.minver = 0, |
90 | .init = nv04_fifo_chan_init, | 93 | .base.maxver = 0, |
91 | .fini = nv04_fifo_chan_fini, | 94 | .ctor = nv10_fifo_dma_new, |
92 | .map = _nvkm_fifo_channel_map, | ||
93 | .rd32 = _nvkm_fifo_channel_rd32, | ||
94 | .wr32 = _nvkm_fifo_channel_wr32, | ||
95 | .ntfy = _nvkm_fifo_channel_ntfy | ||
96 | }; | ||
97 | |||
98 | struct nvkm_oclass | ||
99 | nv10_fifo_sclass[] = { | ||
100 | { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs }, | ||
101 | {} | ||
102 | }; | 95 | }; |
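The nv10_fifo_dma_oclass constant above follows the new pattern of describing each channel type with a class id plus the min/max interface versions it accepts, instead of registering an nvkm_ofuncs table. A rough standalone model of matching a client request against such a NULL-terminated descriptor list; the structure, field names, and class value are approximations, not the real nvkm_fifo_chan_oclass:

#include <stdint.h>
#include <stdio.h>

struct chan_oclass {
        uint32_t oclass;
        int minver, maxver;
};

static const struct chan_oclass nv10_dma = {
        0x006e /* NV10_CHANNEL_DMA; value illustrative */, 0, 0
};
static const struct chan_oclass *chans[] = { &nv10_dma, NULL };

/* Walk the NULL-terminated list for a class id the client's
 * requested interface version falls inside. */
static const struct chan_oclass *lookup(uint32_t oclass, int ver)
{
        for (int i = 0; chans[i]; i++) {
                if (chans[i]->oclass == oclass &&
                    ver >= chans[i]->minver && ver <= chans[i]->maxver)
                        return chans[i];
        }
        return NULL;
}

int main(void)
{
        printf("%s\n", lookup(0x006e, 0) ? "matched" : "no match");
        return 0;
}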
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c index d0ece53a750b..2fbb9d4f0900 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c | |||
@@ -31,17 +31,17 @@ | |||
31 | #include <nvif/unpack.h> | 31 | #include <nvif/unpack.h> |
32 | 32 | ||
33 | static int | 33 | static int |
34 | nv17_fifo_chan_ctor(struct nvkm_object *parent, | 34 | nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
35 | struct nvkm_object *engine, | 35 | void *data, u32 size, struct nvkm_object **pobject) |
36 | struct nvkm_oclass *oclass, void *data, u32 size, | ||
37 | struct nvkm_object **pobject) | ||
38 | { | 36 | { |
37 | struct nvkm_object *parent = oclass->parent; | ||
39 | union { | 38 | union { |
40 | struct nv03_channel_dma_v0 v0; | 39 | struct nv03_channel_dma_v0 v0; |
41 | } *args = data; | 40 | } *args = data; |
42 | struct nv04_fifo *fifo = (void *)engine; | 41 | struct nv04_fifo *fifo = nv04_fifo(base); |
43 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 42 | struct nv04_fifo_chan *chan = NULL; |
44 | struct nv04_fifo_chan *chan; | 43 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
44 | struct nvkm_instmem *imem = device->imem; | ||
45 | int ret; | 45 | int ret; |
46 | 46 | ||
47 | nvif_ioctl(parent, "create channel dma size %d\n", size); | 47 | nvif_ioctl(parent, "create channel dma size %d\n", size); |
@@ -49,31 +49,33 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent, | |||
49 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " | 49 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " |
50 | "offset %08x\n", args->v0.version, | 50 | "offset %08x\n", args->v0.version, |
51 | args->v0.pushbuf, args->v0.offset); | 51 | args->v0.pushbuf, args->v0.offset); |
52 | if (!args->v0.pushbuf) | ||
53 | return -EINVAL; | ||
52 | } else | 54 | } else |
53 | return ret; | 55 | return ret; |
54 | 56 | ||
55 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, | 57 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
56 | 0x10000, args->v0.pushbuf, | 58 | return -ENOMEM; |
57 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 59 | *pobject = &chan->base.object; |
58 | (1ULL << NVDEV_ENGINE_SW) | | 60 | |
59 | (1ULL << NVDEV_ENGINE_GR) | | 61 | ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base, |
60 | (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */ | 62 | 0x1000, 0x1000, false, 0, args->v0.pushbuf, |
61 | &chan); | 63 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
62 | *pobject = nv_object(chan); | 64 | (1ULL << NVDEV_ENGINE_GR) | |
65 | (1ULL << NVDEV_ENGINE_MPEG) | /* NV31- */ | ||
66 | (1ULL << NVDEV_ENGINE_SW), | ||
67 | 0, 0x800000, 0x10000, oclass, &chan->base); | ||
68 | chan->fifo = fifo; | ||
63 | if (ret) | 69 | if (ret) |
64 | return ret; | 70 | return ret; |
65 | 71 | ||
66 | args->v0.chid = chan->base.chid; | 72 | args->v0.chid = chan->base.chid; |
67 | |||
68 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; | ||
69 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | ||
70 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; | ||
71 | chan->ramfc = chan->base.chid * 64; | 73 | chan->ramfc = chan->base.chid * 64; |
72 | 74 | ||
73 | nvkm_kmap(imem->ramfc); | 75 | nvkm_kmap(imem->ramfc); |
74 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); | 76 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); |
75 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); | 77 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); |
76 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); | 78 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4); |
77 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x14, | 79 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x14, |
78 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 80 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
79 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 81 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
@@ -85,20 +87,10 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent, | |||
85 | return 0; | 87 | return 0; |
86 | } | 88 | } |
87 | 89 | ||
88 | static struct nvkm_ofuncs | 90 | const struct nvkm_fifo_chan_oclass |
89 | nv17_fifo_ofuncs = { | 91 | nv17_fifo_dma_oclass = { |
90 | .ctor = nv17_fifo_chan_ctor, | 92 | .base.oclass = NV17_CHANNEL_DMA, |
91 | .dtor = nv04_fifo_chan_dtor, | 93 | .base.minver = 0, |
92 | .init = nv04_fifo_chan_init, | 94 | .base.maxver = 0, |
93 | .fini = nv04_fifo_chan_fini, | 95 | .ctor = nv17_fifo_dma_new, |
94 | .map = _nvkm_fifo_channel_map, | ||
95 | .rd32 = _nvkm_fifo_channel_rd32, | ||
96 | .wr32 = _nvkm_fifo_channel_wr32, | ||
97 | .ntfy = _nvkm_fifo_channel_ntfy | ||
98 | }; | ||
99 | |||
100 | struct nvkm_oclass | ||
101 | nv17_fifo_sclass[] = { | ||
102 | { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs }, | ||
103 | {} | ||
104 | }; | 96 | }; |
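All of the converted constructors share the same allocation shape: kzalloc the channel, publish it through *pobject, then run the base constructor, so a mid-construction failure is unwound by the caller through the ordinary destructor path rather than by hand in each ctor. A tiny sketch of that publish-before-init pattern, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct object { int live; };
struct chan { struct object base; };

static int base_ctor(struct object *obj) { obj->live = 1; return 0; }

static int chan_new(struct object **pobject)
{
        struct chan *chan;

        if (!(chan = calloc(1, sizeof(*chan))))
                return -1;             /* -ENOMEM in the kernel */
        *pobject = &chan->base;        /* publish before init */

        return base_ctor(&chan->base); /* caller unwinds on error */
}

int main(void)
{
        struct object *obj = NULL;
        if (chan_new(&obj) == 0)
                printf("constructed, live=%d\n", obj->live);
        free(obj);
        return 0;
}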
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c index cd3503cb6837..b46a3b3cd092 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c | |||
@@ -31,36 +31,47 @@ | |||
31 | #include <nvif/class.h> | 31 | #include <nvif/class.h> |
32 | #include <nvif/unpack.h> | 32 | #include <nvif/unpack.h> |
33 | 33 | ||
34 | static bool | ||
35 | nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx) | ||
36 | { | ||
37 | switch (engine->subdev.index) { | ||
38 | case NVDEV_ENGINE_DMAOBJ: | ||
39 | case NVDEV_ENGINE_SW: | ||
40 | return false; | ||
41 | case NVDEV_ENGINE_GR: | ||
42 | *reg = 0x0032e0; | ||
43 | *ctx = 0x38; | ||
44 | return true; | ||
45 | case NVDEV_ENGINE_MPEG: | ||
46 | *reg = 0x00330c; | ||
47 | *ctx = 0x54; | ||
48 | return true; | ||
49 | default: | ||
50 | WARN_ON(1); | ||
51 | return false; | ||
52 | } | ||
53 | } | ||
54 | |||
34 | static int | 55 | static int |
35 | nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend, | 56 | nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base, |
36 | struct nvkm_object *engctx) | 57 | struct nvkm_engine *engine, bool suspend) |
37 | { | 58 | { |
38 | struct nv04_fifo *fifo = (void *)parent->engine; | 59 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
39 | struct nv04_fifo_chan *chan = (void *)parent; | 60 | struct nv04_fifo *fifo = chan->fifo; |
40 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 61 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
41 | struct nvkm_instmem *imem = device->imem; | 62 | struct nvkm_instmem *imem = device->imem; |
42 | unsigned long flags; | 63 | unsigned long flags; |
43 | u32 reg, ctx; | 64 | u32 reg, ctx; |
65 | int chid; | ||
44 | 66 | ||
45 | switch (nv_engidx(engctx->engine)) { | 67 | if (!nv40_fifo_dma_engine(engine, ®, &ctx)) |
46 | case NVDEV_ENGINE_SW: | ||
47 | return 0; | 68 | return 0; |
48 | case NVDEV_ENGINE_GR: | ||
49 | reg = 0x32e0; | ||
50 | ctx = 0x38; | ||
51 | break; | ||
52 | case NVDEV_ENGINE_MPEG: | ||
53 | reg = 0x330c; | ||
54 | ctx = 0x54; | ||
55 | break; | ||
56 | default: | ||
57 | return -EINVAL; | ||
58 | } | ||
59 | 69 | ||
60 | spin_lock_irqsave(&fifo->base.lock, flags); | 70 | spin_lock_irqsave(&fifo->base.lock, flags); |
61 | nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); | 71 | nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); |
62 | 72 | ||
63 | if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid) | 73 | chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1); |
74 | if (chid == chan->base.chid) | ||
64 | nvkm_wr32(device, reg, 0x00000000); | 75 | nvkm_wr32(device, reg, 0x00000000); |
65 | nvkm_kmap(imem->ramfc); | 76 | nvkm_kmap(imem->ramfc); |
66 | nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000); | 77 | nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000); |
@@ -72,38 +83,29 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend, | |||
72 | } | 83 | } |
73 | 84 | ||
74 | static int | 85 | static int |
75 | nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx) | 86 | nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base, |
87 | struct nvkm_engine *engine) | ||
76 | { | 88 | { |
77 | struct nv04_fifo *fifo = (void *)parent->engine; | 89 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
78 | struct nv04_fifo_chan *chan = (void *)parent; | 90 | struct nv04_fifo *fifo = chan->fifo; |
79 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 91 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
80 | struct nvkm_instmem *imem = device->imem; | 92 | struct nvkm_instmem *imem = device->imem; |
81 | unsigned long flags; | 93 | unsigned long flags; |
82 | u32 reg, ctx; | 94 | u32 inst, reg, ctx; |
95 | int chid; | ||
83 | 96 | ||
84 | switch (nv_engidx(engctx->engine)) { | 97 | if (!nv40_fifo_dma_engine(engine, ®, &ctx)) |
85 | case NVDEV_ENGINE_SW: | ||
86 | return 0; | 98 | return 0; |
87 | case NVDEV_ENGINE_GR: | 99 | inst = chan->engn[engine->subdev.index]->addr >> 4; |
88 | reg = 0x32e0; | ||
89 | ctx = 0x38; | ||
90 | break; | ||
91 | case NVDEV_ENGINE_MPEG: | ||
92 | reg = 0x330c; | ||
93 | ctx = 0x54; | ||
94 | break; | ||
95 | default: | ||
96 | return -EINVAL; | ||
97 | } | ||
98 | 100 | ||
99 | spin_lock_irqsave(&fifo->base.lock, flags); | 101 | spin_lock_irqsave(&fifo->base.lock, flags); |
100 | nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4; | ||
101 | nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); | 102 | nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); |
102 | 103 | ||
103 | if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid) | 104 | chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1); |
104 | nvkm_wr32(device, reg, nv_engctx(engctx)->addr); | 105 | if (chid == chan->base.chid) |
106 | nvkm_wr32(device, reg, inst); | ||
105 | nvkm_kmap(imem->ramfc); | 107 | nvkm_kmap(imem->ramfc); |
106 | nvkm_wo32(imem->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr); | 108 | nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst); |
107 | nvkm_done(imem->ramfc); | 109 | nvkm_done(imem->ramfc); |
108 | 110 | ||
109 | nvkm_mask(device, 0x002500, 0x00000001, 0x00000001); | 111 | nvkm_mask(device, 0x002500, 0x00000001, 0x00000001); |
@@ -111,57 +113,91 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx) | |||
111 | return 0; | 113 | return 0; |
112 | } | 114 | } |
113 | 115 | ||
116 | static void | ||
117 | nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base, | ||
118 | struct nvkm_engine *engine) | ||
119 | { | ||
120 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); | ||
121 | if (!chan->engn[engine->subdev.index] || | ||
122 | chan->engn[engine->subdev.index]->object.oclass) { | ||
123 | chan->engn[engine->subdev.index] = NULL; | ||
124 | return; | ||
125 | } | ||
126 | nvkm_gpuobj_del(&chan->engn[engine->subdev.index]); | ||
127 | } | ||
128 | |||
114 | static int | 129 | static int |
115 | nv40_fifo_object_attach(struct nvkm_object *parent, | 130 | nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base, |
116 | struct nvkm_object *object, u32 handle) | 131 | struct nvkm_engine *engine, |
132 | struct nvkm_object *object) | ||
117 | { | 133 | { |
118 | struct nv04_fifo *fifo = (void *)parent->engine; | 134 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); |
119 | struct nv04_fifo_chan *chan = (void *)parent; | 135 | const int engn = engine->subdev.index; |
120 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 136 | u32 reg, ctx; |
121 | u32 context, chid = chan->base.chid; | ||
122 | int ret; | ||
123 | 137 | ||
124 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) | 138 | if (!nv40_fifo_dma_engine(engine, ®, &ctx)) |
125 | context = nv_gpuobj(object)->addr >> 4; | 139 | return 0; |
126 | else | 140 | |
127 | context = 0x00000004; /* just non-zero */ | 141 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) { |
128 | 142 | chan->engn[engn] = nv_gpuobj(object); | |
129 | if (object->engine) { | 143 | return 0; |
130 | switch (nv_engidx(object->engine)) { | ||
131 | case NVDEV_ENGINE_DMAOBJ: | ||
132 | case NVDEV_ENGINE_SW: | ||
133 | context |= 0x00000000; | ||
134 | break; | ||
135 | case NVDEV_ENGINE_GR: | ||
136 | context |= 0x00100000; | ||
137 | break; | ||
138 | case NVDEV_ENGINE_MPEG: | ||
139 | context |= 0x00200000; | ||
140 | break; | ||
141 | default: | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | } | 144 | } |
145 | 145 | ||
146 | context |= chid << 23; | 146 | return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]); |
147 | } | ||
147 | 148 | ||
148 | mutex_lock(&nv_subdev(fifo)->mutex); | 149 | static int |
149 | ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context); | 150 | nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, |
150 | mutex_unlock(&nv_subdev(fifo)->mutex); | 151 | struct nvkm_object *object) |
151 | return ret; | 152 | { |
153 | struct nv04_fifo_chan *chan = nv04_fifo_chan(base); | ||
154 | struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; | ||
155 | u32 context = chan->base.chid << 23; | ||
156 | u32 handle = object->handle; | ||
157 | int hash; | ||
158 | |||
159 | switch (object->engine->subdev.index) { | ||
160 | case NVDEV_ENGINE_DMAOBJ: | ||
161 | case NVDEV_ENGINE_SW : context |= 0x00000000; break; | ||
162 | case NVDEV_ENGINE_GR : context |= 0x00100000; break; | ||
163 | case NVDEV_ENGINE_MPEG : context |= 0x00200000; break; | ||
164 | default: | ||
165 | WARN_ON(1); | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | mutex_lock(&chan->fifo->base.engine.subdev.mutex); | ||
170 | hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4, | ||
171 | handle, context); | ||
172 | mutex_unlock(&chan->fifo->base.engine.subdev.mutex); | ||
173 | return hash; | ||
152 | } | 174 | } |
153 | 175 | ||
176 | static const struct nvkm_fifo_chan_func | ||
177 | nv40_fifo_dma_func = { | ||
178 | .dtor = nv04_fifo_dma_dtor, | ||
179 | .init = nv04_fifo_dma_init, | ||
180 | .fini = nv04_fifo_dma_fini, | ||
181 | .engine_ctor = nv40_fifo_dma_engine_ctor, | ||
182 | .engine_dtor = nv40_fifo_dma_engine_dtor, | ||
183 | .engine_init = nv40_fifo_dma_engine_init, | ||
184 | .engine_fini = nv40_fifo_dma_engine_fini, | ||
185 | .object_ctor = nv40_fifo_dma_object_ctor, | ||
186 | .object_dtor = nv04_fifo_dma_object_dtor, | ||
187 | }; | ||
188 | |||
154 | static int | 189 | static int |
155 | nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 190 | nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
156 | struct nvkm_oclass *oclass, void *data, u32 size, | 191 | void *data, u32 size, struct nvkm_object **pobject) |
157 | struct nvkm_object **pobject) | ||
158 | { | 192 | { |
193 | struct nvkm_object *parent = oclass->parent; | ||
159 | union { | 194 | union { |
160 | struct nv03_channel_dma_v0 v0; | 195 | struct nv03_channel_dma_v0 v0; |
161 | } *args = data; | 196 | } *args = data; |
162 | struct nv04_fifo *fifo = (void *)engine; | 197 | struct nv04_fifo *fifo = nv04_fifo(base); |
163 | struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem; | 198 | struct nv04_fifo_chan *chan = NULL; |
164 | struct nv04_fifo_chan *chan; | 199 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
200 | struct nvkm_instmem *imem = device->imem; | ||
165 | int ret; | 201 | int ret; |
166 | 202 | ||
167 | nvif_ioctl(parent, "create channel dma size %d\n", size); | 203 | nvif_ioctl(parent, "create channel dma size %d\n", size); |
@@ -169,31 +205,33 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
169 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " | 205 | nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx " |
170 | "offset %08x\n", args->v0.version, | 206 | "offset %08x\n", args->v0.version, |
171 | args->v0.pushbuf, args->v0.offset); | 207 | args->v0.pushbuf, args->v0.offset); |
208 | if (!args->v0.pushbuf) | ||
209 | return -EINVAL; | ||
172 | } else | 210 | } else |
173 | return ret; | 211 | return ret; |
174 | 212 | ||
175 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 213 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
176 | 0x1000, args->v0.pushbuf, | 214 | return -ENOMEM; |
177 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 215 | *pobject = &chan->base.object; |
178 | (1ULL << NVDEV_ENGINE_SW) | | 216 | |
179 | (1ULL << NVDEV_ENGINE_GR) | | 217 | ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base, |
180 | (1ULL << NVDEV_ENGINE_MPEG), &chan); | 218 | 0x1000, 0x1000, false, 0, args->v0.pushbuf, |
181 | *pobject = nv_object(chan); | 219 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
220 | (1ULL << NVDEV_ENGINE_GR) | | ||
221 | (1ULL << NVDEV_ENGINE_MPEG) | | ||
222 | (1ULL << NVDEV_ENGINE_SW), | ||
223 | 0, 0xc00000, 0x1000, oclass, &chan->base); | ||
224 | chan->fifo = fifo; | ||
182 | if (ret) | 225 | if (ret) |
183 | return ret; | 226 | return ret; |
184 | 227 | ||
185 | args->v0.chid = chan->base.chid; | 228 | args->v0.chid = chan->base.chid; |
186 | |||
187 | nv_parent(chan)->context_attach = nv40_fifo_context_attach; | ||
188 | nv_parent(chan)->context_detach = nv40_fifo_context_detach; | ||
189 | nv_parent(chan)->object_attach = nv40_fifo_object_attach; | ||
190 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | ||
191 | chan->ramfc = chan->base.chid * 128; | 229 | chan->ramfc = chan->base.chid * 128; |
192 | 230 | ||
193 | nvkm_kmap(imem->ramfc); | 231 | nvkm_kmap(imem->ramfc); |
194 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); | 232 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset); |
195 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); | 233 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset); |
196 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); | 234 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4); |
197 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 | | 235 | nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 | |
198 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 236 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
199 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 237 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
@@ -206,20 +244,10 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
206 | return 0; | 244 | return 0; |
207 | } | 245 | } |
208 | 246 | ||
209 | static struct nvkm_ofuncs | 247 | const struct nvkm_fifo_chan_oclass |
210 | nv40_fifo_ofuncs = { | 248 | nv40_fifo_dma_oclass = { |
211 | .ctor = nv40_fifo_chan_ctor, | 249 | .base.oclass = NV40_CHANNEL_DMA, |
212 | .dtor = nv04_fifo_chan_dtor, | 250 | .base.minver = 0, |
213 | .init = nv04_fifo_chan_init, | 251 | .base.maxver = 0, |
214 | .fini = nv04_fifo_chan_fini, | 252 | .ctor = nv40_fifo_dma_new, |
215 | .map = _nvkm_fifo_channel_map, | ||
216 | .rd32 = _nvkm_fifo_channel_rd32, | ||
217 | .wr32 = _nvkm_fifo_channel_wr32, | ||
218 | .ntfy = _nvkm_fifo_channel_ntfy | ||
219 | }; | ||
220 | |||
221 | struct nvkm_oclass | ||
222 | nv40_fifo_sclass[] = { | ||
223 | { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs }, | ||
224 | {} | ||
225 | }; | 253 | }; |
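nv40_fifo_dma_engine() above centralises the per-engine (register, RAMFC offset) pairs that the attach/detach paths previously duplicated; SW and DMAOBJ carry no context, so the helper returns false and its callers bail out early. Note also that the channel-id mask changes from fifo->base.max to fifo->base.nr - 1, which is equivalent only while the channel count is a power of two. A standalone restatement of the mapping, with stand-in engine ids:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum engine { ENG_DMAOBJ, ENG_SW, ENG_GR, ENG_MPEG };

/* Map a context-switchable engine to its PFIFO register and RAMFC
 * context offset; contextless engines return false. */
static bool engine_regs(enum engine e, uint32_t *reg, uint32_t *ctx)
{
        switch (e) {
        case ENG_DMAOBJ:
        case ENG_SW:
                return false;
        case ENG_GR:
                *reg = 0x0032e0; *ctx = 0x38; return true;
        case ENG_MPEG:
                *reg = 0x00330c; *ctx = 0x54; return true;
        default:
                return false;
        }
}

int main(void)
{
        uint32_t reg, ctx;
        if (engine_regs(ENG_GR, &reg, &ctx))
                printf("reg=0x%06x ctx=0x%02x\n", reg, ctx);
        return 0;
}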
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c index 11a283099235..6b3b15f12c39 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c | |||
@@ -30,15 +30,14 @@ | |||
30 | #include <nvif/unpack.h> | 30 | #include <nvif/unpack.h> |
31 | 31 | ||
32 | static int | 32 | static int |
33 | nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, | 33 | nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
34 | struct nvkm_oclass *oclass, void *data, u32 size, | 34 | void *data, u32 size, struct nvkm_object **pobject) |
35 | struct nvkm_object **pobject) | ||
36 | { | 35 | { |
36 | struct nvkm_object *parent = oclass->parent; | ||
37 | union { | 37 | union { |
38 | struct nv50_channel_dma_v0 v0; | 38 | struct nv50_channel_dma_v0 v0; |
39 | } *args = data; | 39 | } *args = data; |
40 | struct nvkm_device *device = parent->engine->subdev.device; | 40 | struct nv50_fifo *fifo = nv50_fifo(base); |
41 | struct nv50_fifo_base *base = (void *)parent; | ||
42 | struct nv50_fifo_chan *chan; | 41 | struct nv50_fifo_chan *chan; |
43 | int ret; | 42 | int ret; |
44 | 43 | ||
@@ -48,68 +47,45 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, | |||
48 | "pushbuf %llx offset %016llx\n", | 47 | "pushbuf %llx offset %016llx\n", |
49 | args->v0.version, args->v0.vm, args->v0.pushbuf, | 48 | args->v0.version, args->v0.vm, args->v0.pushbuf, |
50 | args->v0.offset); | 49 | args->v0.offset); |
51 | if (args->v0.vm) | 50 | if (!args->v0.pushbuf) |
52 | return -ENOENT; | 51 | return -EINVAL; |
53 | } else | 52 | } else |
54 | return ret; | 53 | return ret; |
55 | 54 | ||
56 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 55 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
57 | 0x2000, args->v0.pushbuf, | 56 | return -ENOMEM; |
58 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 57 | *pobject = &chan->base.object; |
59 | (1ULL << NVDEV_ENGINE_SW) | | 58 | |
60 | (1ULL << NVDEV_ENGINE_GR) | | 59 | ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf, |
61 | (1ULL << NVDEV_ENGINE_MPEG), &chan); | 60 | oclass, chan); |
62 | *pobject = nv_object(chan); | ||
63 | if (ret) | 61 | if (ret) |
64 | return ret; | 62 | return ret; |
65 | 63 | ||
66 | chan->base.inst = base->base.gpuobj.addr; | ||
67 | args->v0.chid = chan->base.chid; | 64 | args->v0.chid = chan->base.chid; |
68 | 65 | ||
69 | nv_parent(chan)->context_attach = nv50_fifo_context_attach; | 66 | nvkm_kmap(chan->ramfc); |
70 | nv_parent(chan)->context_detach = nv50_fifo_context_detach; | 67 | nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); |
71 | nv_parent(chan)->object_attach = nv50_fifo_object_attach; | 68 | nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); |
72 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; | 69 | nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); |
73 | 70 | nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); | |
74 | ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, | 71 | nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); |
75 | &chan->ramht); | 72 | nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); |
76 | if (ret) | 73 | nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); |
77 | return ret; | 74 | nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); |
78 | 75 | nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); | |
79 | nvkm_kmap(base->ramfc); | 76 | nvkm_wo32(chan->ramfc, 0x78, 0x00000000); |
80 | nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); | 77 | nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); |
81 | nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); | 78 | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | |
82 | nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); | ||
83 | nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); | ||
84 | nvkm_wo32(base->ramfc, 0x3c, 0x003f6078); | ||
85 | nvkm_wo32(base->ramfc, 0x44, 0x01003fff); | ||
86 | nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); | ||
87 | nvkm_wo32(base->ramfc, 0x4c, 0xffffffff); | ||
88 | nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); | ||
89 | nvkm_wo32(base->ramfc, 0x78, 0x00000000); | ||
90 | nvkm_wo32(base->ramfc, 0x7c, 0x30000001); | ||
91 | nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | | ||
92 | (4 << 24) /* SEARCH_FULL */ | | 79 | (4 << 24) /* SEARCH_FULL */ | |
93 | (chan->ramht->gpuobj->node->offset >> 4)); | 80 | (chan->ramht->gpuobj->node->offset >> 4)); |
94 | nvkm_done(base->ramfc); | 81 | nvkm_done(chan->ramfc); |
95 | return 0; | 82 | return 0; |
96 | } | 83 | } |
97 | 84 | ||
98 | static struct nvkm_ofuncs | 85 | const struct nvkm_fifo_chan_oclass |
99 | nv50_fifo_ofuncs_dma = { | 86 | nv50_fifo_dma_oclass = { |
100 | .ctor = nv50_fifo_chan_ctor_dma, | 87 | .base.oclass = NV50_CHANNEL_DMA, |
101 | .dtor = nv50_fifo_chan_dtor, | 88 | .base.minver = 0, |
102 | .init = nv50_fifo_chan_init, | 89 | .base.maxver = 0, |
103 | .fini = nv50_fifo_chan_fini, | 90 | .ctor = nv50_fifo_dma_new, |
104 | .map = _nvkm_fifo_channel_map, | ||
105 | .rd32 = _nvkm_fifo_channel_rd32, | ||
106 | .wr32 = _nvkm_fifo_channel_wr32, | ||
107 | .ntfy = _nvkm_fifo_channel_ntfy | ||
108 | }; | ||
109 | |||
110 | struct nvkm_oclass | ||
111 | nv50_fifo_sclass[] = { | ||
112 | { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma }, | ||
113 | { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind }, | ||
114 | {} | ||
115 | }; | 91 | }; |
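As in the g84 variant, the 64-bit pushbuf offset is split with lower_32_bits()/upper_32_bits() and written to two lo/hi pairs in RAMFC, plausibly the initial DMA put and get pointers. A trivial standalone model of the split, with a sample offset:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t offset = 0x0000000123456000ull; /* sample value */

        /* Written twice above: once at +0x08/+0x0c, once at +0x10/+0x14. */
        printf("lo/hi: %08x %08x\n",
               lower_32_bits(offset), upper_32_bits(offset));
        return 0;
}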
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c index ab0ecc423e68..00fa9d3eff7a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c | |||
@@ -47,6 +47,15 @@ g84_fifo_uevent_func = { | |||
47 | .fini = g84_fifo_uevent_fini, | 47 | .fini = g84_fifo_uevent_fini, |
48 | }; | 48 | }; |
49 | 49 | ||
50 | static const struct nvkm_fifo_func | ||
51 | g84_fifo_func = { | ||
52 | .chan = { | ||
53 | &g84_fifo_dma_oclass, | ||
54 | &g84_fifo_gpfifo_oclass, | ||
55 | NULL | ||
56 | }, | ||
57 | }; | ||
58 | |||
50 | static int | 59 | static int |
51 | g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 60 | g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
52 | struct nvkm_oclass *oclass, void *data, u32 size, | 61 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -61,6 +70,8 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
61 | if (ret) | 70 | if (ret) |
62 | return ret; | 71 | return ret; |
63 | 72 | ||
73 | fifo->base.func = &g84_fifo_func; | ||
74 | |||
64 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, | 75 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, |
65 | false, &fifo->runlist[0]); | 76 | false, &fifo->runlist[0]); |
66 | if (ret) | 77 | if (ret) |
@@ -77,8 +88,6 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
77 | 88 | ||
78 | nv_subdev(fifo)->unit = 0x00000100; | 89 | nv_subdev(fifo)->unit = 0x00000100; |
79 | nv_subdev(fifo)->intr = nv04_fifo_intr; | 90 | nv_subdev(fifo)->intr = nv04_fifo_intr; |
80 | nv_engine(fifo)->cclass = &g84_fifo_cclass; | ||
81 | nv_engine(fifo)->sclass = g84_fifo_sclass; | ||
82 | fifo->base.pause = nv04_fifo_pause; | 91 | fifo->base.pause = nv04_fifo_pause; |
83 | fifo->base.start = nv04_fifo_start; | 92 | fifo->base.start = nv04_fifo_start; |
84 | return 0; | 93 | return 0; |
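g84_fifo_func above is the first user of the new nvkm_fifo_func.chan[] table: a NULL-terminated list of channel-class descriptors that replaces the nv_engine cclass/sclass hooks deleted further down. A minimal sketch of walking such a table; all names are illustrative:

#include <stdio.h>

struct chan_oclass { const char *name; };

static const struct chan_oclass dma    = { "G82_CHANNEL_DMA" };
static const struct chan_oclass gpfifo = { "G82_CHANNEL_GPFIFO" };

struct fifo_func {
        const struct chan_oclass *chan[8]; /* NULL-terminated */
};

static const struct fifo_func g84 = {
        .chan = { &dma, &gpfifo, NULL },
};

int main(void)
{
        /* Enumerate the channel classes this engine exposes. */
        for (int i = 0; g84.chan[i]; i++)
                printf("class %d: %s\n", i, g84.chan[i]->name);
        return 0;
}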
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c index b88e7c569c0a..bdad44e84b92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | |||
@@ -58,28 +58,26 @@ gf100_fifo_uevent_func = { | |||
58 | void | 58 | void |
59 | gf100_fifo_runlist_update(struct gf100_fifo *fifo) | 59 | gf100_fifo_runlist_update(struct gf100_fifo *fifo) |
60 | { | 60 | { |
61 | struct gf100_fifo_chan *chan; | ||
61 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 62 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
62 | struct nvkm_device *device = subdev->device; | 63 | struct nvkm_device *device = subdev->device; |
63 | struct nvkm_memory *cur; | 64 | struct nvkm_memory *cur; |
64 | int i, p; | 65 | int nr = 0; |
65 | 66 | ||
66 | mutex_lock(&nv_subdev(fifo)->mutex); | 67 | mutex_lock(&nv_subdev(fifo)->mutex); |
67 | cur = fifo->runlist.mem[fifo->runlist.active]; | 68 | cur = fifo->runlist.mem[fifo->runlist.active]; |
68 | fifo->runlist.active = !fifo->runlist.active; | 69 | fifo->runlist.active = !fifo->runlist.active; |
69 | 70 | ||
70 | nvkm_kmap(cur); | 71 | nvkm_kmap(cur); |
71 | for (i = 0, p = 0; i < 128; i++) { | 72 | list_for_each_entry(chan, &fifo->chan, head) { |
72 | struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i]; | 73 | nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid); |
73 | if (chan && chan->state == RUNNING) { | 74 | nvkm_wo32(cur, (nr * 8) + 4, 0x00000004); |
74 | nvkm_wo32(cur, p + 0, i); | 75 | nr++; |
75 | nvkm_wo32(cur, p + 4, 0x00000004); | ||
76 | p += 8; | ||
77 | } | ||
78 | } | 76 | } |
79 | nvkm_done(cur); | 77 | nvkm_done(cur); |
80 | 78 | ||
81 | nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); | 79 | nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); |
82 | nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3)); | 80 | nvkm_wr32(device, 0x002274, 0x01f00000 | nr); |
83 | 81 | ||
84 | if (wait_event_timeout(fifo->runlist.wait, | 82 | if (wait_event_timeout(fifo->runlist.wait, |
85 | !(nvkm_rd32(device, 0x00227c) & 0x00100000), | 83 | !(nvkm_rd32(device, 0x00227c) & 0x00100000), |
@@ -166,7 +164,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, | |||
166 | assert_spin_locked(&fifo->base.lock); | 164 | assert_spin_locked(&fifo->base.lock); |
167 | 165 | ||
168 | nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); | 166 | nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); |
169 | chan->state = KILLED; | 167 | list_del_init(&chan->head); |
168 | chan->killed = true; | ||
170 | 169 | ||
171 | fifo->mask |= 1ULL << nv_engidx(engine); | 170 | fifo->mask |= 1ULL << nv_engidx(engine); |
172 | schedule_work(&fifo->fault); | 171 | schedule_work(&fifo->fault); |
@@ -198,11 +197,15 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo) | |||
198 | (void)save; | 197 | (void)save; |
199 | 198 | ||
200 | if (busy && unk0 && unk1) { | 199 | if (busy && unk0 && unk1) { |
201 | if (!(chan = (void *)fifo->base.channel[chid])) | 200 | list_for_each_entry(chan, &fifo->chan, head) { |
202 | continue; | 201 | if (chan->base.chid == chid) { |
203 | if (!(engine = gf100_fifo_engine(fifo, engn))) | 202 | engine = gf100_fifo_engine(fifo, engn); |
204 | continue; | 203 | if (!engine) |
205 | gf100_fifo_recover(fifo, engine, chan); | 204 | break; |
205 | gf100_fifo_recover(fifo, engine, chan); | ||
206 | break; | ||
207 | } | ||
208 | } | ||
206 | } | 209 | } |
207 | } | 210 | } |
208 | spin_unlock_irqrestore(&fifo->base.lock, flags); | 211 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
@@ -343,7 +346,8 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit) | |||
343 | write ? "write" : "read", (u64)vahi << 32 | valo, | 346 | write ? "write" : "read", (u64)vahi << 32 | valo, |
344 | unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", | 347 | unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", |
345 | reason, er ? er->name : "", chan ? chan->chid : -1, | 348 | reason, er ? er->name : "", chan ? chan->chid : -1, |
346 | (u64)inst << 12, nvkm_client_name(chan)); | 349 | (u64)inst << 12, |
350 | chan ? chan->object.client->name : "unknown"); | ||
347 | 351 | ||
348 | if (engine && chan) | 352 | if (engine && chan) |
349 | gf100_fifo_recover(fifo, engine, (void *)chan); | 353 | gf100_fifo_recover(fifo, engine, (void *)chan); |
@@ -369,6 +373,8 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit) | |||
369 | u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f; | 373 | u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f; |
370 | u32 subc = (addr & 0x00070000) >> 16; | 374 | u32 subc = (addr & 0x00070000) >> 16; |
371 | u32 mthd = (addr & 0x00003ffc); | 375 | u32 mthd = (addr & 0x00003ffc); |
376 | struct nvkm_fifo_chan *chan; | ||
377 | unsigned long flags; | ||
372 | u32 show = stat; | 378 | u32 show = stat; |
373 | char msg[128]; | 379 | char msg[128]; |
374 | 380 | ||
@@ -381,11 +387,13 @@ gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit) | |||
381 | 387 | ||
382 | if (show) { | 388 | if (show) { |
383 | nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show); | 389 | nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show); |
384 | nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d " | 390 | chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); |
385 | "mthd %04x data %08x\n", | 391 | nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " |
386 | unit, show, msg, chid, | 392 | "subc %d mthd %04x data %08x\n", |
387 | nvkm_client_name_for_fifo_chid(&fifo->base, chid), | 393 | unit, show, msg, chid, chan ? chan->inst->addr : 0, |
394 | chan ? chan->object.client->name : "unknown", | ||
388 | subc, mthd, data); | 395 | subc, mthd, data); |
396 | nvkm_fifo_chan_put(&fifo->base, flags, &chan); | ||
389 | } | 397 | } |
390 | 398 | ||
391 | nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); | 399 | nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); |
@@ -579,6 +587,14 @@ gf100_fifo_dtor(struct nvkm_object *object) | |||
579 | nvkm_fifo_destroy(&fifo->base); | 587 | nvkm_fifo_destroy(&fifo->base); |
580 | } | 588 | } |
581 | 589 | ||
590 | static const struct nvkm_fifo_func | ||
591 | gf100_fifo_func = { | ||
592 | .chan = { | ||
593 | &gf100_fifo_gpfifo_oclass, | ||
594 | NULL | ||
595 | }, | ||
596 | }; | ||
597 | |||
582 | static int | 598 | static int |
583 | gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 599 | gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
584 | struct nvkm_oclass *oclass, void *data, u32 size, | 600 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -594,6 +610,9 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
594 | if (ret) | 610 | if (ret) |
595 | return ret; | 611 | return ret; |
596 | 612 | ||
613 | fifo->base.func = &gf100_fifo_func; | ||
614 | |||
615 | INIT_LIST_HEAD(&fifo->chan); | ||
597 | INIT_WORK(&fifo->fault, gf100_fifo_recover_work); | 616 | INIT_WORK(&fifo->fault, gf100_fifo_recover_work); |
598 | 617 | ||
599 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, | 618 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, |
@@ -625,8 +644,6 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
625 | 644 | ||
626 | nv_subdev(fifo)->unit = 0x00000100; | 645 | nv_subdev(fifo)->unit = 0x00000100; |
627 | nv_subdev(fifo)->intr = gf100_fifo_intr; | 646 | nv_subdev(fifo)->intr = gf100_fifo_intr; |
628 | nv_engine(fifo)->cclass = &gf100_fifo_cclass; | ||
629 | nv_engine(fifo)->sclass = gf100_fifo_sclass; | ||
630 | return 0; | 647 | return 0; |
631 | } | 648 | } |
632 | 649 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h index 5190bbc6e1a1..c649ca9b53e3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h | |||
@@ -1,10 +1,15 @@ | |||
1 | #ifndef __GF100_FIFO_H__ | 1 | #ifndef __GF100_FIFO_H__ |
2 | #define __GF100_FIFO_H__ | 2 | #define __GF100_FIFO_H__ |
3 | #define gf100_fifo(p) container_of((p), struct gf100_fifo, base) | ||
3 | #include "priv.h" | 4 | #include "priv.h" |
4 | 5 | ||
6 | #include <subdev/mmu.h> | ||
7 | |||
5 | struct gf100_fifo { | 8 | struct gf100_fifo { |
6 | struct nvkm_fifo base; | 9 | struct nvkm_fifo base; |
7 | 10 | ||
11 | struct list_head chan; | ||
12 | |||
8 | struct work_struct fault; | 13 | struct work_struct fault; |
9 | u64 mask; | 14 | u64 mask; |
10 | 15 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 9ab3fd40b7dd..e7f467997194 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | |||
@@ -32,23 +32,6 @@ | |||
32 | 32 | ||
33 | #include <nvif/class.h> | 33 | #include <nvif/class.h> |
34 | 34 | ||
35 | #define _(a,b) { (a), ((1ULL << (a)) | (b)) } | ||
36 | static const struct { | ||
37 | u64 subdev; | ||
38 | u64 mask; | ||
39 | } fifo_engine[] = { | ||
40 | _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) | | ||
41 | (1ULL << NVDEV_ENGINE_CE2)), | ||
42 | _(NVDEV_ENGINE_MSPDEC , 0), | ||
43 | _(NVDEV_ENGINE_MSPPP , 0), | ||
44 | _(NVDEV_ENGINE_MSVLD , 0), | ||
45 | _(NVDEV_ENGINE_CE0 , 0), | ||
46 | _(NVDEV_ENGINE_CE1 , 0), | ||
47 | _(NVDEV_ENGINE_MSENC , 0), | ||
48 | }; | ||
49 | #undef _ | ||
50 | #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) | ||
51 | |||
52 | static void | 35 | static void |
53 | gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) | 36 | gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) |
54 | { | 37 | { |
@@ -76,28 +59,26 @@ void | |||
76 | gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) | 59 | gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) |
77 | { | 60 | { |
78 | struct gk104_fifo_engn *engn = &fifo->engine[engine]; | 61 | struct gk104_fifo_engn *engn = &fifo->engine[engine]; |
62 | struct gk104_fifo_chan *chan; | ||
79 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 63 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
80 | struct nvkm_device *device = subdev->device; | 64 | struct nvkm_device *device = subdev->device; |
81 | struct nvkm_memory *cur; | 65 | struct nvkm_memory *cur; |
82 | int i, p; | 66 | int nr = 0; |
83 | 67 | ||
84 | mutex_lock(&nv_subdev(fifo)->mutex); | 68 | mutex_lock(&nv_subdev(fifo)->mutex); |
85 | cur = engn->runlist[engn->cur_runlist]; | 69 | cur = engn->runlist[engn->cur_runlist]; |
86 | engn->cur_runlist = !engn->cur_runlist; | 70 | engn->cur_runlist = !engn->cur_runlist; |
87 | 71 | ||
88 | nvkm_kmap(cur); | 72 | nvkm_kmap(cur); |
89 | for (i = 0, p = 0; i < fifo->base.max; i++) { | 73 | list_for_each_entry(chan, &engn->chan, head) { |
90 | struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i]; | 74 | nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid); |
91 | if (chan && chan->state == RUNNING && chan->engine == engine) { | 75 | nvkm_wo32(cur, (nr * 8) + 4, 0x00000000); |
92 | nvkm_wo32(cur, p + 0, i); | 76 | nr++; |
93 | nvkm_wo32(cur, p + 4, 0x00000000); | ||
94 | p += 8; | ||
95 | } | ||
96 | } | 77 | } |
97 | nvkm_done(cur); | 78 | nvkm_done(cur); |
98 | 79 | ||
99 | nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); | 80 | nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12); |
100 | nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3)); | 81 | nvkm_wr32(device, 0x002274, (engine << 20) | nr); |
101 | 82 | ||
102 | if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + | 83 | if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + |
103 | (engine * 0x08)) & 0x00100000), | 84 | (engine * 0x08)) & 0x00100000), |
@@ -106,31 +87,13 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) | |||
106 | mutex_unlock(&nv_subdev(fifo)->mutex); | 87 | mutex_unlock(&nv_subdev(fifo)->mutex); |
107 | } | 88 | } |
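The rewritten runlist builder walks the per-engine list of resident channels rather than scanning every possible chid, emitting one 8-byte (chid, flags) entry per channel and then submitting the entry count. A user-space approximation of that loop, with a plain singly-linked list standing in for the kernel's intrusive list:

/* sketch of the runlist build; types and names are invented */
#include <stdio.h>
#include <stdint.h>

struct chan { int chid; struct chan *next; };

static void runlist_update(uint32_t *runlist, struct chan *head)
{
	int nr = 0;

	for (struct chan *chan = head; chan; chan = chan->next) {
		runlist[nr * 2 + 0] = chan->chid;	/* word 0: chid */
		runlist[nr * 2 + 1] = 0x00000000;	/* word 1: flags */
		nr++;
	}
	/* hw submit above is: write base >> 12, then (engine << 20) | nr */
	printf("submitting runlist with %d entries\n", nr);
}

int main(void)
{
	struct chan c2 = { 7, NULL }, c1 = { 3, &c2 };
	uint32_t runlist[32];

	runlist_update(runlist, &c1);
	return 0;
}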
108 | 89 | ||
109 | static inline int | ||
110 | gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn) | ||
111 | { | ||
112 | switch (engn) { | ||
113 | case NVDEV_ENGINE_GR : | ||
114 | case NVDEV_ENGINE_CE2 : engn = 0; break; | ||
115 | case NVDEV_ENGINE_MSVLD : engn = 1; break; | ||
116 | case NVDEV_ENGINE_MSPPP : engn = 2; break; | ||
117 | case NVDEV_ENGINE_MSPDEC: engn = 3; break; | ||
118 | case NVDEV_ENGINE_CE0 : engn = 4; break; | ||
119 | case NVDEV_ENGINE_CE1 : engn = 5; break; | ||
120 | case NVDEV_ENGINE_MSENC : engn = 6; break; | ||
121 | default: | ||
122 | return -1; | ||
123 | } | ||
124 | |||
125 | return engn; | ||
126 | } | ||
127 | |||
128 | static inline struct nvkm_engine * | 90 | static inline struct nvkm_engine * |
129 | gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) | 91 | gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) |
130 | { | 92 | { |
131 | if (engn >= ARRAY_SIZE(fifo_engine)) | 93 | u64 subdevs = gk104_fifo_engine_subdev(engn); |
132 | return NULL; | 94 | if (subdevs) |
133 | return nvkm_engine(fifo, fifo_engine[engn].subdev); | 95 | return nvkm_engine(fifo, __ffs(subdevs)); |
96 | return NULL; | ||
134 | } | 97 | } |
135 | 98 | ||
136 | static void | 99 | static void |
@@ -149,7 +112,7 @@ gk104_fifo_recover_work(struct work_struct *work) | |||
149 | spin_unlock_irqrestore(&fifo->base.lock, flags); | 112 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
150 | 113 | ||
151 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) | 114 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) |
152 | engm |= 1 << gk104_fifo_engidx(fifo, engn); | 115 | engm |= 1 << gk104_fifo_subdev_engine(engn); |
153 | nvkm_mask(device, 0x002630, engm, engm); | 116 | nvkm_mask(device, 0x002630, engm, engm); |
154 | 117 | ||
155 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { | 118 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { |
@@ -157,7 +120,7 @@ gk104_fifo_recover_work(struct work_struct *work) | |||
157 | nvkm_subdev_fini(&engine->subdev, false); | 120 | nvkm_subdev_fini(&engine->subdev, false); |
158 | WARN_ON(nvkm_subdev_init(&engine->subdev)); | 121 | WARN_ON(nvkm_subdev_init(&engine->subdev)); |
159 | } | 122 | } |
160 | gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn)); | 123 | gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn)); |
161 | } | 124 | } |
162 | 125 | ||
163 | nvkm_wr32(device, 0x00262c, engm); | 126 | nvkm_wr32(device, 0x00262c, engm); |
@@ -177,7 +140,8 @@ gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, | |||
177 | assert_spin_locked(&fifo->base.lock); | 140 | assert_spin_locked(&fifo->base.lock); |
178 | 141 | ||
179 | nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); | 142 | nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); |
180 | chan->state = KILLED; | 143 | list_del_init(&chan->head); |
144 | chan->killed = true; | ||
181 | 145 | ||
182 | fifo->mask |= 1ULL << nv_engidx(engine); | 146 | fifo->mask |= 1ULL << nv_engidx(engine); |
183 | schedule_work(&fifo->fault); | 147 | schedule_work(&fifo->fault); |
@@ -223,7 +187,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) | |||
223 | u32 engn; | 187 | u32 engn; |
224 | 188 | ||
225 | spin_lock_irqsave(&fifo->base.lock, flags); | 189 | spin_lock_irqsave(&fifo->base.lock, flags); |
226 | for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { | 190 | for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) { |
227 | u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); | 191 | u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); |
228 | u32 busy = (stat & 0x80000000); | 192 | u32 busy = (stat & 0x80000000); |
229 | u32 next = (stat & 0x07ff0000) >> 16; | 193 | u32 next = (stat & 0x07ff0000) >> 16; |
@@ -235,11 +199,15 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) | |||
235 | (void)save; | 199 | (void)save; |
236 | 200 | ||
237 | if (busy && chsw) { | 201 | if (busy && chsw) { |
238 | if (!(chan = (void *)fifo->base.channel[chid])) | 202 | list_for_each_entry(chan, &fifo->engine[engn].chan, head) { |
239 | continue; | 203 | if (chan->base.chid == chid) { |
240 | if (!(engine = gk104_fifo_engine(fifo, engn))) | 204 | engine = gk104_fifo_engine(fifo, engn); |
241 | continue; | 205 | if (!engine) |
242 | gk104_fifo_recover(fifo, engine, chan); | 206 | break; |
207 | gk104_fifo_recover(fifo, engine, chan); | ||
208 | break; | ||
209 | } | ||
210 | } | ||
243 | } | 211 | } |
244 | } | 212 | } |
245 | spin_unlock_irqrestore(&fifo->base.lock, flags); | 213 | spin_unlock_irqrestore(&fifo->base.lock, flags); |
@@ -444,7 +412,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) | |||
444 | write ? "write" : "read", (u64)vahi << 32 | valo, | 412 | write ? "write" : "read", (u64)vahi << 32 | valo, |
445 | unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", | 413 | unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", |
446 | reason, er ? er->name : "", chan ? chan->chid : -1, | 414 | reason, er ? er->name : "", chan ? chan->chid : -1, |
447 | (u64)inst << 12, nvkm_client_name(chan)); | 415 | (u64)inst << 12, |
416 | chan ? chan->object.client->name : "unknown"); | ||
448 | 417 | ||
449 | if (engine && chan) | 418 | if (engine && chan) |
450 | gk104_fifo_recover(fifo, engine, (void *)chan); | 419 | gk104_fifo_recover(fifo, engine, (void *)chan); |
@@ -498,6 +467,8 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) | |||
498 | u32 subc = (addr & 0x00070000) >> 16; | 467 | u32 subc = (addr & 0x00070000) >> 16; |
499 | u32 mthd = (addr & 0x00003ffc); | 468 | u32 mthd = (addr & 0x00003ffc); |
500 | u32 show = stat; | 469 | u32 show = stat; |
470 | struct nvkm_fifo_chan *chan; | ||
471 | unsigned long flags; | ||
501 | char msg[128]; | 472 | char msg[128]; |
502 | 473 | ||
503 | if (stat & 0x00800000) { | 474 | if (stat & 0x00800000) { |
@@ -510,11 +481,13 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) | |||
510 | 481 | ||
511 | if (show) { | 482 | if (show) { |
512 | nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); | 483 | nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); |
513 | nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d " | 484 | chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); |
514 | "mthd %04x data %08x\n", | 485 | nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " |
515 | unit, show, msg, chid, | 486 | "subc %d mthd %04x data %08x\n", |
516 | nvkm_client_name_for_fifo_chid(&fifo->base, chid), | 487 | unit, show, msg, chid, chan ? chan->inst->addr : 0, |
488 | chan ? chan->object.client->name : "unknown", | ||
517 | subc, mthd, data); | 489 | subc, mthd, data); |
490 | nvkm_fifo_chan_put(&fifo->base, flags, &chan); | ||
518 | } | 491 | } |
519 | 492 | ||
520 | nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); | 493 | nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); |
@@ -722,7 +695,7 @@ gk104_fifo_dtor(struct nvkm_object *object) | |||
722 | nvkm_vm_put(&fifo->user.bar); | 695 | nvkm_vm_put(&fifo->user.bar); |
723 | nvkm_memory_del(&fifo->user.mem); | 696 | nvkm_memory_del(&fifo->user.mem); |
724 | 697 | ||
725 | for (i = 0; i < FIFO_ENGINE_NR; i++) { | 698 | for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { |
726 | nvkm_memory_del(&fifo->engine[i].runlist[1]); | 699 | nvkm_memory_del(&fifo->engine[i].runlist[1]); |
727 | nvkm_memory_del(&fifo->engine[i].runlist[0]); | 700 | nvkm_memory_del(&fifo->engine[i].runlist[0]); |
728 | } | 701 | } |
@@ -730,6 +703,14 @@ gk104_fifo_dtor(struct nvkm_object *object) | |||
730 | nvkm_fifo_destroy(&fifo->base); | 703 | nvkm_fifo_destroy(&fifo->base); |
731 | } | 704 | } |
732 | 705 | ||
706 | static const struct nvkm_fifo_func | ||
707 | gk104_fifo_func = { | ||
708 | .chan = { | ||
709 | &gk104_fifo_gpfifo_oclass, | ||
710 | NULL | ||
711 | }, | ||
712 | }; | ||
713 | |||
733 | int | 714 | int |
734 | gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 715 | gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
735 | struct nvkm_oclass *oclass, void *data, u32 size, | 716 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -747,9 +728,11 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
747 | if (ret) | 728 | if (ret) |
748 | return ret; | 729 | return ret; |
749 | 730 | ||
731 | fifo->base.func = &gk104_fifo_func; | ||
732 | |||
750 | INIT_WORK(&fifo->fault, gk104_fifo_recover_work); | 733 | INIT_WORK(&fifo->fault, gk104_fifo_recover_work); |
751 | 734 | ||
752 | for (i = 0; i < FIFO_ENGINE_NR; i++) { | 735 | for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) { |
753 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, | 736 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, |
754 | 0x8000, 0x1000, false, | 737 | 0x8000, 0x1000, false, |
755 | &fifo->engine[i].runlist[0]); | 738 | &fifo->engine[i].runlist[0]); |
@@ -763,6 +746,7 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
763 | return ret; | 746 | return ret; |
764 | 747 | ||
765 | init_waitqueue_head(&fifo->engine[i].wait); | 748 | init_waitqueue_head(&fifo->engine[i].wait); |
749 | INIT_LIST_HEAD(&fifo->engine[i].chan); | ||
766 | } | 750 | } |
767 | 751 | ||
768 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, | 752 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, |
@@ -783,8 +767,6 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
783 | 767 | ||
784 | nv_subdev(fifo)->unit = 0x00000100; | 768 | nv_subdev(fifo)->unit = 0x00000100; |
785 | nv_subdev(fifo)->intr = gk104_fifo_intr; | 769 | nv_subdev(fifo)->intr = gk104_fifo_intr; |
786 | nv_engine(fifo)->cclass = &gk104_fifo_cclass; | ||
787 | nv_engine(fifo)->sclass = gk104_fifo_sclass; | ||
788 | return 0; | 770 | return 0; |
789 | } | 771 | } |
790 | 772 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h index b71abef84349..1103e6b1ee5a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | |||
@@ -1,11 +1,15 @@ | |||
1 | #ifndef __GK104_FIFO_H__ | 1 | #ifndef __GK104_FIFO_H__ |
2 | #define __GK104_FIFO_H__ | 2 | #define __GK104_FIFO_H__ |
3 | #define gk104_fifo(p) container_of((p), struct gk104_fifo, base) | ||
3 | #include "priv.h" | 4 | #include "priv.h" |
4 | 5 | ||
6 | #include <subdev/mmu.h> | ||
7 | |||
5 | struct gk104_fifo_engn { | 8 | struct gk104_fifo_engn { |
6 | struct nvkm_memory *runlist[2]; | 9 | struct nvkm_memory *runlist[2]; |
7 | int cur_runlist; | 10 | int cur_runlist; |
8 | wait_queue_head_t wait; | 11 | wait_queue_head_t wait; |
12 | struct list_head chan; | ||
9 | }; | 13 | }; |
10 | 14 | ||
11 | struct gk104_fifo { | 15 | struct gk104_fifo { |
@@ -38,4 +42,42 @@ void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine); | |||
38 | int gm204_fifo_ctor(struct nvkm_object *, struct nvkm_object *, | 42 | int gm204_fifo_ctor(struct nvkm_object *, struct nvkm_object *, |
39 | struct nvkm_oclass *, void *, u32, | 43 | struct nvkm_oclass *, void *, u32, |
40 | struct nvkm_object **); | 44 | struct nvkm_object **); |
45 | |||
46 | static inline u64 | ||
47 | gk104_fifo_engine_subdev(int engine) | ||
48 | { | ||
49 | switch (engine) { | ||
50 | case 0: return (1ULL << NVDEV_ENGINE_GR) | | ||
51 | (1ULL << NVDEV_ENGINE_SW) | | ||
52 | (1ULL << NVDEV_ENGINE_CE2); | ||
53 | case 1: return (1ULL << NVDEV_ENGINE_MSPDEC); | ||
54 | case 2: return (1ULL << NVDEV_ENGINE_MSPPP); | ||
55 | case 3: return (1ULL << NVDEV_ENGINE_MSVLD); | ||
56 | case 4: return (1ULL << NVDEV_ENGINE_CE0); | ||
57 | case 5: return (1ULL << NVDEV_ENGINE_CE1); | ||
58 | case 6: return (1ULL << NVDEV_ENGINE_MSENC); | ||
59 | default: | ||
60 | WARN_ON(1); | ||
61 | return 0; | ||
62 | } | ||
63 | } | ||
64 | |||
65 | static inline int | ||
66 | gk104_fifo_subdev_engine(int subdev) | ||
67 | { | ||
68 | switch (subdev) { | ||
69 | case NVDEV_ENGINE_GR: | ||
70 | case NVDEV_ENGINE_SW: | ||
71 | case NVDEV_ENGINE_CE2 : return 0; | ||
72 | case NVDEV_ENGINE_MSPDEC: return 1; | ||
73 | case NVDEV_ENGINE_MSPPP : return 2; | ||
74 | case NVDEV_ENGINE_MSVLD : return 3; | ||
75 | case NVDEV_ENGINE_CE0 : return 4; | ||
76 | case NVDEV_ENGINE_CE1 : return 5; | ||
77 | case NVDEV_ENGINE_MSENC : return 6; | ||
78 | default: | ||
79 | WARN_ON(1); | ||
80 | return 0; | ||
81 | } | ||
82 | } | ||
41 | #endif | 83 | #endif |
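gk104_fifo_engine_subdev() and gk104_fifo_subdev_engine() replace the old fifo_engine[] table with a pair of inverse mappings between a runlist index and a subdev bitmask; the lowest set bit of the mask names the primary engine. A sketch of the round trip, using __builtin_ctzll as a stand-in for the kernel's __ffs and placeholder bit positions rather than the real NVDEV_ENGINE_* values:

/* sketch only: bit positions below are hypothetical */
#include <stdio.h>
#include <stdint.h>

enum { ENG_GR = 4, ENG_SW = 5, ENG_CE2 = 6 };	/* invented indices */

static uint64_t engine_subdev(int engine)
{
	switch (engine) {
	case 0: return (1ULL << ENG_GR) |
		       (1ULL << ENG_SW) |
		       (1ULL << ENG_CE2);
	default: return 0;
	}
}

int main(void)
{
	uint64_t subdevs = engine_subdev(0);
	/* lowest bit in the mask identifies the primary engine (GR here) */
	printf("primary subdev index: %d\n", __builtin_ctzll(subdevs));
	return 0;
}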
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c index 2367b4f81a91..18c68ac741a0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c | |||
@@ -24,6 +24,14 @@ | |||
24 | #include "gk104.h" | 24 | #include "gk104.h" |
25 | #include "changk104.h" | 25 | #include "changk104.h" |
26 | 26 | ||
27 | static const struct nvkm_fifo_func | ||
28 | gm204_fifo_func = { | ||
29 | .chan = { | ||
30 | &gm204_fifo_gpfifo_oclass, | ||
31 | NULL | ||
32 | }, | ||
33 | }; | ||
34 | |||
27 | int | 35 | int |
28 | gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 36 | gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
29 | struct nvkm_oclass *oclass, void *data, u32 size, | 37 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -32,7 +40,7 @@ gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
32 | int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject); | 40 | int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject); |
33 | if (ret == 0) { | 41 | if (ret == 0) { |
34 | struct gk104_fifo *fifo = (void *)*pobject; | 42 | struct gk104_fifo *fifo = (void *)*pobject; |
35 | nv_engine(fifo)->sclass = gm204_fifo_sclass; | 43 | fifo->base.func = &gm204_fifo_func; |
36 | } | 44 | } |
37 | return ret; | 45 | return ret; |
38 | } | 46 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c index fd11e0afec25..820132363f68 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c | |||
@@ -30,15 +30,14 @@ | |||
30 | #include <nvif/unpack.h> | 30 | #include <nvif/unpack.h> |
31 | 31 | ||
32 | static int | 32 | static int |
33 | g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, | 33 | g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
34 | struct nvkm_oclass *oclass, void *data, u32 size, | 34 | void *data, u32 size, struct nvkm_object **pobject) |
35 | struct nvkm_object **pobject) | ||
36 | { | 35 | { |
36 | struct nvkm_object *parent = oclass->parent; | ||
37 | union { | 37 | union { |
38 | struct nv50_channel_gpfifo_v0 v0; | 38 | struct nv50_channel_gpfifo_v0 v0; |
39 | } *args = data; | 39 | } *args = data; |
40 | struct nvkm_device *device = parent->engine->subdev.device; | 40 | struct nv50_fifo *fifo = nv50_fifo(base); |
41 | struct nv50_fifo_base *base = (void *)parent; | ||
42 | struct nv50_fifo_chan *chan; | 41 | struct nv50_fifo_chan *chan; |
43 | u64 ioffset, ilength; | 42 | u64 ioffset, ilength; |
44 | int ret; | 43 | int ret; |
@@ -50,73 +49,46 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, | |||
50 | "ilength %08x\n", | 49 | "ilength %08x\n", |
51 | args->v0.version, args->v0.vm, args->v0.pushbuf, | 50 | args->v0.version, args->v0.vm, args->v0.pushbuf, |
52 | args->v0.ioffset, args->v0.ilength); | 51 | args->v0.ioffset, args->v0.ilength); |
53 | if (args->v0.vm) | 52 | if (!args->v0.pushbuf) |
54 | return -ENOENT; | 53 | return -EINVAL; |
55 | } else | 54 | } else |
56 | return ret; | 55 | return ret; |
57 | 56 | ||
58 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 57 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
59 | 0x2000, args->v0.pushbuf, | 58 | return -ENOMEM; |
60 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 59 | *pobject = &chan->base.object; |
61 | (1ULL << NVDEV_ENGINE_SW) | | ||
62 | (1ULL << NVDEV_ENGINE_GR) | | ||
63 | (1ULL << NVDEV_ENGINE_MPEG) | | ||
64 | (1ULL << NVDEV_ENGINE_ME) | | ||
65 | (1ULL << NVDEV_ENGINE_VP) | | ||
66 | (1ULL << NVDEV_ENGINE_CIPHER) | | ||
67 | (1ULL << NVDEV_ENGINE_SEC) | | ||
68 | (1ULL << NVDEV_ENGINE_BSP) | | ||
69 | (1ULL << NVDEV_ENGINE_MSVLD) | | ||
70 | (1ULL << NVDEV_ENGINE_MSPDEC) | | ||
71 | (1ULL << NVDEV_ENGINE_MSPPP) | | ||
72 | (1ULL << NVDEV_ENGINE_CE0) | | ||
73 | (1ULL << NVDEV_ENGINE_VIC), &chan); | ||
74 | *pobject = nv_object(chan); | ||
75 | if (ret) | ||
76 | return ret; | ||
77 | |||
78 | chan->base.inst = base->base.gpuobj.addr; | ||
79 | args->v0.chid = chan->base.chid; | ||
80 | 60 | ||
81 | ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, | 61 | ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf, |
82 | &chan->ramht); | 62 | oclass, chan); |
83 | if (ret) | 63 | if (ret) |
84 | return ret; | 64 | return ret; |
85 | 65 | ||
86 | nv_parent(chan)->context_attach = g84_fifo_context_attach; | 66 | args->v0.chid = chan->base.chid; |
87 | nv_parent(chan)->context_detach = g84_fifo_context_detach; | ||
88 | nv_parent(chan)->object_attach = g84_fifo_object_attach; | ||
89 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; | ||
90 | |||
91 | ioffset = args->v0.ioffset; | 67 | ioffset = args->v0.ioffset; |
92 | ilength = order_base_2(args->v0.ilength / 8); | 68 | ilength = order_base_2(args->v0.ilength / 8); |
93 | 69 | ||
94 | nvkm_kmap(base->ramfc); | 70 | nvkm_kmap(chan->ramfc); |
95 | nvkm_wo32(base->ramfc, 0x3c, 0x403f6078); | 71 | nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078); |
96 | nvkm_wo32(base->ramfc, 0x44, 0x01003fff); | 72 | nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); |
97 | nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); | 73 | nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); |
98 | nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset)); | 74 | nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset)); |
99 | nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); | 75 | nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); |
100 | nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); | 76 | nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); |
101 | nvkm_wo32(base->ramfc, 0x78, 0x00000000); | 77 | nvkm_wo32(chan->ramfc, 0x78, 0x00000000); |
102 | nvkm_wo32(base->ramfc, 0x7c, 0x30000001); | 78 | nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); |
103 | nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | | 79 | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | |
104 | (4 << 24) /* SEARCH_FULL */ | | 80 | (4 << 24) /* SEARCH_FULL */ | |
105 | (chan->ramht->gpuobj->node->offset >> 4)); | 81 | (chan->ramht->gpuobj->node->offset >> 4)); |
106 | nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10); | 82 | nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); |
107 | nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12); | 83 | nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12); |
108 | nvkm_done(base->ramfc); | 84 | nvkm_done(chan->ramfc); |
109 | return 0; | 85 | return 0; |
110 | } | 86 | } |
111 | 87 | ||
112 | struct nvkm_ofuncs | 88 | const struct nvkm_fifo_chan_oclass |
113 | g84_fifo_ofuncs_ind = { | 89 | g84_fifo_gpfifo_oclass = { |
114 | .ctor = g84_fifo_chan_ctor_ind, | 90 | .base.oclass = G82_CHANNEL_GPFIFO, |
115 | .dtor = nv50_fifo_chan_dtor, | 91 | .base.minver = 0, |
116 | .init = g84_fifo_chan_init, | 92 | .base.maxver = 0, |
117 | .fini = nv50_fifo_chan_fini, | 93 | .ctor = g84_fifo_gpfifo_new, |
118 | .map = _nvkm_fifo_channel_map, | ||
119 | .rd32 = _nvkm_fifo_channel_rd32, | ||
120 | .wr32 = _nvkm_fifo_channel_wr32, | ||
121 | .ntfy = _nvkm_fifo_channel_ntfy | ||
122 | }; | 94 | }; |
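The RAMFC writes above encode the indirect-buffer size as a log2 entry count (order_base_2(ilength / 8), eight bytes per entry) packed into bits 16+ of the word that also carries the upper half of the ib offset. A minimal sketch of that encoding, with order_base_2() reimplemented for user space:

/* sketch: ilength/ioffset values are examples, not real defaults */
#include <stdio.h>
#include <stdint.h>

static unsigned order_base_2(uint64_t n)	/* ceil(log2(n)) */
{
	return n <= 1 ? 0 : 64 - __builtin_clzll(n - 1);
}

int main(void)
{
	uint64_t ioffset = 0x20000000ULL;
	uint32_t ilength_bytes = 0x10000;	/* 8192 entries * 8 bytes */
	unsigned ilength = order_base_2(ilength_bytes / 8);

	uint32_t word = (uint32_t)(ioffset >> 32) | (ilength << 16);
	printf("ramfc+0x54 = %08x (ilength log2 = %u)\n", word, ilength);
	return 0;
}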
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c index 7fd6401ca905..eb9195a6f375 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c | |||
@@ -30,29 +30,33 @@ | |||
30 | #include <nvif/class.h> | 30 | #include <nvif/class.h> |
31 | #include <nvif/unpack.h> | 31 | #include <nvif/unpack.h> |
32 | 32 | ||
33 | static int | 33 | static u32 |
34 | gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend, | 34 | gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) |
35 | struct nvkm_object *object) | ||
36 | { | 35 | { |
37 | struct gf100_fifo *fifo = (void *)parent->engine; | 36 | switch (engine->subdev.index) { |
38 | struct gf100_fifo_base *base = (void *)parent->parent; | ||
39 | struct gf100_fifo_chan *chan = (void *)parent; | ||
40 | struct nvkm_gpuobj *engn = &base->base.gpuobj; | ||
41 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | ||
42 | struct nvkm_device *device = subdev->device; | ||
43 | u32 addr; | ||
44 | |||
45 | switch (nv_engidx(object->engine)) { | ||
46 | case NVDEV_ENGINE_SW : return 0; | 37 | case NVDEV_ENGINE_SW : return 0; |
47 | case NVDEV_ENGINE_GR : addr = 0x0210; break; | 38 | case NVDEV_ENGINE_GR : return 0x0210; |
48 | case NVDEV_ENGINE_CE0 : addr = 0x0230; break; | 39 | case NVDEV_ENGINE_CE0 : return 0x0230; |
49 | case NVDEV_ENGINE_CE1 : addr = 0x0240; break; | 40 | case NVDEV_ENGINE_CE1 : return 0x0240; |
50 | case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; | 41 | case NVDEV_ENGINE_MSPDEC: return 0x0250; |
51 | case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; | 42 | case NVDEV_ENGINE_MSPPP : return 0x0260; |
52 | case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; | 43 | case NVDEV_ENGINE_MSVLD : return 0x0270; |
53 | default: | 44 | default: |
54 | return -EINVAL; | 45 | WARN_ON(1); |
46 | return 0; | ||
55 | } | 47 | } |
48 | } | ||
49 | |||
50 | static int | ||
51 | gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, | ||
52 | struct nvkm_engine *engine, bool suspend) | ||
53 | { | ||
54 | const u32 offset = gf100_fifo_gpfifo_engine_addr(engine); | ||
55 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); | ||
56 | struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev; | ||
57 | struct nvkm_device *device = subdev->device; | ||
58 | struct nvkm_gpuobj *inst = chan->base.inst; | ||
59 | int ret = 0; | ||
56 | 60 | ||
57 | nvkm_wr32(device, 0x002634, chan->base.chid); | 61 | nvkm_wr32(device, 0x002634, chan->base.chid); |
58 | if (nvkm_msec(device, 2000, | 62 | if (nvkm_msec(device, 2000, |
@@ -60,143 +64,197 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend, | |||
60 | break; | 64 | break; |
61 | ) < 0) { | 65 | ) < 0) { |
62 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", | 66 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", |
63 | chan->base.chid, nvkm_client_name(chan)); | 67 | chan->base.chid, chan->base.object.client->name); |
68 | ret = -EBUSY; | ||
64 | if (suspend) | 69 | if (suspend) |
65 | return -EBUSY; | 70 | return ret; |
71 | } | ||
72 | |||
73 | if (offset) { | ||
74 | nvkm_kmap(inst); | ||
75 | nvkm_wo32(inst, offset + 0x00, 0x00000000); | ||
76 | nvkm_wo32(inst, offset + 0x04, 0x00000000); | ||
77 | nvkm_done(inst); | ||
78 | } | ||
79 | |||
80 | return ret; | ||
81 | } | ||
82 | |||
83 | static int | ||
84 | gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base, | ||
85 | struct nvkm_engine *engine) | ||
86 | { | ||
87 | const u32 offset = gf100_fifo_gpfifo_engine_addr(engine); | ||
88 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); | ||
89 | struct nvkm_gpuobj *inst = chan->base.inst; | ||
90 | |||
91 | if (offset) { | ||
92 | u64 addr = chan->engn[engine->subdev.index].vma.offset; | ||
93 | nvkm_kmap(inst); | ||
94 | nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4); | ||
95 | nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr)); | ||
96 | nvkm_done(inst); | ||
66 | } | 97 | } |
67 | 98 | ||
68 | nvkm_kmap(engn); | ||
69 | nvkm_wo32(engn, addr + 0x00, 0x00000000); | ||
70 | nvkm_wo32(engn, addr + 0x04, 0x00000000); | ||
71 | nvkm_done(engn); | ||
72 | return 0; | 99 | return 0; |
73 | } | 100 | } |
74 | 101 | ||
102 | static void | ||
103 | gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base, | ||
104 | struct nvkm_engine *engine) | ||
105 | { | ||
106 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); | ||
107 | nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma); | ||
108 | nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst); | ||
109 | } | ||
110 | |||
75 | static int | 111 | static int |
76 | gf100_fifo_context_attach(struct nvkm_object *parent, | 112 | gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base, |
77 | struct nvkm_object *object) | 113 | struct nvkm_engine *engine, |
114 | struct nvkm_object *object) | ||
78 | { | 115 | { |
79 | struct gf100_fifo_base *base = (void *)parent->parent; | 116 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); |
80 | struct nvkm_gpuobj *engn = &base->base.gpuobj; | 117 | int engn = engine->subdev.index; |
81 | struct nvkm_engctx *ectx = (void *)object; | ||
82 | u32 addr; | ||
83 | int ret; | 118 | int ret; |
84 | 119 | ||
85 | switch (nv_engidx(object->engine)) { | 120 | if (!gf100_fifo_gpfifo_engine_addr(engine)) |
86 | case NVDEV_ENGINE_SW : return 0; | 121 | return 0; |
87 | case NVDEV_ENGINE_GR : addr = 0x0210; break; | ||
88 | case NVDEV_ENGINE_CE0 : addr = 0x0230; break; | ||
89 | case NVDEV_ENGINE_CE1 : addr = 0x0240; break; | ||
90 | case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; | ||
91 | case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; | ||
92 | case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; | ||
93 | default: | ||
94 | return -EINVAL; | ||
95 | } | ||
96 | |||
97 | if (!ectx->vma.node) { | ||
98 | ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm, | ||
99 | NV_MEM_ACCESS_RW, &ectx->vma); | ||
100 | if (ret) | ||
101 | return ret; | ||
102 | 122 | ||
103 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; | 123 | if (object->oclass) { |
124 | return nvkm_gpuobj_map(nv_gpuobj(object), chan->vm, | ||
125 | NV_MEM_ACCESS_RW, | ||
126 | &chan->engn[engn].vma); | ||
104 | } | 127 | } |
105 | 128 | ||
106 | nvkm_kmap(engn); | 129 | ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst); |
107 | nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); | 130 | if (ret) |
108 | nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset)); | 131 | return ret; |
109 | nvkm_done(engn); | 132 | |
110 | return 0; | 133 | return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm, |
134 | NV_MEM_ACCESS_RW, &chan->engn[engn].vma); | ||
111 | } | 135 | } |
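engine_ctor above takes one of two paths: a child object that already carries its own class is mapped into the channel VM directly, while anything else is first bound into a fresh instance object and that object is mapped. A conceptual user-space model, with stand-ins for nvkm_object_bind() and nvkm_gpuobj_map():

/* sketch only: the allocator and both helpers are fabricated */
#include <stdio.h>
#include <stdlib.h>

struct vma { unsigned long offset; };
struct gpuobj { size_t size; };

static int gpuobj_map(struct gpuobj *obj, struct vma *vma)
{
	static unsigned long next = 0x1000;	/* fake VM allocator */
	vma->offset = next;
	next += obj->size;
	return 0;
}

static int object_bind(size_t size, struct gpuobj **pinst)
{
	*pinst = malloc(sizeof(**pinst));
	if (!*pinst)
		return -1;
	(*pinst)->size = size;
	return 0;
}

int main(void)
{
	struct gpuobj *inst = NULL;
	struct vma vma;

	if (object_bind(0x1000, &inst) == 0 && gpuobj_map(inst, &vma) == 0)
		printf("engine ctx mapped at %#lx\n", vma.offset);
	free(inst);
	return 0;
}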
112 | 136 | ||
113 | static int | 137 | static void |
114 | gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend) | 138 | gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base) |
115 | { | 139 | { |
116 | struct gf100_fifo *fifo = (void *)object->engine; | 140 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); |
117 | struct gf100_fifo_chan *chan = (void *)object; | 141 | struct gf100_fifo *fifo = chan->fifo; |
118 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 142 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
119 | u32 chid = chan->base.chid; | 143 | u32 coff = chan->base.chid * 8; |
120 | 144 | ||
121 | if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { | 145 | if (!list_empty(&chan->head) && !chan->killed) { |
122 | nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000); | 146 | list_del_init(&chan->head); |
147 | nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000); | ||
123 | gf100_fifo_runlist_update(fifo); | 148 | gf100_fifo_runlist_update(fifo); |
124 | } | 149 | } |
125 | 150 | ||
126 | gf100_fifo_intr_engine(fifo); | 151 | gf100_fifo_intr_engine(fifo); |
127 | 152 | ||
128 | nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000); | 153 | nvkm_wr32(device, 0x003000 + coff, 0x00000000); |
129 | return nvkm_fifo_channel_fini(&chan->base, suspend); | ||
130 | } | 154 | } |
131 | 155 | ||
132 | static int | 156 | static void |
133 | gf100_fifo_chan_init(struct nvkm_object *object) | 157 | gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base) |
134 | { | 158 | { |
135 | struct nvkm_gpuobj *base = nv_gpuobj(object->parent); | 159 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); |
136 | struct gf100_fifo *fifo = (void *)object->engine; | 160 | struct gf100_fifo *fifo = chan->fifo; |
137 | struct gf100_fifo_chan *chan = (void *)object; | ||
138 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 161 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
139 | u32 chid = chan->base.chid; | 162 | u32 addr = chan->base.inst->addr >> 12; |
140 | int ret; | 163 | u32 coff = chan->base.chid * 8; |
141 | |||
142 | ret = nvkm_fifo_channel_init(&chan->base); | ||
143 | if (ret) | ||
144 | return ret; | ||
145 | 164 | ||
146 | nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12); | 165 | nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr); |
147 | 166 | ||
148 | if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { | 167 | if (list_empty(&chan->head) && !chan->killed) { |
149 | nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001); | 168 | list_add_tail(&chan->head, &fifo->chan); |
169 | nvkm_wr32(device, 0x003004 + coff, 0x001f0001); | ||
150 | gf100_fifo_runlist_update(fifo); | 170 | gf100_fifo_runlist_update(fifo); |
151 | } | 171 | } |
172 | } | ||
152 | 173 | ||
153 | return 0; | 174 | static void * |
175 | gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base) | ||
176 | { | ||
177 | struct gf100_fifo_chan *chan = gf100_fifo_chan(base); | ||
178 | nvkm_vm_ref(NULL, &chan->vm, chan->pgd); | ||
179 | nvkm_gpuobj_del(&chan->pgd); | ||
180 | return chan; | ||
154 | } | 181 | } |
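The init/fini pair above replaces the old RUNNING/STOPPED/KILLED state enum with list membership plus a killed flag: a channel is on the runlist iff it sits on the fifo's channel list, and a killed channel is never re-added. A tiny model of that state machine (types invented):

/* sketch: booleans stand in for list_empty()/list_add_tail()/list_del_init() */
#include <stdbool.h>
#include <stdio.h>

struct chan { bool on_list, killed; };

static void chan_init(struct chan *c)
{
	if (!c->on_list && !c->killed) {
		c->on_list = true;		/* list_add_tail(...) */
		printf("runlist: add\n");
	}
}

static void chan_fini(struct chan *c)
{
	if (c->on_list && !c->killed) {
		c->on_list = false;		/* list_del_init(...) */
		printf("runlist: remove\n");
	}
}

int main(void)
{
	struct chan c = { false, false };

	chan_init(&c);
	c.killed = true;			/* recovery kicked it off */
	c.on_list = false;
	chan_init(&c);				/* no-op: stays dead */
	chan_fini(&c);
	return 0;
}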
155 | 182 | ||
183 | static const struct nvkm_fifo_chan_func | ||
184 | gf100_fifo_gpfifo_func = { | ||
185 | .dtor = gf100_fifo_gpfifo_dtor, | ||
186 | .init = gf100_fifo_gpfifo_init, | ||
187 | .fini = gf100_fifo_gpfifo_fini, | ||
188 | .ntfy = g84_fifo_chan_ntfy, | ||
189 | .engine_ctor = gf100_fifo_gpfifo_engine_ctor, | ||
190 | .engine_dtor = gf100_fifo_gpfifo_engine_dtor, | ||
191 | .engine_init = gf100_fifo_gpfifo_engine_init, | ||
192 | .engine_fini = gf100_fifo_gpfifo_engine_fini, | ||
193 | }; | ||
194 | |||
156 | static int | 195 | static int |
157 | gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 196 | gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
158 | struct nvkm_oclass *oclass, void *data, u32 size, | 197 | void *data, u32 size, struct nvkm_object **pobject) |
159 | struct nvkm_object **pobject) | ||
160 | { | 198 | { |
161 | union { | 199 | union { |
162 | struct fermi_channel_gpfifo_v0 v0; | 200 | struct fermi_channel_gpfifo_v0 v0; |
163 | } *args = data; | 201 | } *args = data; |
164 | struct gf100_fifo *fifo = (void *)engine; | 202 | struct gf100_fifo *fifo = gf100_fifo(base); |
165 | struct gf100_fifo_base *base = (void *)parent; | 203 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
204 | struct nvkm_object *parent = oclass->parent; | ||
166 | struct gf100_fifo_chan *chan; | 205 | struct gf100_fifo_chan *chan; |
167 | struct nvkm_gpuobj *ramfc = &base->base.gpuobj; | ||
168 | u64 usermem, ioffset, ilength; | 206 | u64 usermem, ioffset, ilength; |
169 | int ret, i; | 207 | int ret, i; |
170 | 208 | ||
171 | nvif_ioctl(parent, "create channel gpfifo size %d\n", size); | 209 | nvif_ioctl(parent, "create channel gpfifo size %d\n", size); |
172 | if (nvif_unpack(args->v0, 0, 0, false)) { | 210 | if (nvif_unpack(args->v0, 0, 0, false)) { |
173 | nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx" | 211 | nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx " |
174 | "ioffset %016llx ilength %08x\n", | 212 | "ioffset %016llx ilength %08x\n", |
175 | args->v0.version, args->v0.vm, args->v0.ioffset, | 213 | args->v0.version, args->v0.vm, args->v0.ioffset, |
176 | args->v0.ilength); | 214 | args->v0.ilength); |
177 | if (args->v0.vm) | ||
178 | return -ENOENT; | ||
179 | } else | 215 | } else |
180 | return ret; | 216 | return ret; |
181 | 217 | ||
182 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, | 218 | /* allocate channel */ |
183 | fifo->user.bar.offset, 0x1000, 0, | 219 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
184 | (1ULL << NVDEV_ENGINE_SW) | | 220 | return -ENOMEM; |
185 | (1ULL << NVDEV_ENGINE_GR) | | 221 | *pobject = &chan->base.object; |
186 | (1ULL << NVDEV_ENGINE_CE0) | | 222 | chan->fifo = fifo; |
187 | (1ULL << NVDEV_ENGINE_CE1) | | 223 | INIT_LIST_HEAD(&chan->head); |
188 | (1ULL << NVDEV_ENGINE_MSVLD) | | 224 | |
189 | (1ULL << NVDEV_ENGINE_MSPDEC) | | 225 | ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base, |
190 | (1ULL << NVDEV_ENGINE_MSPPP), &chan); | 226 | 0x1000, 0x1000, true, args->v0.vm, 0, |
191 | *pobject = nv_object(chan); | 227 | (1ULL << NVDEV_ENGINE_CE0) | |
228 | (1ULL << NVDEV_ENGINE_CE1) | | ||
229 | (1ULL << NVDEV_ENGINE_GR) | | ||
230 | (1ULL << NVDEV_ENGINE_MSPDEC) | | ||
231 | (1ULL << NVDEV_ENGINE_MSPPP) | | ||
232 | (1ULL << NVDEV_ENGINE_MSVLD) | | ||
233 | (1ULL << NVDEV_ENGINE_SW), | ||
234 | 1, fifo->user.bar.offset, 0x1000, | ||
235 | oclass, &chan->base); | ||
192 | if (ret) | 236 | if (ret) |
193 | return ret; | 237 | return ret; |
194 | 238 | ||
195 | chan->base.inst = base->base.gpuobj.addr; | ||
196 | args->v0.chid = chan->base.chid; | 239 | args->v0.chid = chan->base.chid; |
197 | 240 | ||
198 | nv_parent(chan)->context_attach = gf100_fifo_context_attach; | 241 | /* page directory */ |
199 | nv_parent(chan)->context_detach = gf100_fifo_context_detach; | 242 | ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd); |
243 | if (ret) | ||
244 | return ret; | ||
245 | |||
246 | nvkm_kmap(chan->base.inst); | ||
247 | nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr)); | ||
248 | nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr)); | ||
249 | nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff); | ||
250 | nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff); | ||
251 | nvkm_done(chan->base.inst); | ||
252 | |||
253 | ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd); | ||
254 | if (ret) | ||
255 | return ret; | ||
256 | |||
257 | /* clear channel control registers */ | ||
200 | 258 | ||
201 | usermem = chan->base.chid * 0x1000; | 259 | usermem = chan->base.chid * 0x1000; |
202 | ioffset = args->v0.ioffset; | 260 | ioffset = args->v0.ioffset; |
@@ -208,97 +266,33 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
208 | nvkm_done(fifo->user.mem); | 266 | nvkm_done(fifo->user.mem); |
209 | usermem = nvkm_memory_addr(fifo->user.mem) + usermem; | 267 | usermem = nvkm_memory_addr(fifo->user.mem) + usermem; |
210 | 268 | ||
211 | nvkm_kmap(ramfc); | 269 | /* RAMFC */ |
212 | nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem)); | 270 | nvkm_kmap(chan->base.inst); |
213 | nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem)); | 271 | nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem)); |
214 | nvkm_wo32(ramfc, 0x10, 0x0000face); | 272 | nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem)); |
215 | nvkm_wo32(ramfc, 0x30, 0xfffff902); | 273 | nvkm_wo32(chan->base.inst, 0x10, 0x0000face); |
216 | nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset)); | 274 | nvkm_wo32(chan->base.inst, 0x30, 0xfffff902); |
217 | nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); | 275 | nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset)); |
218 | nvkm_wo32(ramfc, 0x54, 0x00000002); | 276 | nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) | |
219 | nvkm_wo32(ramfc, 0x84, 0x20400000); | 277 | (ilength << 16)); |
220 | nvkm_wo32(ramfc, 0x94, 0x30000001); | 278 | nvkm_wo32(chan->base.inst, 0x54, 0x00000002); |
221 | nvkm_wo32(ramfc, 0x9c, 0x00000100); | 279 | nvkm_wo32(chan->base.inst, 0x84, 0x20400000); |
222 | nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f); | 280 | nvkm_wo32(chan->base.inst, 0x94, 0x30000001); |
223 | nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f); | 281 | nvkm_wo32(chan->base.inst, 0x9c, 0x00000100); |
224 | nvkm_wo32(ramfc, 0xac, 0x0000001f); | 282 | nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f); |
225 | nvkm_wo32(ramfc, 0xb8, 0xf8000000); | 283 | nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f); |
226 | nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */ | 284 | nvkm_wo32(chan->base.inst, 0xac, 0x0000001f); |
227 | nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */ | 285 | nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000); |
228 | nvkm_done(ramfc); | 286 | nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */ |
229 | return 0; | 287 | nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */ |
230 | } | 288 | nvkm_done(chan->base.inst); |
231 | |||
232 | static struct nvkm_ofuncs | ||
233 | gf100_fifo_ofuncs = { | ||
234 | .ctor = gf100_fifo_chan_ctor, | ||
235 | .dtor = _nvkm_fifo_channel_dtor, | ||
236 | .init = gf100_fifo_chan_init, | ||
237 | .fini = gf100_fifo_chan_fini, | ||
238 | .map = _nvkm_fifo_channel_map, | ||
239 | .rd32 = _nvkm_fifo_channel_rd32, | ||
240 | .wr32 = _nvkm_fifo_channel_wr32, | ||
241 | .ntfy = _nvkm_fifo_channel_ntfy | ||
242 | }; | ||
243 | |||
244 | struct nvkm_oclass | ||
245 | gf100_fifo_sclass[] = { | ||
246 | { FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs }, | ||
247 | {} | ||
248 | }; | ||
249 | |||
250 | static int | ||
251 | gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | ||
252 | struct nvkm_oclass *oclass, void *data, u32 size, | ||
253 | struct nvkm_object **pobject) | ||
254 | { | ||
255 | struct nvkm_device *device = nv_engine(engine)->subdev.device; | ||
256 | struct gf100_fifo_base *base; | ||
257 | int ret; | ||
258 | |||
259 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, | ||
260 | 0x1000, NVOBJ_FLAG_ZERO_ALLOC | | ||
261 | NVOBJ_FLAG_HEAP, &base); | ||
262 | *pobject = nv_object(base); | ||
263 | if (ret) | ||
264 | return ret; | ||
265 | |||
266 | ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd); | ||
267 | if (ret) | ||
268 | return ret; | ||
269 | |||
270 | nvkm_kmap(&base->base.gpuobj); | ||
271 | nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr)); | ||
272 | nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr)); | ||
273 | nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff); | ||
274 | nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff); | ||
275 | nvkm_done(&base->base.gpuobj); | ||
276 | |||
277 | ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); | ||
278 | if (ret) | ||
279 | return ret; | ||
280 | |||
281 | return 0; | 289 | return 0; |
282 | } | 290 | } |
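Channel creation now builds the page directory itself and hooks it into the channel's instance block: the PD address goes in as a lo/hi pair at +0x200, and a VM limit at +0x208/+0x20c. A sketch of those writes against a fake instance block; the offsets come from the code above, everything else is scaffolding:

/* sketch: wo32() and the inst array model nvkm_wo32() on the inst block */
#include <stdio.h>
#include <stdint.h>

static uint32_t inst[0x1000 / 4];	/* fake instance block */

static void wo32(uint32_t off, uint32_t val) { inst[off / 4] = val; }

int main(void)
{
	uint64_t pgd_addr = 0x12345000ULL;	/* example PD address */

	wo32(0x0200, (uint32_t)pgd_addr);		/* PD address, lo */
	wo32(0x0204, (uint32_t)(pgd_addr >> 32));	/* PD address, hi */
	wo32(0x0208, 0xffffffff);			/* VM limit, lo */
	wo32(0x020c, 0x000000ff);			/* VM limit, hi */

	printf("PD at %#llx, limit %#llx\n",
	       (unsigned long long)pgd_addr,
	       ((unsigned long long)inst[0x20c / 4] << 32) | inst[0x208 / 4]);
	return 0;
}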
283 | 291 | ||
284 | static void | 292 | const struct nvkm_fifo_chan_oclass |
285 | gf100_fifo_context_dtor(struct nvkm_object *object) | 293 | gf100_fifo_gpfifo_oclass = { |
286 | { | 294 | .base.oclass = FERMI_CHANNEL_GPFIFO, |
287 | struct gf100_fifo_base *base = (void *)object; | 295 | .base.minver = 0, |
288 | nvkm_vm_ref(NULL, &base->vm, base->pgd); | 296 | .base.maxver = 0, |
289 | nvkm_gpuobj_del(&base->pgd); | 297 | .ctor = gf100_fifo_gpfifo_new, |
290 | nvkm_fifo_context_destroy(&base->base); | ||
291 | } | ||
292 | |||
293 | struct nvkm_oclass | ||
294 | gf100_fifo_cclass = { | ||
295 | .handle = NV_ENGCTX(FIFO, 0xc0), | ||
296 | .ofuncs = &(struct nvkm_ofuncs) { | ||
297 | .ctor = gf100_fifo_context_ctor, | ||
298 | .dtor = gf100_fifo_context_dtor, | ||
299 | .init = _nvkm_fifo_context_init, | ||
300 | .fini = _nvkm_fifo_context_fini, | ||
301 | .rd32 = _nvkm_fifo_context_rd32, | ||
302 | .wr32 = _nvkm_fifo_context_wr32, | ||
303 | }, | ||
304 | }; | 298 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c index 264c9705bccc..2595cf92ff80 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | |||
@@ -31,30 +31,13 @@ | |||
31 | #include <nvif/class.h> | 31 | #include <nvif/class.h> |
32 | #include <nvif/unpack.h> | 32 | #include <nvif/unpack.h> |
33 | 33 | ||
34 | #define _(a,b) { (a), ((1ULL << (a)) | (b)) } | ||
35 | static const struct { | ||
36 | u64 subdev; | ||
37 | u64 mask; | ||
38 | } fifo_engine[] = { | ||
39 | _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) | | ||
40 | (1ULL << NVDEV_ENGINE_CE2)), | ||
41 | _(NVDEV_ENGINE_MSPDEC , 0), | ||
42 | _(NVDEV_ENGINE_MSPPP , 0), | ||
43 | _(NVDEV_ENGINE_MSVLD , 0), | ||
44 | _(NVDEV_ENGINE_CE0 , 0), | ||
45 | _(NVDEV_ENGINE_CE1 , 0), | ||
46 | _(NVDEV_ENGINE_MSENC , 0), | ||
47 | }; | ||
48 | #undef _ | ||
49 | #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) | ||
50 | |||
51 | static int | 34 | static int |
52 | gk104_fifo_chan_kick(struct gk104_fifo_chan *chan) | 35 | gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan) |
53 | { | 36 | { |
54 | struct nvkm_object *obj = (void *)chan; | 37 | struct gk104_fifo *fifo = chan->fifo; |
55 | struct gk104_fifo *fifo = (void *)obj->engine; | ||
56 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 38 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
57 | struct nvkm_device *device = subdev->device; | 39 | struct nvkm_device *device = subdev->device; |
40 | struct nvkm_client *client = chan->base.object.client; | ||
58 | 41 | ||
59 | nvkm_wr32(device, 0x002634, chan->base.chid); | 42 | nvkm_wr32(device, 0x002634, chan->base.chid); |
60 | if (nvkm_msec(device, 2000, | 43 | if (nvkm_msec(device, 2000, |
@@ -62,198 +45,249 @@ gk104_fifo_chan_kick(struct gk104_fifo_chan *chan) | |||
62 | break; | 45 | break; |
63 | ) < 0) { | 46 | ) < 0) { |
64 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", | 47 | nvkm_error(subdev, "channel %d [%s] kick timeout\n", |
65 | chan->base.chid, nvkm_client_name(chan)); | 48 | chan->base.chid, client->name); |
66 | return -EBUSY; | 49 | return -EBUSY; |
67 | } | 50 | } |
68 | 51 | ||
69 | return 0; | 52 | return 0; |
70 | } | 53 | } |
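The kick (preempt) handshake writes the chid to 0x002634 and then polls, with a two-second budget, until the hardware acknowledges; on timeout the channel is reported wedged and -EBUSY is returned. A user-space model of that poll-with-deadline pattern, with clock_gettime() standing in for nvkm_msec() and a counter faking the hardware ack:

/* sketch only: the "hardware" here acks after a fixed number of polls */
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static int polls_left = 5;	/* fake hardware: acks after five polls */

static bool preempt_done(void)
{
	return --polls_left <= 0;
}

static unsigned long long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000ULL + ts.tv_nsec / 1000000;
}

static int kick(int chid)
{
	unsigned long long deadline = now_ms() + 2000;	/* 2s, as above */

	/* the real code writes chid to 0x002634 here, then polls */
	while (now_ms() <= deadline) {
		if (preempt_done())
			return 0;
	}
	printf("channel %d kick timeout\n", chid);
	return -16;	/* -EBUSY */
}

int main(void)
{
	return kick(3) ? 1 : 0;
}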
71 | 54 | ||
72 | static int | 55 | static u32 |
73 | gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, | 56 | gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) |
74 | struct nvkm_object *object) | ||
75 | { | 57 | { |
76 | struct gk104_fifo_base *base = (void *)parent->parent; | 58 | switch (engine->subdev.index) { |
77 | struct gk104_fifo_chan *chan = (void *)parent; | 59 | case NVDEV_ENGINE_SW : |
78 | struct nvkm_gpuobj *engn = &base->base.gpuobj; | ||
79 | u32 addr; | ||
80 | int ret; | ||
81 | |||
82 | switch (nv_engidx(object->engine)) { | ||
83 | case NVDEV_ENGINE_SW : return 0; | ||
84 | case NVDEV_ENGINE_CE0 : | 60 | case NVDEV_ENGINE_CE0 : |
85 | case NVDEV_ENGINE_CE1 : | 61 | case NVDEV_ENGINE_CE1 : |
86 | case NVDEV_ENGINE_CE2 : addr = 0x0000; break; | 62 | case NVDEV_ENGINE_CE2 : return 0x0000; |
87 | case NVDEV_ENGINE_GR : addr = 0x0210; break; | 63 | case NVDEV_ENGINE_GR : return 0x0210; |
88 | case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; | 64 | case NVDEV_ENGINE_MSPDEC: return 0x0250; |
89 | case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; | 65 | case NVDEV_ENGINE_MSPPP : return 0x0260; |
90 | case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; | 66 | case NVDEV_ENGINE_MSVLD : return 0x0270; |
91 | default: | 67 | default: |
92 | return -EINVAL; | 68 | WARN_ON(1); |
69 | return 0; | ||
93 | } | 70 | } |
71 | } | ||
94 | 72 | ||
95 | ret = gk104_fifo_chan_kick(chan); | 73 | static int |
74 | gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, | ||
75 | struct nvkm_engine *engine, bool suspend) | ||
76 | { | ||
77 | const u32 offset = gk104_fifo_gpfifo_engine_addr(engine); | ||
78 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); | ||
79 | struct nvkm_gpuobj *inst = chan->base.inst; | ||
80 | int ret; | ||
81 | |||
82 | ret = gk104_fifo_gpfifo_kick(chan); | ||
96 | if (ret && suspend) | 83 | if (ret && suspend) |
97 | return ret; | 84 | return ret; |
98 | 85 | ||
99 | if (addr) { | 86 | if (offset) { |
100 | nvkm_kmap(engn); | 87 | nvkm_kmap(inst); |
101 | nvkm_wo32(engn, addr + 0x00, 0x00000000); | 88 | nvkm_wo32(inst, offset + 0x00, 0x00000000); |
102 | nvkm_wo32(engn, addr + 0x04, 0x00000000); | 89 | nvkm_wo32(inst, offset + 0x04, 0x00000000); |
103 | nvkm_done(engn); | 90 | nvkm_done(inst); |
91 | } | ||
92 | |||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | static int | ||
97 | gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base, | ||
98 | struct nvkm_engine *engine) | ||
99 | { | ||
100 | const u32 offset = gk104_fifo_gpfifo_engine_addr(engine); | ||
101 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); | ||
102 | struct nvkm_gpuobj *inst = chan->base.inst; | ||
103 | |||
104 | if (offset) { | ||
105 | u64 addr = chan->engn[engine->subdev.index].vma.offset; | ||
106 | nvkm_kmap(inst); | ||
107 | nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4); | ||
108 | nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr)); | ||
109 | nvkm_done(inst); | ||
104 | } | 110 | } |
105 | 111 | ||
106 | return 0; | 112 | return 0; |
107 | } | 113 | } |
108 | 114 | ||
115 | static void | ||
116 | gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base, | ||
117 | struct nvkm_engine *engine) | ||
118 | { | ||
119 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); | ||
120 | nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma); | ||
121 | nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst); | ||
122 | } | ||
123 | |||
109 | static int | 124 | static int |
110 | gk104_fifo_context_attach(struct nvkm_object *parent, | 125 | gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base, |
111 | struct nvkm_object *object) | 126 | struct nvkm_engine *engine, |
127 | struct nvkm_object *object) | ||
112 | { | 128 | { |
113 | struct gk104_fifo_base *base = (void *)parent->parent; | 129 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); |
114 | struct nvkm_gpuobj *engn = &base->base.gpuobj; | 130 | int engn = engine->subdev.index; |
115 | struct nvkm_engctx *ectx = (void *)object; | ||
116 | u32 addr; | ||
117 | int ret; | 131 | int ret; |
118 | 132 | ||
119 | switch (nv_engidx(object->engine)) { | 133 | if (!gk104_fifo_gpfifo_engine_addr(engine)) |
120 | case NVDEV_ENGINE_SW : | ||
121 | return 0; | ||
122 | case NVDEV_ENGINE_CE0: | ||
123 | case NVDEV_ENGINE_CE1: | ||
124 | case NVDEV_ENGINE_CE2: | ||
125 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; | ||
126 | return 0; | 134 | return 0; |
127 | case NVDEV_ENGINE_GR : addr = 0x0210; break; | ||
128 | case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; | ||
129 | case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; | ||
130 | case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; | ||
131 | default: | ||
132 | return -EINVAL; | ||
133 | } | ||
134 | 135 | ||
135 | if (!ectx->vma.node) { | 136 | if (object->oclass) { |
136 | ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm, | 137 | return nvkm_gpuobj_map(nv_gpuobj(object), chan->vm, |
137 | NV_MEM_ACCESS_RW, &ectx->vma); | 138 | NV_MEM_ACCESS_RW, |
138 | if (ret) | 139 | &chan->engn[engn].vma); |
139 | return ret; | ||
140 | |||
141 | nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; | ||
142 | } | 140 | } |
143 | 141 | ||
144 | nvkm_kmap(engn); | 142 | ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst); |
145 | nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); | 143 | if (ret) |
146 | nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset)); | 144 | return ret; |
147 | nvkm_done(engn); | 145 | |
148 | return 0; | 146 | return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm, |
147 | NV_MEM_ACCESS_RW, &chan->engn[engn].vma); | ||
149 | } | 148 | } |
150 | 149 | ||
151 | static int | 150 | static void |
152 | gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend) | 151 | gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base) |
153 | { | 152 | { |
154 | struct gk104_fifo *fifo = (void *)object->engine; | 153 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); |
155 | struct gk104_fifo_chan *chan = (void *)object; | 154 | struct gk104_fifo *fifo = chan->fifo; |
156 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 155 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
157 | u32 chid = chan->base.chid; | 156 | u32 coff = chan->base.chid * 8; |
158 | 157 | ||
159 | if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { | 158 | if (!list_empty(&chan->head)) { |
160 | nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800); | 159 | list_del_init(&chan->head); |
160 | nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800); | ||
161 | gk104_fifo_runlist_update(fifo, chan->engine); | 161 | gk104_fifo_runlist_update(fifo, chan->engine); |
162 | } | 162 | } |
163 | 163 | ||
164 | nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000); | 164 | nvkm_wr32(device, 0x800000 + coff, 0x00000000); |
165 | return nvkm_fifo_channel_fini(&chan->base, suspend); | ||
166 | } | 165 | } |
167 | 166 | ||
168 | static int | 167 | static void |
169 | gk104_fifo_chan_init(struct nvkm_object *object) | 168 | gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base) |
170 | { | 169 | { |
171 | struct nvkm_gpuobj *base = nv_gpuobj(object->parent); | 170 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); |
172 | struct gk104_fifo *fifo = (void *)object->engine; | 171 | struct gk104_fifo *fifo = chan->fifo; |
173 | struct gk104_fifo_chan *chan = (void *)object; | ||
174 | struct nvkm_device *device = fifo->base.engine.subdev.device; | 172 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
175 | u32 chid = chan->base.chid; | 173 | u32 addr = chan->base.inst->addr >> 12; |
176 | int ret; | 174 | u32 coff = chan->base.chid * 8; |
177 | 175 | ||
178 | ret = nvkm_fifo_channel_init(&chan->base); | 176 | nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16); |
179 | if (ret) | 177 | nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr); |
180 | return ret; | ||
181 | 178 | ||
182 | nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); | 179 | if (list_empty(&chan->head) && !chan->killed) { |
183 | nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); | 180 | list_add_tail(&chan->head, &fifo->engine[chan->engine].chan); |
184 | 181 | nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400); | |
185 | if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { | ||
186 | nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400); | ||
187 | gk104_fifo_runlist_update(fifo, chan->engine); | 182 | gk104_fifo_runlist_update(fifo, chan->engine); |
188 | nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400); | 183 | nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400); |
189 | } | 184 | } |
185 | } | ||
190 | 186 | ||
191 | return 0; | 187 | static void * |
188 | gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base) | ||
189 | { | ||
190 | struct gk104_fifo_chan *chan = gk104_fifo_chan(base); | ||
191 | nvkm_vm_ref(NULL, &chan->vm, chan->pgd); | ||
192 | nvkm_gpuobj_del(&chan->pgd); | ||
193 | return chan; | ||
192 | } | 194 | } |
193 | 195 | ||
194 | static int | 196 | static const struct nvkm_fifo_chan_func |
195 | gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 197 | gk104_fifo_gpfifo_func = { |
196 | struct nvkm_oclass *oclass, void *data, u32 size, | 198 | .dtor = gk104_fifo_gpfifo_dtor, |
197 | struct nvkm_object **pobject) | 199 | .init = gk104_fifo_gpfifo_init, |
200 | .fini = gk104_fifo_gpfifo_fini, | ||
201 | .ntfy = g84_fifo_chan_ntfy, | ||
202 | .engine_ctor = gk104_fifo_gpfifo_engine_ctor, | ||
203 | .engine_dtor = gk104_fifo_gpfifo_engine_dtor, | ||
204 | .engine_init = gk104_fifo_gpfifo_engine_init, | ||
205 | .engine_fini = gk104_fifo_gpfifo_engine_fini, | ||
206 | }; | ||
207 | |||
208 | int | ||
209 | gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, | ||
210 | void *data, u32 size, struct nvkm_object **pobject) | ||
198 | { | 211 | { |
199 | union { | 212 | union { |
200 | struct kepler_channel_gpfifo_a_v0 v0; | 213 | struct kepler_channel_gpfifo_a_v0 v0; |
201 | } *args = data; | 214 | } *args = data; |
202 | struct gk104_fifo *fifo = (void *)engine; | 215 | struct gk104_fifo *fifo = gk104_fifo(base); |
203 | struct gk104_fifo_base *base = (void *)parent; | 216 | struct nvkm_device *device = fifo->base.engine.subdev.device; |
217 | struct nvkm_object *parent = oclass->parent; | ||
204 | struct gk104_fifo_chan *chan; | 218 | struct gk104_fifo_chan *chan; |
205 | struct nvkm_gpuobj *ramfc = &base->base.gpuobj; | ||
206 | u64 usermem, ioffset, ilength; | 219 | u64 usermem, ioffset, ilength; |
207 | u32 engines; | 220 | u32 engines; |
208 | int ret, i; | 221 | int ret, i; |
209 | 222 | ||
210 | nvif_ioctl(parent, "create channel gpfifo size %d\n", size); | 223 | nvif_ioctl(parent, "create channel gpfifo size %d\n", size); |
211 | if (nvif_unpack(args->v0, 0, 0, false)) { | 224 | if (nvif_unpack(args->v0, 0, 0, false)) { |
212 | nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx" | 225 | nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx " |
213 | "ioffset %016llx ilength %08x engine %08x\n", | 226 | "ioffset %016llx ilength %08x engine %08x\n", |
214 | args->v0.version, args->v0.vm, args->v0.ioffset, | 227 | args->v0.version, args->v0.vm, args->v0.ioffset, |
215 | args->v0.ilength, args->v0.engine); | 228 | args->v0.ilength, args->v0.engine); |
216 | if (args->v0.vm) | ||
217 | return -ENOENT; | ||
218 | } else | 229 | } else |
219 | return ret; | 230 | return ret; |
220 | 231 | ||
221 | for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) { | 232 | /* determine which downstream engines are present */ |
222 | if (!nvkm_engine(parent, fifo_engine[i].subdev)) | 233 | for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) { |
234 | u64 subdevs = gk104_fifo_engine_subdev(i); | ||
235 | if (!nvkm_device_engine(device, __ffs64(subdevs))) | ||
223 | continue; | 236 | continue; |
224 | engines |= (1 << i); | 237 | engines |= (1 << i); |
225 | } | 238 | } |
226 | 239 | ||
240 | /* if this is an engine mask query, we're done */ | ||
227 | if (!args->v0.engine) { | 241 | if (!args->v0.engine) { |
228 | static struct nvkm_oclass oclass = { | ||
229 | .ofuncs = &nvkm_object_ofuncs, | ||
230 | }; | ||
231 | args->v0.engine = engines; | 242 | args->v0.engine = engines; |
232 | return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject); | 243 | return nvkm_object_new(oclass, NULL, 0, pobject); |
233 | } | 244 | } |
234 | 245 | ||
235 | engines &= args->v0.engine; | 246 | /* check that we support a requested engine - note that the user |
236 | if (!engines) { | 247 | * argument is a mask in order to allow the user to request (for |
237 | nvif_ioctl(parent, "unsupported engines %08x\n", | 248 | * example) *any* copy engine, but it doesn't matter which. |
238 | args->v0.engine); | 249 | */ |
250 | args->v0.engine &= engines; | ||
251 | if (!args->v0.engine) { | ||
252 | nvif_ioctl(parent, "no supported engine\n"); | ||
239 | return -ENODEV; | 253 | return -ENODEV; |
240 | } | 254 | } |
241 | i = __ffs(engines); | ||
242 | 255 | ||
243 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, | 256 | /* allocate the channel */ |
244 | fifo->user.bar.offset, 0x200, 0, | 257 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
245 | fifo_engine[i].mask, &chan); | 258 | return -ENOMEM; |
246 | *pobject = nv_object(chan); | 259 | *pobject = &chan->base.object; |
260 | chan->fifo = fifo; | ||
261 | chan->engine = __ffs(args->v0.engine); | ||
262 | INIT_LIST_HEAD(&chan->head); | ||
263 | |||
264 | ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base, | ||
265 | 0x1000, 0x1000, true, args->v0.vm, 0, | ||
266 | gk104_fifo_engine_subdev(chan->engine), | ||
267 | 1, fifo->user.bar.offset, 0x200, | ||
268 | oclass, &chan->base); | ||
247 | if (ret) | 269 | if (ret) |
248 | return ret; | 270 | return ret; |
249 | 271 | ||
250 | chan->base.inst = base->base.gpuobj.addr; | ||
251 | args->v0.chid = chan->base.chid; | 272 | args->v0.chid = chan->base.chid; |
252 | 273 | ||
253 | nv_parent(chan)->context_attach = gk104_fifo_context_attach; | 274 | /* page directory */ |
254 | nv_parent(chan)->context_detach = gk104_fifo_context_detach; | 275 | ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd); |
255 | chan->engine = i; | 276 | if (ret) |
277 | return ret; | ||
278 | |||
279 | nvkm_kmap(chan->base.inst); | ||
280 | nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr)); | ||
281 | nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr)); | ||
282 | nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff); | ||
283 | nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff); | ||
284 | nvkm_done(chan->base.inst); | ||
256 | 285 | ||
286 | ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd); | ||
287 | if (ret) | ||
288 | return ret; | ||
289 | |||
290 | /* clear channel control registers */ | ||
257 | usermem = chan->base.chid * 0x200; | 291 | usermem = chan->base.chid * 0x200; |
258 | ioffset = args->v0.ioffset; | 292 | ioffset = args->v0.ioffset; |
259 | ilength = order_base_2(args->v0.ilength / 8); | 293 | ilength = order_base_2(args->v0.ilength / 8); |
@@ -264,94 +298,31 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
264 | nvkm_done(fifo->user.mem); | 298 | nvkm_done(fifo->user.mem); |
265 | usermem = nvkm_memory_addr(fifo->user.mem) + usermem; | 299 | usermem = nvkm_memory_addr(fifo->user.mem) + usermem; |
266 | 300 | ||
267 | nvkm_kmap(ramfc); | 301 | /* RAMFC */ |
268 | nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem)); | 302 | nvkm_kmap(chan->base.inst); |
269 | nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem)); | 303 | nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem)); |
270 | nvkm_wo32(ramfc, 0x10, 0x0000face); | 304 | nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem)); |
271 | nvkm_wo32(ramfc, 0x30, 0xfffff902); | 305 | nvkm_wo32(chan->base.inst, 0x10, 0x0000face); |
272 | nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset)); | 306 | nvkm_wo32(chan->base.inst, 0x30, 0xfffff902); |
273 | nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); | 307 | nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset)); |
274 | nvkm_wo32(ramfc, 0x84, 0x20400000); | 308 | nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) | |
275 | nvkm_wo32(ramfc, 0x94, 0x30000001); | 309 | (ilength << 16)); |
276 | nvkm_wo32(ramfc, 0x9c, 0x00000100); | 310 | nvkm_wo32(chan->base.inst, 0x84, 0x20400000); |
277 | nvkm_wo32(ramfc, 0xac, 0x0000001f); | 311 | nvkm_wo32(chan->base.inst, 0x94, 0x30000001); |
278 | nvkm_wo32(ramfc, 0xe8, chan->base.chid); | 312 | nvkm_wo32(chan->base.inst, 0x9c, 0x00000100); |
279 | nvkm_wo32(ramfc, 0xb8, 0xf8000000); | 313 | nvkm_wo32(chan->base.inst, 0xac, 0x0000001f); |
280 | nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */ | 314 | nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid); |
281 | nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */ | 315 | nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000); |
282 | nvkm_done(ramfc); | 316 | nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */ |
283 | return 0; | 317 | nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */ |
284 | } | 318 | nvkm_done(chan->base.inst); |
285 | |||
286 | struct nvkm_ofuncs | ||
287 | gk104_fifo_chan_ofuncs = { | ||
288 | .ctor = gk104_fifo_chan_ctor, | ||
289 | .dtor = _nvkm_fifo_channel_dtor, | ||
290 | .init = gk104_fifo_chan_init, | ||
291 | .fini = gk104_fifo_chan_fini, | ||
292 | .map = _nvkm_fifo_channel_map, | ||
293 | .rd32 = _nvkm_fifo_channel_rd32, | ||
294 | .wr32 = _nvkm_fifo_channel_wr32, | ||
295 | .ntfy = _nvkm_fifo_channel_ntfy | ||
296 | }; | ||
297 | |||
298 | struct nvkm_oclass | ||
299 | gk104_fifo_sclass[] = { | ||
300 | { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, | ||
301 | {} | ||
302 | }; | ||
303 | |||
304 | static int | ||
305 | gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | ||
306 | struct nvkm_oclass *oclass, void *data, u32 size, | ||
307 | struct nvkm_object **pobject) | ||
308 | { | ||
309 | struct nvkm_device *device = nv_engine(engine)->subdev.device; | ||
310 | struct gk104_fifo_base *base; | ||
311 | int ret; | ||
312 | |||
313 | ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, | ||
314 | 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base); | ||
315 | *pobject = nv_object(base); | ||
316 | if (ret) | ||
317 | return ret; | ||
318 | |||
319 | ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd); | ||
320 | if (ret) | ||
321 | return ret; | ||
322 | |||
323 | nvkm_kmap(&base->base.gpuobj); | ||
324 | nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr)); | ||
325 | nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr)); | ||
326 | nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff); | ||
327 | nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff); | ||
328 | nvkm_done(&base->base.gpuobj); | ||
329 | |||
330 | ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); | ||
331 | if (ret) | ||
332 | return ret; | ||
333 | |||
334 | return 0; | 319 | return 0; |
335 | } | 320 | } |
336 | 321 | ||
337 | static void | 322 | const struct nvkm_fifo_chan_oclass |
338 | gk104_fifo_context_dtor(struct nvkm_object *object) | 323 | gk104_fifo_gpfifo_oclass = { |
339 | { | 324 | .base.oclass = KEPLER_CHANNEL_GPFIFO_A, |
340 | struct gk104_fifo_base *base = (void *)object; | 325 | .base.minver = 0, |
341 | nvkm_vm_ref(NULL, &base->vm, base->pgd); | 326 | .base.maxver = 0, |
342 | nvkm_gpuobj_del(&base->pgd); | 327 | .ctor = gk104_fifo_gpfifo_new, |
343 | nvkm_fifo_context_destroy(&base->base); | ||
344 | } | ||
345 | |||
346 | struct nvkm_oclass | ||
347 | gk104_fifo_cclass = { | ||
348 | .handle = NV_ENGCTX(FIFO, 0xe0), | ||
349 | .ofuncs = &(struct nvkm_ofuncs) { | ||
350 | .ctor = gk104_fifo_context_ctor, | ||
351 | .dtor = gk104_fifo_context_dtor, | ||
352 | .init = _nvkm_fifo_context_init, | ||
353 | .fini = _nvkm_fifo_context_fini, | ||
354 | .rd32 = _nvkm_fifo_context_rd32, | ||
355 | .wr32 = _nvkm_fifo_context_wr32, | ||
356 | }, | ||
357 | }; | 328 | }; |
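
The rewritten gk104_fifo_gpfifo_new() above first builds a bitmask of the engines actually present, hands that mask back unchanged when the user passes engine == 0, and otherwise intersects it with the request and picks the lowest set bit with __ffs(). A minimal userspace sketch of that selection logic, assuming made-up engine bit positions and using __builtin_ctz() in place of the kernel's __ffs():

/* Userspace sketch of the engine selection in gk104_fifo_gpfifo_new().
 * The bit positions and the "present" mask are illustrative; the real
 * code derives the mask from nvkm_device_engine() lookups.
 */
#include <stdio.h>
#include <stdint.h>

#define ENGINE_GR   0   /* hypothetical bit positions */
#define ENGINE_CE0  1
#define ENGINE_CE1  2

static int pick_engine(uint32_t present, uint32_t requested)
{
        uint32_t engines = present & requested;
        if (!engines)
                return -1;               /* -ENODEV in the kernel */
        return __builtin_ctz(engines);   /* stands in for __ffs() */
}

int main(void)
{
        uint32_t present = (1 << ENGINE_GR) | (1 << ENGINE_CE1);

        /* a mask query (requested == 0) just reports what exists */
        printf("present engines: %#x\n", present);

        /* asking for "any copy engine" selects the lowest one present */
        uint32_t any_ce = (1 << ENGINE_CE0) | (1 << ENGINE_CE1);
        printf("chosen engine: %d\n", pick_engine(present, any_ce));
        return 0;
}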
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c index 7beee1f8729a..6511d6e21ecc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c | |||
@@ -25,8 +25,10 @@ | |||
25 | 25 | ||
26 | #include <nvif/class.h> | 26 | #include <nvif/class.h> |
27 | 27 | ||
28 | struct nvkm_oclass | 28 | const struct nvkm_fifo_chan_oclass |
29 | gm204_fifo_sclass[] = { | 29 | gm204_fifo_gpfifo_oclass = { |
30 | { MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, | 30 | .base.oclass = MAXWELL_CHANNEL_GPFIFO_A, |
31 | {} | 31 | .base.minver = 0, |
32 | .base.maxver = 0, | ||
33 | .ctor = gk104_fifo_gpfifo_new, | ||
32 | }; | 34 | }; |
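
gm204 reuses the gk104 constructor; only the class id differs. The broader pattern in this series is that each chipset now exposes a NULL-terminated array of nvkm_fifo_chan_oclass pointers (see the nvXX_fifo_func tables later in this diff) that the core walks to match a class id. A rough standalone model of that dispatch, with simplified struct names and an invented class id:

/* Minimal model of the NULL-terminated channel-class table that replaces
 * the old sclass arrays; struct and field names are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct chan_oclass {
        uint32_t oclass;              /* e.g. MAXWELL_CHANNEL_GPFIFO_A */
        int (*ctor)(void);            /* channel constructor hook */
};

static int gpfifo_new(void) { return 0; }

static const struct chan_oclass gpfifo_oclass = {
        .oclass = 0xb06f,             /* illustrative class id */
        .ctor   = gpfifo_new,
};

static const struct chan_oclass *chan_classes[] = {
        &gpfifo_oclass,
        NULL,                         /* terminator, as in nvkm_fifo_func.chan */
};

static const struct chan_oclass *lookup(uint32_t oclass)
{
        for (int i = 0; chan_classes[i]; i++)
                if (chan_classes[i]->oclass == oclass)
                        return chan_classes[i];
        return NULL;
}

int main(void)
{
        const struct chan_oclass *c = lookup(0xb06f);
        printf("found=%d\n", c ? c->ctor() == 0 : 0);
        return 0;
}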
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c index ca7de9a6d67f..a8c69f878221 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c | |||
@@ -30,15 +30,14 @@ | |||
30 | #include <nvif/unpack.h> | 30 | #include <nvif/unpack.h> |
31 | 31 | ||
32 | static int | 32 | static int |
33 | nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, | 33 | nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, |
34 | struct nvkm_oclass *oclass, void *data, u32 size, | 34 | void *data, u32 size, struct nvkm_object **pobject) |
35 | struct nvkm_object **pobject) | ||
36 | { | 35 | { |
36 | struct nvkm_object *parent = oclass->parent; | ||
37 | union { | 37 | union { |
38 | struct nv50_channel_gpfifo_v0 v0; | 38 | struct nv50_channel_gpfifo_v0 v0; |
39 | } *args = data; | 39 | } *args = data; |
40 | struct nvkm_device *device = parent->engine->subdev.device; | 40 | struct nv50_fifo *fifo = nv50_fifo(base); |
41 | struct nv50_fifo_base *base = (void *)parent; | ||
42 | struct nv50_fifo_chan *chan; | 41 | struct nv50_fifo_chan *chan; |
43 | u64 ioffset, ilength; | 42 | u64 ioffset, ilength; |
44 | int ret; | 43 | int ret; |
@@ -50,61 +49,44 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, | |||
50 | "ilength %08x\n", | 49 | "ilength %08x\n", |
51 | args->v0.version, args->v0.vm, args->v0.pushbuf, | 50 | args->v0.version, args->v0.vm, args->v0.pushbuf, |
52 | args->v0.ioffset, args->v0.ilength); | 51 | args->v0.ioffset, args->v0.ilength); |
53 | if (args->v0.vm) | 52 | if (!args->v0.pushbuf) |
54 | return -ENOENT; | 53 | return -EINVAL; |
55 | } else | 54 | } else |
56 | return ret; | 55 | return ret; |
57 | 56 | ||
58 | ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 57 | if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) |
59 | 0x2000, args->v0.pushbuf, | 58 | return -ENOMEM; |
60 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 59 | *pobject = &chan->base.object; |
61 | (1ULL << NVDEV_ENGINE_SW) | | ||
62 | (1ULL << NVDEV_ENGINE_GR) | | ||
63 | (1ULL << NVDEV_ENGINE_MPEG), &chan); | ||
64 | *pobject = nv_object(chan); | ||
65 | if (ret) | ||
66 | return ret; | ||
67 | |||
68 | chan->base.inst = base->base.gpuobj.addr; | ||
69 | args->v0.chid = chan->base.chid; | ||
70 | |||
71 | nv_parent(chan)->context_attach = nv50_fifo_context_attach; | ||
72 | nv_parent(chan)->context_detach = nv50_fifo_context_detach; | ||
73 | nv_parent(chan)->object_attach = nv50_fifo_object_attach; | ||
74 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; | ||
75 | 60 | ||
76 | ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj, | 61 | ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf, |
77 | &chan->ramht); | 62 | oclass, chan); |
78 | if (ret) | 63 | if (ret) |
79 | return ret; | 64 | return ret; |
80 | 65 | ||
66 | args->v0.chid = chan->base.chid; | ||
81 | ioffset = args->v0.ioffset; | 67 | ioffset = args->v0.ioffset; |
82 | ilength = order_base_2(args->v0.ilength / 8); | 68 | ilength = order_base_2(args->v0.ilength / 8); |
83 | 69 | ||
84 | nvkm_kmap(base->ramfc); | 70 | nvkm_kmap(chan->ramfc); |
85 | nvkm_wo32(base->ramfc, 0x3c, 0x403f6078); | 71 | nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078); |
86 | nvkm_wo32(base->ramfc, 0x44, 0x01003fff); | 72 | nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); |
87 | nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); | 73 | nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); |
88 | nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset)); | 74 | nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset)); |
89 | nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); | 75 | nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); |
90 | nvkm_wo32(base->ramfc, 0x60, 0x7fffffff); | 76 | nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); |
91 | nvkm_wo32(base->ramfc, 0x78, 0x00000000); | 77 | nvkm_wo32(chan->ramfc, 0x78, 0x00000000); |
92 | nvkm_wo32(base->ramfc, 0x7c, 0x30000001); | 78 | nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); |
93 | nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | | 79 | nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | |
94 | (4 << 24) /* SEARCH_FULL */ | | 80 | (4 << 24) /* SEARCH_FULL */ | |
95 | (chan->ramht->gpuobj->node->offset >> 4)); | 81 | (chan->ramht->gpuobj->node->offset >> 4)); |
96 | nvkm_done(base->ramfc); | 82 | nvkm_done(chan->ramfc); |
97 | return 0; | 83 | return 0; |
98 | } | 84 | } |
99 | 85 | ||
100 | struct nvkm_ofuncs | 86 | const struct nvkm_fifo_chan_oclass |
101 | nv50_fifo_ofuncs_ind = { | 87 | nv50_fifo_gpfifo_oclass = { |
102 | .ctor = nv50_fifo_chan_ctor_ind, | 88 | .base.oclass = NV50_CHANNEL_GPFIFO, |
103 | .dtor = nv50_fifo_chan_dtor, | 89 | .base.minver = 0, |
104 | .init = nv50_fifo_chan_init, | 90 | .base.maxver = 0, |
105 | .fini = nv50_fifo_chan_fini, | 91 | .ctor = nv50_fifo_gpfifo_new, |
106 | .map = _nvkm_fifo_channel_map, | ||
107 | .rd32 = _nvkm_fifo_channel_rd32, | ||
108 | .wr32 = _nvkm_fifo_channel_wr32, | ||
109 | .ntfy = _nvkm_fifo_channel_ntfy | ||
110 | }; | 92 | }; |
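
In both the gk104 and nv50 constructors, ilength = order_base_2(args->v0.ilength / 8) turns the ring size in bytes into log2 of the entry count (each GPFIFO entry is 8 bytes), and the RAMFC word at 0x4c packs that value above the high half of ioffset. A worked example, with order_base_2()/upper_32_bits() as simplified reimplementations of the kernel helpers and a made-up push-buffer address:

/* Sketch of the 0x4c encoding: upper_32_bits(ioffset) | (ilength << 16). */
#include <stdio.h>
#include <stdint.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

static unsigned order_base_2(uint64_t n)  /* ceil(log2(n)), n >= 1 */
{
        unsigned order = 0;
        while ((1ULL << order) < n)
                order++;
        return order;
}

int main(void)
{
        uint64_t ioffset = 0x1234abcd0000ULL; /* hypothetical ring address */
        uint32_t bytes   = 0x10000;           /* ring size requested by user */

        /* each GPFIFO entry is 8 bytes, so this is log2(entry count) */
        unsigned ilength = order_base_2(bytes / 8);

        printf("0x48 = %08x\n", lower_32_bits(ioffset));
        printf("0x4c = %08x\n", upper_32_bits(ioffset) | (ilength << 16));
        return 0;
}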
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index d880cfa6de9e..d1ad3fa72c34 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "channv04.h" | 25 | #include "channv04.h" |
26 | #include "regsnv04.h" | 26 | #include "regsnv04.h" |
27 | 27 | ||
28 | #include <core/client.h> | ||
28 | #include <core/handle.h> | 29 | #include <core/handle.h> |
29 | #include <core/ramht.h> | 30 | #include <core/ramht.h> |
30 | #include <subdev/instmem.h> | 31 | #include <subdev/instmem.h> |
@@ -136,6 +137,8 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get) | |||
136 | { | 137 | { |
137 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; | 138 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
138 | struct nvkm_device *device = subdev->device; | 139 | struct nvkm_device *device = subdev->device; |
140 | struct nvkm_fifo_chan *chan; | ||
141 | unsigned long flags; | ||
139 | u32 pull0 = nvkm_rd32(device, 0x003250); | 142 | u32 pull0 = nvkm_rd32(device, 0x003250); |
140 | u32 mthd, data; | 143 | u32 mthd, data; |
141 | int ptr; | 144 | int ptr; |
@@ -157,12 +160,12 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get) | |||
157 | 160 | ||
158 | if (!(pull0 & 0x00000100) || | 161 | if (!(pull0 & 0x00000100) || |
159 | !nv04_fifo_swmthd(device, chid, mthd, data)) { | 162 | !nv04_fifo_swmthd(device, chid, mthd, data)) { |
160 | const char *client_name = | 163 | chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); |
161 | nvkm_client_name_for_fifo_chid(&fifo->base, chid); | ||
162 | nvkm_error(subdev, "CACHE_ERROR - " | 164 | nvkm_error(subdev, "CACHE_ERROR - " |
163 | "ch %d [%s] subc %d mthd %04x data %08x\n", | 165 | "ch %d [%s] subc %d mthd %04x data %08x\n", |
164 | chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc, | 166 | chid, chan ? chan->object.client->name : "unknown", |
165 | data); | 167 | (mthd >> 13) & 7, mthd & 0x1ffc, data); |
168 | nvkm_fifo_chan_put(&fifo->base, flags, &chan); | ||
166 | } | 169 | } |
167 | 170 | ||
168 | nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0); | 171 | nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0); |
@@ -189,10 +192,12 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid) | |||
189 | u32 dma_put = nvkm_rd32(device, 0x003240); | 192 | u32 dma_put = nvkm_rd32(device, 0x003240); |
190 | u32 push = nvkm_rd32(device, 0x003220); | 193 | u32 push = nvkm_rd32(device, 0x003220); |
191 | u32 state = nvkm_rd32(device, 0x003228); | 194 | u32 state = nvkm_rd32(device, 0x003228); |
192 | const char *client_name; | 195 | struct nvkm_fifo_chan *chan; |
193 | 196 | unsigned long flags; | |
194 | client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid); | 197 | const char *name; |
195 | 198 | ||
199 | chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); | ||
200 | name = chan ? chan->object.client->name : "unknown"; | ||
196 | if (device->card_type == NV_50) { | 201 | if (device->card_type == NV_50) { |
197 | u32 ho_get = nvkm_rd32(device, 0x003328); | 202 | u32 ho_get = nvkm_rd32(device, 0x003328); |
198 | u32 ho_put = nvkm_rd32(device, 0x003320); | 203 | u32 ho_put = nvkm_rd32(device, 0x003320); |
@@ -202,7 +207,7 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid) | |||
202 | nvkm_error(subdev, "DMA_PUSHER - " | 207 | nvkm_error(subdev, "DMA_PUSHER - " |
203 | "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x " | 208 | "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x " |
204 | "ib_put %08x state %08x (err: %s) push %08x\n", | 209 | "ib_put %08x state %08x (err: %s) push %08x\n", |
205 | chid, client_name, ho_get, dma_get, ho_put, dma_put, | 210 | chid, name, ho_get, dma_get, ho_put, dma_put, |
206 | ib_get, ib_put, state, nv_dma_state_err(state), | 211 | ib_get, ib_put, state, nv_dma_state_err(state), |
207 | push); | 212 | push); |
208 | 213 | ||
@@ -217,12 +222,13 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid) | |||
217 | } else { | 222 | } else { |
218 | nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x " | 223 | nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x " |
219 | "state %08x (err: %s) push %08x\n", | 224 | "state %08x (err: %s) push %08x\n", |
220 | chid, client_name, dma_get, dma_put, state, | 225 | chid, name, dma_get, dma_put, state, |
221 | nv_dma_state_err(state), push); | 226 | nv_dma_state_err(state), push); |
222 | 227 | ||
223 | if (dma_get != dma_put) | 228 | if (dma_get != dma_put) |
224 | nvkm_wr32(device, 0x003244, dma_put); | 229 | nvkm_wr32(device, 0x003244, dma_put); |
225 | } | 230 | } |
231 | nvkm_fifo_chan_put(&fifo->base, flags, &chan); | ||
226 | 232 | ||
227 | nvkm_wr32(device, 0x003228, 0x00000000); | 233 | nvkm_wr32(device, 0x003228, 0x00000000); |
228 | nvkm_wr32(device, 0x003220, 0x00000001); | 234 | nvkm_wr32(device, 0x003220, 0x00000001); |
@@ -241,7 +247,7 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) | |||
241 | reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1; | 247 | reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1; |
242 | nvkm_wr32(device, NV03_PFIFO_CACHES, 0); | 248 | nvkm_wr32(device, NV03_PFIFO_CACHES, 0); |
243 | 249 | ||
244 | chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max; | 250 | chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1); |
245 | get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET); | 251 | get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET); |
246 | 252 | ||
247 | if (stat & NV_PFIFO_INTR_CACHE_ERROR) { | 253 | if (stat & NV_PFIFO_INTR_CACHE_ERROR) { |
@@ -311,7 +317,7 @@ nv04_fifo_init(struct nvkm_object *object) | |||
311 | nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); | 317 | nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); |
312 | nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8); | 318 | nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8); |
313 | 319 | ||
314 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); | 320 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1); |
315 | 321 | ||
316 | nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); | 322 | nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); |
317 | nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); | 323 | nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); |
@@ -329,6 +335,14 @@ nv04_fifo_dtor(struct nvkm_object *object) | |||
329 | nvkm_fifo_destroy(&fifo->base); | 335 | nvkm_fifo_destroy(&fifo->base); |
330 | } | 336 | } |
331 | 337 | ||
338 | static const struct nvkm_fifo_func | ||
339 | nv04_fifo_func = { | ||
340 | .chan = { | ||
341 | &nv04_fifo_dma_oclass, | ||
342 | NULL | ||
343 | }, | ||
344 | }; | ||
345 | |||
332 | static int | 346 | static int |
333 | nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 347 | nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
334 | struct nvkm_oclass *oclass, void *data, u32 size, | 348 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -342,10 +356,10 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
342 | if (ret) | 356 | if (ret) |
343 | return ret; | 357 | return ret; |
344 | 358 | ||
359 | fifo->base.func = &nv04_fifo_func; | ||
360 | |||
345 | nv_subdev(fifo)->unit = 0x00000100; | 361 | nv_subdev(fifo)->unit = 0x00000100; |
346 | nv_subdev(fifo)->intr = nv04_fifo_intr; | 362 | nv_subdev(fifo)->intr = nv04_fifo_intr; |
347 | nv_engine(fifo)->cclass = &nv04_fifo_cclass; | ||
348 | nv_engine(fifo)->sclass = nv04_fifo_sclass; | ||
349 | fifo->base.pause = nv04_fifo_pause; | 363 | fifo->base.pause = nv04_fifo_pause; |
350 | fifo->base.start = nv04_fifo_start; | 364 | fifo->base.start = nv04_fifo_start; |
351 | fifo->ramfc_desc = nv04_ramfc; | 365 | fifo->ramfc_desc = nv04_ramfc; |
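
The interrupt path above now derives the channel id as PUSH1 & (fifo->base.nr - 1) rather than masking with the old base.max field; the two are equal because the channel count is a power of two on this hardware, which makes nr - 1 a valid modulo mask. A short demonstration, with an illustrative nr of 16:

/* Why "& (nr - 1)" recovers the channel id: for power-of-two nr it is
 * exactly x % nr. nr = 16 here is illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
        const uint32_t nr = 16;           /* channel count, power of two */
        uint32_t push1 = 0xdead0007;      /* raw CACHE1_PUSH1 readout */

        uint32_t chid = push1 & (nr - 1); /* what nv04_fifo_intr() now does */
        assert(chid == push1 % nr);
        printf("chid = %u\n", chid);      /* 7 */
        return 0;
}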
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h index 5cde3310ee4d..c33dc56f8e02 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h | |||
@@ -1,5 +1,6 @@ | |||
1 | #ifndef __NV04_FIFO_H__ | 1 | #ifndef __NV04_FIFO_H__ |
2 | #define __NV04_FIFO_H__ | 2 | #define __NV04_FIFO_H__ |
3 | #define nv04_fifo(p) container_of((p), struct nv04_fifo, base) | ||
3 | #include "priv.h" | 4 | #include "priv.h" |
4 | 5 | ||
5 | struct ramfc_desc { | 6 | struct ramfc_desc { |
@@ -15,14 +16,6 @@ struct nv04_fifo { | |||
15 | struct ramfc_desc *ramfc_desc; | 16 | struct ramfc_desc *ramfc_desc; |
16 | }; | 17 | }; |
17 | 18 | ||
18 | struct nv04_fifo_base { | ||
19 | struct nvkm_fifo_base base; | ||
20 | }; | ||
21 | |||
22 | int nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *, | ||
23 | struct nvkm_oclass *, void *, u32, | ||
24 | struct nvkm_object **); | ||
25 | |||
26 | void nv04_fifo_dtor(struct nvkm_object *); | 19 | void nv04_fifo_dtor(struct nvkm_object *); |
27 | int nv04_fifo_init(struct nvkm_object *); | 20 | int nv04_fifo_init(struct nvkm_object *); |
28 | #endif | 21 | #endif |
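
The new nv04_fifo(p) macro replaces the old (void *) downcasts with container_of(), which subtracts the offset of the embedded member to recover the enclosing structure. A self-contained demo of the same pattern, with loosely mirrored struct names and the stock kernel macro definition:

/* Standalone demo of the container_of() pattern behind nv04_fifo(p). */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_fifo { int nr; };

struct nv04_fifo {
        struct nvkm_fifo base;   /* embedded base object */
        int ramfc_desc;
};

#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)

int main(void)
{
        struct nv04_fifo fifo = { .base = { .nr = 16 }, .ramfc_desc = 42 };
        struct nvkm_fifo *base = &fifo.base;   /* what callers pass around */

        /* recover the wrapper from the embedded base pointer */
        struct nv04_fifo *self = nv04_fifo(base);
        printf("nr=%d ramfc_desc=%d\n", self->base.nr, self->ramfc_desc);
        return 0;
}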
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c index ae0a1b17eb92..d7fab9598fb0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c | |||
@@ -39,16 +39,11 @@ nv10_ramfc[] = { | |||
39 | {} | 39 | {} |
40 | }; | 40 | }; |
41 | 41 | ||
42 | static struct nvkm_oclass | 42 | static const struct nvkm_fifo_func |
43 | nv10_fifo_cclass = { | 43 | nv10_fifo_func = { |
44 | .handle = NV_ENGCTX(FIFO, 0x10), | 44 | .chan = { |
45 | .ofuncs = &(struct nvkm_ofuncs) { | 45 | &nv10_fifo_dma_oclass, |
46 | .ctor = nv04_fifo_context_ctor, | 46 | NULL |
47 | .dtor = _nvkm_fifo_context_dtor, | ||
48 | .init = _nvkm_fifo_context_init, | ||
49 | .fini = _nvkm_fifo_context_fini, | ||
50 | .rd32 = _nvkm_fifo_context_rd32, | ||
51 | .wr32 = _nvkm_fifo_context_wr32, | ||
52 | }, | 47 | }, |
53 | }; | 48 | }; |
54 | 49 | ||
@@ -65,10 +60,10 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
65 | if (ret) | 60 | if (ret) |
66 | return ret; | 61 | return ret; |
67 | 62 | ||
63 | fifo->base.func = &nv10_fifo_func; | ||
64 | |||
68 | nv_subdev(fifo)->unit = 0x00000100; | 65 | nv_subdev(fifo)->unit = 0x00000100; |
69 | nv_subdev(fifo)->intr = nv04_fifo_intr; | 66 | nv_subdev(fifo)->intr = nv04_fifo_intr; |
70 | nv_engine(fifo)->cclass = &nv10_fifo_cclass; | ||
71 | nv_engine(fifo)->sclass = nv10_fifo_sclass; | ||
72 | fifo->base.pause = nv04_fifo_pause; | 67 | fifo->base.pause = nv04_fifo_pause; |
73 | fifo->base.start = nv04_fifo_start; | 68 | fifo->base.start = nv04_fifo_start; |
74 | fifo->ramfc_desc = nv10_ramfc; | 69 | fifo->ramfc_desc = nv10_ramfc; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c index ff2b6d95d804..a8e28fc24e75 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c | |||
@@ -47,19 +47,6 @@ nv17_ramfc[] = { | |||
47 | {} | 47 | {} |
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct nvkm_oclass | ||
51 | nv17_fifo_cclass = { | ||
52 | .handle = NV_ENGCTX(FIFO, 0x17), | ||
53 | .ofuncs = &(struct nvkm_ofuncs) { | ||
54 | .ctor = nv04_fifo_context_ctor, | ||
55 | .dtor = _nvkm_fifo_context_dtor, | ||
56 | .init = _nvkm_fifo_context_init, | ||
57 | .fini = _nvkm_fifo_context_fini, | ||
58 | .rd32 = _nvkm_fifo_context_rd32, | ||
59 | .wr32 = _nvkm_fifo_context_wr32, | ||
60 | }, | ||
61 | }; | ||
62 | |||
63 | static int | 50 | static int |
64 | nv17_fifo_init(struct nvkm_object *object) | 51 | nv17_fifo_init(struct nvkm_object *object) |
65 | { | 52 | { |
@@ -85,7 +72,7 @@ nv17_fifo_init(struct nvkm_object *object) | |||
85 | nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 | | 72 | nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 | |
86 | 0x00010000); | 73 | 0x00010000); |
87 | 74 | ||
88 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); | 75 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1); |
89 | 76 | ||
90 | nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); | 77 | nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); |
91 | nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); | 78 | nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); |
@@ -96,6 +83,14 @@ nv17_fifo_init(struct nvkm_object *object) | |||
96 | return 0; | 83 | return 0; |
97 | } | 84 | } |
98 | 85 | ||
86 | static const struct nvkm_fifo_func | ||
87 | nv17_fifo_func = { | ||
88 | .chan = { | ||
89 | &nv17_fifo_dma_oclass, | ||
90 | NULL | ||
91 | }, | ||
92 | }; | ||
93 | |||
99 | static int | 94 | static int |
100 | nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 95 | nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
101 | struct nvkm_oclass *oclass, void *data, u32 size, | 96 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -109,10 +104,10 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
109 | if (ret) | 104 | if (ret) |
110 | return ret; | 105 | return ret; |
111 | 106 | ||
107 | fifo->base.func = &nv17_fifo_func; | ||
108 | |||
112 | nv_subdev(fifo)->unit = 0x00000100; | 109 | nv_subdev(fifo)->unit = 0x00000100; |
113 | nv_subdev(fifo)->intr = nv04_fifo_intr; | 110 | nv_subdev(fifo)->intr = nv04_fifo_intr; |
114 | nv_engine(fifo)->cclass = &nv17_fifo_cclass; | ||
115 | nv_engine(fifo)->sclass = nv17_fifo_sclass; | ||
116 | fifo->base.pause = nv04_fifo_pause; | 111 | fifo->base.pause = nv04_fifo_pause; |
117 | fifo->base.start = nv04_fifo_start; | 112 | fifo->base.start = nv04_fifo_start; |
118 | fifo->ramfc_desc = nv17_ramfc; | 113 | fifo->ramfc_desc = nv17_ramfc; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c index 64be69fc9572..aca146377d36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c | |||
@@ -56,19 +56,6 @@ nv40_ramfc[] = { | |||
56 | {} | 56 | {} |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static struct nvkm_oclass | ||
60 | nv40_fifo_cclass = { | ||
61 | .handle = NV_ENGCTX(FIFO, 0x40), | ||
62 | .ofuncs = &(struct nvkm_ofuncs) { | ||
63 | .ctor = nv04_fifo_context_ctor, | ||
64 | .dtor = _nvkm_fifo_context_dtor, | ||
65 | .init = _nvkm_fifo_context_init, | ||
66 | .fini = _nvkm_fifo_context_fini, | ||
67 | .rd32 = _nvkm_fifo_context_rd32, | ||
68 | .wr32 = _nvkm_fifo_context_wr32, | ||
69 | }, | ||
70 | }; | ||
71 | |||
72 | static int | 59 | static int |
73 | nv40_fifo_init(struct nvkm_object *object) | 60 | nv40_fifo_init(struct nvkm_object *object) |
74 | { | 61 | { |
@@ -115,7 +102,7 @@ nv40_fifo_init(struct nvkm_object *object) | |||
115 | break; | 102 | break; |
116 | } | 103 | } |
117 | 104 | ||
118 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); | 105 | nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1); |
119 | 106 | ||
120 | nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); | 107 | nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); |
121 | nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); | 108 | nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); |
@@ -126,6 +113,14 @@ nv40_fifo_init(struct nvkm_object *object) | |||
126 | return 0; | 113 | return 0; |
127 | } | 114 | } |
128 | 115 | ||
116 | static const struct nvkm_fifo_func | ||
117 | nv40_fifo_func = { | ||
118 | .chan = { | ||
119 | &nv40_fifo_dma_oclass, | ||
120 | NULL | ||
121 | }, | ||
122 | }; | ||
123 | |||
129 | static int | 124 | static int |
130 | nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 125 | nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
131 | struct nvkm_oclass *oclass, void *data, u32 size, | 126 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -139,10 +134,10 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
139 | if (ret) | 134 | if (ret) |
140 | return ret; | 135 | return ret; |
141 | 136 | ||
137 | fifo->base.func = &nv40_fifo_func; | ||
138 | |||
142 | nv_subdev(fifo)->unit = 0x00000100; | 139 | nv_subdev(fifo)->unit = 0x00000100; |
143 | nv_subdev(fifo)->intr = nv04_fifo_intr; | 140 | nv_subdev(fifo)->intr = nv04_fifo_intr; |
144 | nv_engine(fifo)->cclass = &nv40_fifo_cclass; | ||
145 | nv_engine(fifo)->sclass = nv40_fifo_sclass; | ||
146 | fifo->base.pause = nv04_fifo_pause; | 141 | fifo->base.pause = nv04_fifo_pause; |
147 | fifo->base.start = nv04_fifo_start; | 142 | fifo->base.start = nv04_fifo_start; |
148 | fifo->ramfc_desc = nv40_ramfc; | 143 | fifo->ramfc_desc = nv40_ramfc; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index bf17cb0e8385..ad653e9c461a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c | |||
@@ -35,7 +35,7 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo) | |||
35 | fifo->cur_runlist = !fifo->cur_runlist; | 35 | fifo->cur_runlist = !fifo->cur_runlist; |
36 | 36 | ||
37 | nvkm_kmap(cur); | 37 | nvkm_kmap(cur); |
38 | for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) { | 38 | for (i = 0, p = 0; i < fifo->base.nr; i++) { |
39 | if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000) | 39 | if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000) |
40 | nvkm_wo32(cur, p++ * 4, i); | 40 | nvkm_wo32(cur, p++ * 4, i); |
41 | } | 41 | } |
@@ -94,6 +94,15 @@ nv50_fifo_dtor(struct nvkm_object *object) | |||
94 | nvkm_fifo_destroy(&fifo->base); | 94 | nvkm_fifo_destroy(&fifo->base); |
95 | } | 95 | } |
96 | 96 | ||
97 | static const struct nvkm_fifo_func | ||
98 | nv50_fifo_func = { | ||
99 | .chan = { | ||
100 | &nv50_fifo_dma_oclass, | ||
101 | &nv50_fifo_gpfifo_oclass, | ||
102 | NULL | ||
103 | }, | ||
104 | }; | ||
105 | |||
97 | static int | 106 | static int |
98 | nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | 107 | nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, |
99 | struct nvkm_oclass *oclass, void *data, u32 size, | 108 | struct nvkm_oclass *oclass, void *data, u32 size, |
@@ -108,6 +117,8 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
108 | if (ret) | 117 | if (ret) |
109 | return ret; | 118 | return ret; |
110 | 119 | ||
120 | fifo->base.func = &nv50_fifo_func; | ||
121 | |||
111 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, | 122 | ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000, |
112 | false, &fifo->runlist[0]); | 123 | false, &fifo->runlist[0]); |
113 | if (ret) | 124 | if (ret) |
@@ -120,8 +131,6 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
120 | 131 | ||
121 | nv_subdev(fifo)->unit = 0x00000100; | 132 | nv_subdev(fifo)->unit = 0x00000100; |
122 | nv_subdev(fifo)->intr = nv04_fifo_intr; | 133 | nv_subdev(fifo)->intr = nv04_fifo_intr; |
123 | nv_engine(fifo)->cclass = &nv50_fifo_cclass; | ||
124 | nv_engine(fifo)->sclass = nv50_fifo_sclass; | ||
125 | fifo->base.pause = nv04_fifo_pause; | 134 | fifo->base.pause = nv04_fifo_pause; |
126 | fifo->base.start = nv04_fifo_start; | 135 | fifo->base.start = nv04_fifo_start; |
127 | return 0; | 136 | return 0; |
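
The runlist builder above now walks channel ids from 0 to base.nr and appends each channel whose enable bit is set to the next free runlist slot. The same compaction loop in standalone form, modeling the 0x80000000 enable bit the real code reads from 0x002600 + i*4 as a plain flag array:

/* Compacting active channel ids into a runlist, mirroring the
 * "for (i = 0, p = 0; i < nr; i++)" loop in nv50_fifo_runlist_update_locked().
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const int nr = 8;
        int enabled[8] = { 0, 1, 0, 0, 1, 1, 0, 1 }; /* per-channel state */
        uint32_t runlist[8];
        int i, p;

        for (i = 0, p = 0; i < nr; i++) {
                if (enabled[i])          /* 0x80000000 bit in the register */
                        runlist[p++] = i;
        }

        printf("runlist (%d entries):", p);
        for (i = 0; i < p; i++)
                printf(" %u", runlist[i]);
        printf("\n");
        return 0;
}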
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h index 306593fc56bb..a7d5dba12fb8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h | |||
@@ -1,5 +1,6 @@ | |||
1 | #ifndef __NV50_FIFO_H__ | 1 | #ifndef __NV50_FIFO_H__ |
2 | #define __NV50_FIFO_H__ | 2 | #define __NV50_FIFO_H__ |
3 | #define nv50_fifo(p) container_of((p), struct nv50_fifo, base) | ||
3 | #include "priv.h" | 4 | #include "priv.h" |
4 | 5 | ||
5 | struct nv50_fifo { | 6 | struct nv50_fifo { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h index b202f7f9413d..a30d160f30db 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef __NVKM_FIFO_PRIV_H__ | 1 | #ifndef __NVKM_FIFO_PRIV_H__ |
2 | #define __NVKM_FIFO_PRIV_H__ | 2 | #define __NVKM_FIFO_PRIV_H__ |
3 | #include <engine/fifo.h> | 3 | #include <engine/fifo.h> |
4 | #include <core/engctx.h> | ||
5 | 4 | ||
6 | void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *); | 5 | void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *); |
7 | void nv04_fifo_start(struct nvkm_fifo *, unsigned long *); | 6 | void nv04_fifo_start(struct nvkm_fifo *, unsigned long *); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index c6ff24b5a11d..ebc9dee03beb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
@@ -1168,10 +1168,14 @@ gf100_gr_intr(struct nvkm_subdev *subdev) | |||
1168 | u32 data = nvkm_rd32(device, 0x400708); | 1168 | u32 data = nvkm_rd32(device, 0x400708); |
1169 | u32 code = nvkm_rd32(device, 0x400110); | 1169 | u32 code = nvkm_rd32(device, 0x400110); |
1170 | u32 class; | 1170 | u32 class; |
1171 | int chid; | 1171 | const char *name = "unknown"; |
1172 | int chid = -1; | ||
1172 | 1173 | ||
1173 | chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); | 1174 | chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); |
1174 | chid = chan ? chan->chid : -1; | 1175 | if (chan) { |
1176 | name = chan->object.client->name; | ||
1177 | chid = chan->chid; | ||
1178 | } | ||
1175 | 1179 | ||
1176 | if (nv_device(gr)->card_type < NV_E0 || subc < 4) | 1180 | if (nv_device(gr)->card_type < NV_E0 || subc < 4) |
1177 | class = nvkm_rd32(device, 0x404200 + (subc * 4)); | 1181 | class = nvkm_rd32(device, 0x404200 + (subc * 4)); |
@@ -1191,8 +1195,8 @@ gf100_gr_intr(struct nvkm_subdev *subdev) | |||
1191 | if (!gf100_gr_mthd_sw(device, class, mthd, data)) { | 1195 | if (!gf100_gr_mthd_sw(device, class, mthd, data)) { |
1192 | nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] " | 1196 | nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] " |
1193 | "subc %d class %04x mthd %04x data %08x\n", | 1197 | "subc %d class %04x mthd %04x data %08x\n", |
1194 | chid, inst << 12, nvkm_client_name(chan), | 1198 | chid, inst << 12, name, subc, |
1195 | subc, class, mthd, data); | 1199 | class, mthd, data); |
1196 | } | 1200 | } |
1197 | nvkm_wr32(device, 0x400100, 0x00000010); | 1201 | nvkm_wr32(device, 0x400100, 0x00000010); |
1198 | stat &= ~0x00000010; | 1202 | stat &= ~0x00000010; |
@@ -1201,8 +1205,7 @@ gf100_gr_intr(struct nvkm_subdev *subdev) | |||
1201 | if (stat & 0x00000020) { | 1205 | if (stat & 0x00000020) { |
1202 | nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] " | 1206 | nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] " |
1203 | "subc %d class %04x mthd %04x data %08x\n", | 1207 | "subc %d class %04x mthd %04x data %08x\n", |
1204 | chid, inst << 12, nvkm_client_name(chan), subc, | 1208 | chid, inst << 12, name, subc, class, mthd, data); |
1205 | class, mthd, data); | ||
1206 | nvkm_wr32(device, 0x400100, 0x00000020); | 1209 | nvkm_wr32(device, 0x400100, 0x00000020); |
1207 | stat &= ~0x00000020; | 1210 | stat &= ~0x00000020; |
1208 | } | 1211 | } |
@@ -1213,14 +1216,14 @@ gf100_gr_intr(struct nvkm_subdev *subdev) | |||
1213 | nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] " | 1216 | nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] " |
1214 | "subc %d class %04x mthd %04x data %08x\n", | 1217 | "subc %d class %04x mthd %04x data %08x\n", |
1215 | code, en ? en->name : "", chid, inst << 12, | 1218 | code, en ? en->name : "", chid, inst << 12, |
1216 | nvkm_client_name(chan), subc, class, mthd, data); | 1219 | name, subc, class, mthd, data); |
1217 | nvkm_wr32(device, 0x400100, 0x00100000); | 1220 | nvkm_wr32(device, 0x400100, 0x00100000); |
1218 | stat &= ~0x00100000; | 1221 | stat &= ~0x00100000; |
1219 | } | 1222 | } |
1220 | 1223 | ||
1221 | if (stat & 0x00200000) { | 1224 | if (stat & 0x00200000) { |
1222 | nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n", | 1225 | nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n", |
1223 | chid, inst << 12, nvkm_client_name(chan)); | 1226 | chid, inst << 12, name); |
1224 | gf100_gr_trap_intr(gr); | 1227 | gf100_gr_trap_intr(gr); |
1225 | nvkm_wr32(device, 0x400100, 0x00200000); | 1228 | nvkm_wr32(device, 0x400100, 0x00200000); |
1226 | stat &= ~0x00200000; | 1229 | stat &= ~0x00200000; |
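
gf100_gr_intr() now resolves the channel once through nvkm_fifo_chan_inst(), which returns with the fifo lock held, caches a client name and chid with "unknown"/-1 fallbacks, and drops the reference with nvkm_fifo_chan_put() after logging. A rough userspace sketch of that acquire/log/put shape, assuming a pthread mutex in place of spin_lock_irqsave() and invented chan_inst()/chan_put() helpers (the flags argument of the real API is omitted); build with -lpthread:

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;

struct chan { int chid; const char *client; uint64_t inst; };

static struct chan channels[] = { { 3, "Xorg", 0x1000 } };

static struct chan *chan_inst(uint64_t inst)  /* returns with lock held */
{
        pthread_mutex_lock(&fifo_lock);
        for (unsigned i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
                if (channels[i].inst == inst)
                        return &channels[i];
        pthread_mutex_unlock(&fifo_lock);     /* not found: drop lock now */
        return NULL;
}

static void chan_put(struct chan **chan)
{
        if (*chan) {
                *chan = NULL;
                pthread_mutex_unlock(&fifo_lock);
        }
}

int main(void)
{
        struct chan *chan = chan_inst(0x1000);
        const char *name = chan ? chan->client : "unknown";
        int chid = chan ? chan->chid : -1;

        printf("TRAP ch %d [%s]\n", chid, name); /* log while chan is pinned */
        chan_put(&chan);
        return 0;
}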
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c index 262638b4e0c5..29feab391fe4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c | |||
@@ -230,7 +230,8 @@ nv20_gr_intr(struct nvkm_subdev *subdev) | |||
230 | "nstatus %08x [%s] ch %d [%s] subc %d " | 230 | "nstatus %08x [%s] ch %d [%s] subc %d " |
231 | "class %04x mthd %04x data %08x\n", | 231 | "class %04x mthd %04x data %08x\n", |
232 | show, msg, nsource, src, nstatus, sta, chid, | 232 | show, msg, nsource, src, nstatus, sta, chid, |
233 | nvkm_client_name(chan), subc, class, mthd, data); | 233 | chan ? chan->object.client->name : "unknown", |
234 | subc, class, mthd, data); | ||
234 | } | 235 | } |
235 | 236 | ||
236 | nvkm_fifo_chan_put(device->fifo, flags, &chan); | 237 | nvkm_fifo_chan_put(device->fifo, flags, &chan); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index 4db2a17f5308..e716ae12b55c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c | |||
@@ -353,7 +353,8 @@ nv40_gr_intr(struct nvkm_subdev *subdev) | |||
353 | "class %04x mthd %04x data %08x\n", | 353 | "class %04x mthd %04x data %08x\n", |
354 | show, msg, nsource, src, nstatus, sta, | 354 | show, msg, nsource, src, nstatus, sta, |
355 | chan ? chan->fifo->chid : -1, inst << 4, | 355 | chan ? chan->fifo->chid : -1, inst << 4, |
356 | nvkm_client_name(chan), subc, class, mthd, data); | 356 | chan ? chan->fifo->object.client->name : "unknown", |
357 | subc, class, mthd, data); | ||
357 | } | 358 | } |
358 | 359 | ||
359 | spin_unlock_irqrestore(&gr->base.engine.lock, flags); | 360 | spin_unlock_irqrestore(&gr->base.engine.lock, flags); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c index daac54075705..5f22dd3c788c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c | |||
@@ -608,7 +608,7 @@ nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old, | |||
608 | 608 | ||
609 | static int | 609 | static int |
610 | nv50_gr_trap_handler(struct nv50_gr *gr, u32 display, | 610 | nv50_gr_trap_handler(struct nv50_gr *gr, u32 display, |
611 | int chid, u64 inst, struct nvkm_fifo_chan *chan) | 611 | int chid, u64 inst, const char *name) |
612 | { | 612 | { |
613 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; | 613 | struct nvkm_subdev *subdev = &gr->base.engine.subdev; |
614 | struct nvkm_device *device = subdev->device; | 614 | struct nvkm_device *device = subdev->device; |
@@ -648,8 +648,7 @@ nv50_gr_trap_handler(struct nv50_gr *gr, u32 display, | |||
648 | "ch %d [%010llx %s] subc %d " | 648 | "ch %d [%010llx %s] subc %d " |
649 | "class %04x mthd %04x data %08x%08x " | 649 | "class %04x mthd %04x data %08x%08x " |
650 | "400808 %08x 400848 %08x\n", | 650 | "400808 %08x 400848 %08x\n", |
651 | chid, inst, nvkm_client_name(chan), | 651 | chid, inst, name, subc, class, mthd, |
652 | subc, class, mthd, | ||
653 | datah, datal, addr, r848); | 652 | datah, datal, addr, r848); |
654 | } else | 653 | } else |
655 | if (display) { | 654 | if (display) { |
@@ -674,9 +673,8 @@ nv50_gr_trap_handler(struct nv50_gr *gr, u32 display, | |||
674 | nvkm_error(subdev, | 673 | nvkm_error(subdev, |
675 | "ch %d [%010llx %s] subc %d " | 674 | "ch %d [%010llx %s] subc %d " |
676 | "class %04x mthd %04x data %08x " | 675 | "class %04x mthd %04x data %08x " |
677 | "40084c %08x\n", chid, inst, | 676 | "40084c %08x\n", chid, inst, name, |
678 | nvkm_client_name(chan), subc, | 677 | subc, class, mthd, data, addr); |
679 | class, mthd, data, addr); | ||
680 | } else | 678 | } else |
681 | if (display) { | 679 | if (display) { |
682 | nvkm_error(subdev, "no stuck command?\n"); | 680 | nvkm_error(subdev, "no stuck command?\n"); |
@@ -849,11 +847,15 @@ nv50_gr_intr(struct nvkm_subdev *subdev) | |||
849 | u32 show = stat, show_bitfield = stat; | 847 | u32 show = stat, show_bitfield = stat; |
850 | const struct nvkm_enum *en; | 848 | const struct nvkm_enum *en; |
851 | unsigned long flags; | 849 | unsigned long flags; |
850 | const char *name = "unknown"; | ||
852 | char msg[128]; | 851 | char msg[128]; |
853 | int chid; | 852 | int chid = -1; |
854 | 853 | ||
855 | chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); | 854 | chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); |
856 | chid = chan ? chan->chid : -1; | 855 | if (chan) { |
856 | name = chan->object.client->name; | ||
857 | chid = chan->chid; | ||
858 | } | ||
857 | 859 | ||
858 | if (show & 0x00100000) { | 860 | if (show & 0x00100000) { |
859 | u32 ecode = nvkm_rd32(device, 0x400110); | 861 | u32 ecode = nvkm_rd32(device, 0x400110); |
@@ -864,7 +866,7 @@ nv50_gr_intr(struct nvkm_subdev *subdev) | |||
864 | } | 866 | } |
865 | 867 | ||
866 | if (stat & 0x00200000) { | 868 | if (stat & 0x00200000) { |
867 | if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, chan)) | 869 | if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name)) |
868 | show &= ~0x00200000; | 870 | show &= ~0x00200000; |
869 | show_bitfield &= ~0x00200000; | 871 | show_bitfield &= ~0x00200000; |
870 | } | 872 | } |
@@ -877,8 +879,8 @@ nv50_gr_intr(struct nvkm_subdev *subdev) | |||
877 | nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show); | 879 | nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show); |
878 | nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d " | 880 | nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d " |
879 | "class %04x mthd %04x data %08x\n", | 881 | "class %04x mthd %04x data %08x\n", |
880 | stat, msg, chid, (u64)inst << 12, | 882 | stat, msg, chid, (u64)inst << 12, name, |
881 | nvkm_client_name(chan), subc, class, mthd, data); | 883 | subc, class, mthd, data); |
882 | } | 884 | } |
883 | 885 | ||
884 | if (nvkm_rd32(device, 0x400824) & (1 << 31)) | 886 | if (nvkm_rd32(device, 0x400824) & (1 << 31)) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c index 05597f2070ed..211b44c00c85 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | |||
@@ -231,8 +231,8 @@ nv31_mpeg_intr(struct nvkm_subdev *subdev) | |||
231 | if (show) { | 231 | if (show) { |
232 | nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n", | 232 | nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n", |
233 | mpeg->chan ? mpeg->chan->fifo->chid : -1, | 233 | mpeg->chan ? mpeg->chan->fifo->chid : -1, |
234 | nvkm_client_name(mpeg->chan), | 234 | mpeg->chan ? mpeg->chan->fifo->object.client->name : |
235 | stat, type, mthd, data); | 235 | "unknown", stat, type, mthd, data); |
236 | } | 236 | } |
237 | 237 | ||
238 | spin_unlock_irqrestore(&mpeg->base.engine.lock, flags); | 238 | spin_unlock_irqrestore(&mpeg->base.engine.lock, flags); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c index 1223baddfb9a..ff9ddc67a292 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | |||
@@ -145,13 +145,11 @@ nv44_mpeg_intr(struct nvkm_subdev *subdev) | |||
145 | u32 mthd = nvkm_rd32(device, 0x00b234); | 145 | u32 mthd = nvkm_rd32(device, 0x00b234); |
146 | u32 data = nvkm_rd32(device, 0x00b238); | 146 | u32 data = nvkm_rd32(device, 0x00b238); |
147 | u32 show = stat; | 147 | u32 show = stat; |
148 | int chid = -1; | ||
149 | 148 | ||
150 | spin_lock_irqsave(&mpeg->base.engine.lock, flags); | 149 | spin_lock_irqsave(&mpeg->base.engine.lock, flags); |
151 | list_for_each_entry(temp, &mpeg->chan, head) { | 150 | list_for_each_entry(temp, &mpeg->chan, head) { |
152 | if (temp->inst >> 4 == inst) { | 151 | if (temp->inst >> 4 == inst) { |
153 | chan = temp; | 152 | chan = temp; |
154 | chid = chan->fifo->chid; | ||
155 | list_del(&chan->head); | 153 | list_del(&chan->head); |
156 | list_add(&chan->head, &mpeg->chan); | 154 | list_add(&chan->head, &mpeg->chan); |
157 | break; | 155 | break; |
@@ -176,7 +174,8 @@ nv44_mpeg_intr(struct nvkm_subdev *subdev) | |||
176 | 174 | ||
177 | if (show) { | 175 | if (show) { |
178 | nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n", | 176 | nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n", |
179 | chid, inst << 4, nvkm_client_name(chan), | 177 | chan ? chan->fifo->chid : -1, inst << 4, |
178 | chan ? chan->fifo->object.client->name : "unknown", | ||
180 | stat, type, mthd, data); | 179 | stat, type, mthd, data); |
181 | } | 180 | } |
182 | 181 | ||
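
The nv44 handler scans its channel list for the matching instance and, on a hit, rotates that channel to the front with list_del() + list_add(), so repeated interrupts from the same channel are found on the first compare. The idiom in standalone form, using a trimmed-down reimplementation of the kernel list helpers:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mpeg_chan { unsigned inst; struct list_head head; };

int main(void)
{
        struct list_head chans;
        struct mpeg_chan a = { .inst = 0x10 }, b = { .inst = 0x20 };

        list_init(&chans);
        list_add(&a.head, &chans);
        list_add(&b.head, &chans);   /* list order: b, a */

        /* find inst 0x10 and rotate it to the front (MRU) */
        for (struct list_head *p = chans.next; p != &chans; p = p->next) {
                struct mpeg_chan *c = container_of(p, struct mpeg_chan, head);
                if (c->inst == 0x10) {
                        list_del(&c->head);
                        list_add(&c->head, &chans);
                        break;
                }
        }

        struct mpeg_chan *first =
                container_of(chans.next, struct mpeg_chan, head);
        printf("front inst = %#x\n", first->inst);  /* 0x10 */
        return 0;
}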
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c index e6544097726c..c15934d7ff63 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c | |||
@@ -84,7 +84,8 @@ g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan) | |||
84 | nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] " | 84 | nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] " |
85 | "subc %d mthd %04x data %08x\n", ssta, | 85 | "subc %d mthd %04x data %08x\n", ssta, |
86 | en ? en->name : "UNKNOWN", chan ? chan->chid : -1, | 86 | en ? en->name : "UNKNOWN", chan ? chan->chid : -1, |
87 | chan ? chan->inst : 0, nvkm_client_name(chan), | 87 | chan ? chan->inst->addr : 0, |
88 | chan ? chan->object.client->name : "unknown", | ||
88 | subc, mthd, data); | 89 | subc, mthd, data); |
89 | } | 90 | } |
90 | 91 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c index 4e8b632ef5b1..35ec1cffb53a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c | |||
@@ -24,8 +24,8 @@ | |||
24 | #include "nv50.h" | 24 | #include "nv50.h" |
25 | 25 | ||
26 | #include <core/handle.h> | 26 | #include <core/handle.h> |
27 | #include <core/namedb.h> | ||
28 | #include <engine/disp.h> | 27 | #include <engine/disp.h> |
28 | #include <engine/fifo/chan.h> | ||
29 | #include <subdev/bar.h> | 29 | #include <subdev/bar.h> |
30 | 30 | ||
31 | #include <nvif/event.h> | 31 | #include <nvif/event.h> |
@@ -136,7 +136,7 @@ nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
136 | return ret; | 136 | return ret; |
137 | } | 137 | } |
138 | 138 | ||
139 | chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12; | 139 | chan->vblank.channel = nvkm_fifo_chan(parent)->inst->addr >> 12; |
140 | return 0; | 140 | return 0; |
141 | } | 141 | } |
142 | 142 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c index 4a88bbd814b7..383030434079 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | |||
@@ -338,7 +338,7 @@ void | |||
338 | gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags) | 338 | gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags) |
339 | { | 339 | { |
340 | struct nvkm_device *device = clk->subdev.device; | 340 | struct nvkm_device *device = clk->subdev.device; |
341 | struct nvkm_fifo *fifo = nvkm_fifo(clk); | 341 | struct nvkm_fifo *fifo = device->fifo; |
342 | 342 | ||
343 | if (fifo && flags) | 343 | if (fifo && flags) |
344 | fifo->start(fifo, flags); | 344 | fifo->start(fifo, flags); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c index ea83f7d9ddc8..711ea96bcd36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c | |||
@@ -189,12 +189,14 @@ nv50_fb_intr(struct nvkm_subdev *subdev) | |||
189 | else sc = NULL; | 189 | else sc = NULL; |
190 | 190 | ||
191 | chan = nvkm_fifo_chan_inst(fifo, inst, &flags); | 191 | chan = nvkm_fifo_chan_inst(fifo, inst, &flags); |
192 | nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel " | 192 | nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] " |
193 | "%08x [%s] engine %02x [%s] client %02x [%s] " | 193 | "engine %02x [%s] client %02x [%s] " |
194 | "subclient %02x [%s] reason %08x [%s]\n", | 194 | "subclient %02x [%s] reason %08x [%s]\n", |
195 | (trap[5] & 0x00000100) ? "read" : "write", | 195 | (trap[5] & 0x00000100) ? "read" : "write", |
196 | trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, inst, | 196 | trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, |
197 | nvkm_client_name(chan), st0, en ? en->name : "", | 197 | chan ? chan->chid : -1, inst, |
198 | chan ? chan->object.client->name : "unknown", | ||
199 | st0, en ? en->name : "", | ||
198 | st2, cl ? cl->name : "", st3, sc ? sc->name : "", | 200 | st2, cl ? cl->name : "", st3, sc ? sc->name : "", |
199 | st1, re ? re->name : ""); | 201 | st1, re ? re->name : ""); |
200 | nvkm_fifo_chan_put(fifo, flags, &chan); | 202 | nvkm_fifo_chan_put(fifo, flags, &chan); |
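
The trap decoder above resolves each raw status field (engine, client, subclient, reason) through an nvkm_enum-style table and prints the name when a match exists, falling back to an empty string or "unknown". A sketch of that value-to-name lookup, with invented table contents standing in for the real nv50 trap tables:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct vm_enum { uint32_t value; const char *name; };

static const struct vm_enum vm_engine[] = {
        { 0x00, "PGRAPH" },
        { 0x06, "PFIFO"  },   /* illustrative entries, not the real table */
        { 0, NULL }
};

static const struct vm_enum *vm_enum_find(const struct vm_enum *en, uint32_t v)
{
        for (; en->name; en++)
                if (en->value == v)
                        return en;
        return NULL;
}

int main(void)
{
        uint32_t st0 = 0x06;  /* engine field pulled out of the trap words */
        const struct vm_enum *en = vm_enum_find(vm_engine, st0);

        printf("engine %02x [%s]\n", st0, en ? en->name : "");
        return 0;
}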