author     Ben Skeggs <bskeggs@redhat.com>  2016-11-15 23:38:44 -0500
committer  Ben Skeggs <bskeggs@redhat.com>  2016-11-16 18:50:38 -0500
commit     4391d7f5c79a9fe6fa11cf6c160ca7f7bdb49d2a (patch)
tree       8d640a872ed7d7fce70503b3d7c607d3fc24626a
parent     3a6536c51d5db3adf58dcd466a3aee6233b58544 (diff)
drm/nouveau/disp/nv50-: split chid into chid.ctrl and chid.user
GP102/GP104 make life difficult by redefining the channel indices for
some registers, but not others.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
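The gist of the split, as a minimal illustrative sketch (not part of the patch
itself): chid becomes a small struct whose ctrl field indexes the per-channel
control registers and whose user field is the client-visible channel identifier
used for event/error bits and log messages. This commit sets both to the same
value; the assumption is that a follow-up change can then give GP102/GP104 a
different ctrl index without touching the user-visible one.

	/* illustrative sketch only -- the real definition is in channv50.h below */
	struct {
		int ctrl;	/* index into per-channel control registers */
		int user;	/* client-visible index: event/error bits, messages */
	} chid;

	chan->chid.ctrl = chid;	/* identical for now; may diverge on GP102/GP104 */
	chan->chid.user = chid;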
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c  | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h  |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c  | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c  | 30
7 files changed, 106 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 26990d44ae75..566d2d1b8cb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
 
 	if (mthd->addr) {
 		snprintf(cname_, sizeof(cname_), "%s %d",
-			 mthd->name, chan->chid);
+			 mthd->name, chan->chid.user);
 		cname = cname_;
 	}
 
@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		notify->size = sizeof(struct nvif_notify_uevent_rep);
 		notify->types = 1;
-		notify->index = chan->chid;
+		notify->index = chan->chid.user;
 		return 0;
 	}
 
@@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
-	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
 	return 0;
 }
 
@@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
-	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
 	return 0;
 }
 
@@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	*addr = device->func->resource_addr(device, 0) +
-		0x640000 + (chan->chid * 0x1000);
+		0x640000 + (chan->chid.user * 0x1000);
 	*size = 0x001000;
 	return 0;
 }
@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
-	if (chan->chid >= 0)
-		disp->chan[chan->chid] = NULL;
+	if (chan->chid.user >= 0)
+		disp->chan[chan->chid.user] = NULL;
 	return chan->func->dtor ? chan->func->dtor(chan) : chan;
 }
 
@@ -273,14 +273,15 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
 	chan->func = func;
 	chan->mthd = mthd;
 	chan->root = root;
-	chan->chid = chid;
+	chan->chid.ctrl = chid;
+	chan->chid.user = chid;
 	chan->head = head;
 
-	if (disp->chan[chan->chid]) {
-		chan->chid = -1;
+	if (disp->chan[chan->chid.user]) {
+		chan->chid.user = -1;
 		return -EBUSY;
 	}
-	disp->chan[chan->chid] = chan;
+	disp->chan[chan->chid.user] = chan;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d9fd20..de8db9cfe87d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@ struct nv50_disp_chan {
 	const struct nv50_disp_chan_func *func;
 	const struct nv50_disp_chan_mthd *mthd;
 	struct nv50_disp_root *root;
-	int chid;
+
+	struct {
+		int ctrl;
+		int user;
+	} chid;
 	int head;
 
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index a57f7cef307a..ce7cd74fbd5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
 		      struct nvkm_object *object, u32 handle)
 {
 	return nvkm_ramht_insert(chan->base.root->ramht, object,
-				 chan->base.chid, -9, handle,
-				 chan->base.chid << 27 | 0x00000001);
+				 chan->base.chid.user, -9, handle,
+				 chan->base.chid.user << 27 | 0x00000001);
 }
 
 void
@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* deactivate channel */
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
index ad24c2c57696..d26d3b4c41a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
@@ -32,26 +32,27 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9c6645a357b9..cfba994bef4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
 		    struct nvkm_object *object, u32 handle)
 {
 	return nvkm_ramht_insert(chan->base.root->ramht, object,
-				 chan->base.chid, -10, handle,
-				 chan->base.chid << 28 |
-				 chan->base.chid);
+				 chan->base.chid.user, -10, handle,
+				 chan->base.chid.user << 28 |
+				 chan->base.chid.user);
 }
 
 static void
@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* deactivate channel */
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notifications */
-	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+	nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
 }
 
 static int
@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+	nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index a625a9876e34..0abaa6431943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* activate channel */
-	nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+		u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
 		if ((tmp & 0x00030000) == 0x00010000)
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 9d2618dacf20..0211e0e8a35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
 }
 
@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
-	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+		u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
 		if ((tmp & 0x00030000) == 0x00010000)
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 