Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c  | 46
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c                   | 33
2 files changed, 58 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index deb96de54b00..ee2431a7804e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
 	struct nvkm_pci *pci = arg;
 	struct nvkm_device *device = pci->subdev.device;
 	bool handled = false;
+
+	if (pci->irq < 0)
+		return IRQ_HANDLED;
+
 	nvkm_mc_intr_unarm(device);
 	if (pci->msi)
 		pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
 
-	if (pci->irq >= 0) {
-		free_irq(pci->irq, pci);
-		pci->irq = -1;
-	}
-
 	if (pci->agp.bridge)
 		nvkm_agp_fini(pci);
 
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	if (pci_is_pcie(pci->pdev))
-		return nvkm_pcie_oneinit(pci);
+	struct pci_dev *pdev = pci->pdev;
+	int ret;
+
+	if (pci_is_pcie(pci->pdev)) {
+		ret = nvkm_pcie_oneinit(pci);
+		if (ret)
+			return ret;
+	}
+
+	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+	if (ret)
+		return ret;
+
+	pci->irq = pdev->irq;
 	return 0;
 }
 
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	struct pci_dev *pdev = pci->pdev;
 	int ret;
 
 	if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
 	if (pci->func->init)
 		pci->func->init(pci);
 
-	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-	if (ret)
-		return ret;
-
-	pci->irq = pdev->irq;
-
 	/* Ensure MSI interrupts are armed, for the case where there are
 	 * already interrupts pending (for whatever reason) at load time.
 	 */
 	if (pci->msi)
 		pci->func->msi_rearm(pci);
 
-	return ret;
+	return 0;
 }
 
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
+
 	nvkm_agp_dtor(pci);
+
+	if (pci->irq >= 0) {
+		/* free_irq() will call the handler, we use pci->irq == -1
+		 * to signal that it's been torn down and should be a noop.
+		 */
+		int irq = pci->irq;
+		pci->irq = -1;
+		free_irq(irq, pci);
+	}
+
 	if (pci->msi)
 		pci_disable_msi(pci->pdev);
+
 	return nvkm_pci(subdev);
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 638540943c61..c94cce96544c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
 	struct vc4_exec_info *exec[2];
 	struct vc4_bo *bo;
 	unsigned long irqflags;
-	unsigned int i, j, unref_list_count, prev_idx;
+	unsigned int i, j, k, unref_list_count;
 
 	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
 	if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
 		return;
 	}
 
-	prev_idx = 0;
+	k = 0;
 	for (i = 0; i < 2; i++) {
 		if (!exec[i])
 			continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
 			WARN_ON(!refcount_read(&bo->usecnt));
 			refcount_inc(&bo->usecnt);
 			drm_gem_object_get(&exec[i]->bo[j]->base);
-			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
 		}
 
 		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
 			 * because they are naturally unpurgeable.
 			 */
 			drm_gem_object_get(&bo->base.base);
-			kernel_state->bo[j + prev_idx] = &bo->base.base;
-			j++;
+			kernel_state->bo[k++] = &bo->base.base;
 		}
-		prev_idx = j + 1;
 	}
 
+	WARN_ON_ONCE(k != state->bo_count);
+
 	if (exec[0])
 		state->start_bin = exec[0]->ct0ca;
 	if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
 
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	V3D_WRITE(V3D_L2CACTL,
+		  V3D_L2CACTL_L2CCLR);
+
+	V3D_WRITE(V3D_SLCACTL,
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
 /* Sets the registers for the next job to actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
 	if (!exec)
 		return;
 
+	/* A previous RCL may have written to one of our textures, and
+	 * our full cache flush at bin time may have occurred before
+	 * that RCL completed. Flush the texture cache now, but not
+	 * the instructions or uniforms (since we don't write those
+	 * from an RCL).
+	 */
+	vc4_flush_texture_caches(dev);
+
 	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
 