aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2016-10-06 11:49:30 -0400
committerBen Skeggs <bskeggs@redhat.com>2016-10-12 03:29:34 -0400
commit2ecf7c43d78093a24aa44c0a14a335457f065bb2 (patch)
tree57fe01b56fe3679c52bcc2425ceb888b7f8ff18f
parentebf7655aebe6a4e339a269130b399f5f7b0bf4b9 (diff)
drm/nouveau/fb/nv50: defer DMA mapping of scratch page to oneinit() hook
The 100c08 scratch page is mapped using dma_map_page() before the TTM layer has had a chance to set the DMA mask. This means we are still running with the default of 32 when this code executes, and this causes problems for platforms with no memory below 4 GB (such as AMD Seattle). So move the dma_map_page() to the .oneinit hook, which executes after the DMA mask has been set. Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c28
1 file changed, 18 insertions, 10 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..0595e0722bfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
210 nvkm_fifo_chan_put(fifo, flags, &chan); 210 nvkm_fifo_chan_put(fifo, flags, &chan);
211} 211}
212 212
213static int
214nv50_fb_oneinit(struct nvkm_fb *base)
215{
216 struct nv50_fb *fb = nv50_fb(base);
217 struct nvkm_device *device = fb->base.subdev.device;
218
219 fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
220 if (fb->r100c08_page) {
221 fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
222 PAGE_SIZE, DMA_BIDIRECTIONAL);
223 if (dma_mapping_error(device->dev, fb->r100c08))
224 return -EFAULT;
225 }
226
227 return 0;
228}
229
213static void 230static void
214nv50_fb_init(struct nvkm_fb *base) 231nv50_fb_init(struct nvkm_fb *base)
215{ 232{
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
245static const struct nvkm_fb_func 262static const struct nvkm_fb_func
246nv50_fb_ = { 263nv50_fb_ = {
247 .dtor = nv50_fb_dtor, 264 .dtor = nv50_fb_dtor,
265 .oneinit = nv50_fb_oneinit,
248 .init = nv50_fb_init, 266 .init = nv50_fb_init,
249 .intr = nv50_fb_intr, 267 .intr = nv50_fb_intr,
250 .ram_new = nv50_fb_ram_new, 268 .ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
263 fb->func = func; 281 fb->func = func;
264 *pfb = &fb->base; 282 *pfb = &fb->base;
265 283
266 fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
267 if (fb->r100c08_page) {
268 fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
269 PAGE_SIZE, DMA_BIDIRECTIONAL);
270 if (dma_mapping_error(device->dev, fb->r100c08))
271 return -EFAULT;
272 } else {
273 nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
274 }
275
276 return 0; 284 return 0;
277} 285}
278 286