aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorBen Skeggs <bskeggs@redhat.com>2012-07-14 05:09:17 -0400
committerBen Skeggs <bskeggs@redhat.com>2012-10-02 23:12:52 -0400
commit3863c9bc887e9638a9d905d55f6038641ece78d6 (patch)
tree923decce50fc9f0ed28e04d5ad83d6518162bad0 /drivers/gpu/drm
parent8a9b889e668a5bc2f4031015fe4893005c43403d (diff)
drm/nouveau/instmem: completely new implementation, as a subdev module
v2 (Ben Skeggs): - some fixes for 64KiB PAGE_SIZE - fix porting issues in (currently unused) nv41/nv44 pciegart code Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/nouveau/Makefile9
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ramht.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c25
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h74
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h88
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c263
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c254
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c220
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c458
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nvc0.c222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c149
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c150
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c149
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c257
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c111
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c56
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_compat.c145
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_compat.h42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h156
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpuobj.c327
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c315
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c142
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c5
69 files changed, 2777 insertions, 2169 deletions
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 68043a40d9ea..9a86ae3e6807 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,6 +9,7 @@ ccflags-y += -I$(src)
9nouveau-y := core/core/client.o 9nouveau-y := core/core/client.o
10nouveau-y += core/core/engine.o 10nouveau-y += core/core/engine.o
11nouveau-y += core/core/enum.o 11nouveau-y += core/core/enum.o
12nouveau-y += core/core/gpuobj.o
12nouveau-y += core/core/handle.o 13nouveau-y += core/core/handle.o
13nouveau-y += core/core/mm.o 14nouveau-y += core/core/mm.o
14nouveau-y += core/core/namedb.o 15nouveau-y += core/core/namedb.o
@@ -19,6 +20,9 @@ nouveau-y += core/core/printk.o
19nouveau-y += core/core/ramht.o 20nouveau-y += core/core/ramht.o
20nouveau-y += core/core/subdev.o 21nouveau-y += core/core/subdev.o
21 22
23nouveau-y += core/subdev/bar/base.o
24nouveau-y += core/subdev/bar/nv50.o
25nouveau-y += core/subdev/bar/nvc0.o
22nouveau-y += core/subdev/bios/base.o 26nouveau-y += core/subdev/bios/base.o
23nouveau-y += core/subdev/bios/bit.o 27nouveau-y += core/subdev/bios/bit.o
24nouveau-y += core/subdev/bios/conn.o 28nouveau-y += core/subdev/bios/conn.o
@@ -66,10 +70,10 @@ nouveau-y += core/subdev/gpio/nvd0.o
66nouveau-y += core/subdev/i2c/base.o 70nouveau-y += core/subdev/i2c/base.o
67nouveau-y += core/subdev/i2c/aux.o 71nouveau-y += core/subdev/i2c/aux.o
68nouveau-y += core/subdev/i2c/bit.o 72nouveau-y += core/subdev/i2c/bit.o
73nouveau-y += core/subdev/instmem/base.o
69nouveau-y += core/subdev/instmem/nv04.o 74nouveau-y += core/subdev/instmem/nv04.o
70nouveau-y += core/subdev/instmem/nv40.o 75nouveau-y += core/subdev/instmem/nv40.o
71nouveau-y += core/subdev/instmem/nv50.o 76nouveau-y += core/subdev/instmem/nv50.o
72nouveau-y += core/subdev/instmem/nvc0.o
73nouveau-y += core/subdev/ltcg/nvc0.o 77nouveau-y += core/subdev/ltcg/nvc0.o
74nouveau-y += core/subdev/mc/base.o 78nouveau-y += core/subdev/mc/base.o
75nouveau-y += core/subdev/mc/nv04.o 79nouveau-y += core/subdev/mc/nv04.o
@@ -80,6 +84,9 @@ nouveau-y += core/subdev/mc/nvc0.o
80nouveau-y += core/subdev/timer/base.o 84nouveau-y += core/subdev/timer/base.o
81nouveau-y += core/subdev/timer/nv04.o 85nouveau-y += core/subdev/timer/nv04.o
82nouveau-y += core/subdev/vm/base.o 86nouveau-y += core/subdev/vm/base.o
87nouveau-y += core/subdev/vm/nv04.o
88nouveau-y += core/subdev/vm/nv41.o
89nouveau-y += core/subdev/vm/nv44.o
83nouveau-y += core/subdev/vm/nv50.o 90nouveau-y += core/subdev/vm/nv50.o
84nouveau-y += core/subdev/vm/nvc0.o 91nouveau-y += core/subdev/vm/nvc0.o
85 92
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
index 59c16192f0e4..5c22864fbd2c 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -86,7 +86,6 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
86{ 86{
87 struct drm_device *dev = chan->dev; 87 struct drm_device *dev = chan->dev;
88 struct drm_nouveau_private *dev_priv = dev->dev_private; 88 struct drm_nouveau_private *dev_priv = dev->dev_private;
89 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
90 struct nouveau_ramht_entry *entry; 89 struct nouveau_ramht_entry *entry;
91 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; 90 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
92 unsigned long flags; 91 unsigned long flags;
@@ -104,21 +103,21 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); 103 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
105 104
106 if (dev_priv->card_type < NV_40) { 105 if (dev_priv->card_type < NV_40) {
107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) | 106 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->addr >> 4) |
108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | 107 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); 108 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
110 } else 109 } else
111 if (dev_priv->card_type < NV_50) { 110 if (dev_priv->card_type < NV_50) {
112 ctx = (gpuobj->pinst >> 4) | 111 ctx = (gpuobj->addr >> 4) |
113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | 112 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); 113 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else { 114 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { 115 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) | 116 ctx = (gpuobj->node->offset << 10) |
118 (chan->id << 28) | 117 (chan->id << 28) |
119 chan->id; /* HASH_TAG */ 118 chan->id; /* HASH_TAG */
120 } else { 119 } else {
121 ctx = (gpuobj->cinst >> 4) | 120 ctx = (gpuobj->node->offset >> 4) |
122 ((gpuobj->engine << 121 ((gpuobj->engine <<
123 NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); 122 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
124 } 123 }
@@ -137,7 +136,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
137 nv_wo32(ramht, co + 4, ctx); 136 nv_wo32(ramht, co + 4, ctx);
138 137
139 spin_unlock_irqrestore(&chan->ramht->lock, flags); 138 spin_unlock_irqrestore(&chan->ramht->lock, flags);
140 instmem->flush(dev); 139 nvimem_flush(dev);
141 return 0; 140 return 0;
142 } 141 }
143 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", 142 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -184,8 +183,6 @@ static void
184nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle) 183nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
185{ 184{
186 struct drm_device *dev = chan->dev; 185 struct drm_device *dev = chan->dev;
187 struct drm_nouveau_private *dev_priv = dev->dev_private;
188 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
189 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; 186 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
190 unsigned long flags; 187 unsigned long flags;
191 u32 co, ho; 188 u32 co, ho;
@@ -201,7 +198,7 @@ nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
201 chan->id, co, handle, nv_ro32(ramht, co + 4)); 198 chan->id, co, handle, nv_ro32(ramht, co + 4));
202 nv_wo32(ramht, co + 0, 0x00000000); 199 nv_wo32(ramht, co + 0, 0x00000000);
203 nv_wo32(ramht, co + 4, 0x00000000); 200 nv_wo32(ramht, co + 4, 0x00000000);
204 instmem->flush(dev); 201 nvimem_flush(dev);
205 goto out; 202 goto out;
206 } 203 }
207 204
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index db3b57369830..4b809319e831 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -25,7 +25,6 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_util.h" 27#include "nouveau_util.h"
28#include <subdev/vm.h>
29#include <core/ramht.h> 28#include <core/ramht.h>
30 29
31/*XXX: This stub is currently used on NV98+ also, as soon as this becomes 30/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index fec52463456a..9150c5ed16c3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -26,7 +26,6 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_util.h" 28#include "nouveau_util.h"
29#include <subdev/vm.h>
30#include <core/ramht.h> 29#include <core/ramht.h>
31#include "fuc/nva3.fuc.h" 30#include "fuc/nva3.fuc.h"
32 31
@@ -38,7 +37,6 @@ static int
38nva3_copy_context_new(struct nouveau_channel *chan, int engine) 37nva3_copy_context_new(struct nouveau_channel *chan, int engine)
39{ 38{
40 struct drm_device *dev = chan->dev; 39 struct drm_device *dev = chan->dev;
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *ramin = chan->ramin; 40 struct nouveau_gpuobj *ramin = chan->ramin;
43 struct nouveau_gpuobj *ctx = NULL; 41 struct nouveau_gpuobj *ctx = NULL;
44 int ret; 42 int ret;
@@ -51,14 +49,14 @@ nva3_copy_context_new(struct nouveau_channel *chan, int engine)
51 return ret; 49 return ret;
52 50
53 nv_wo32(ramin, 0xc0, 0x00190000); 51 nv_wo32(ramin, 0xc0, 0x00190000);
54 nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1); 52 nv_wo32(ramin, 0xc4, ctx->addr + ctx->size - 1);
55 nv_wo32(ramin, 0xc8, ctx->vinst); 53 nv_wo32(ramin, 0xc8, ctx->addr);
56 nv_wo32(ramin, 0xcc, 0x00000000); 54 nv_wo32(ramin, 0xcc, 0x00000000);
57 nv_wo32(ramin, 0xd0, 0x00000000); 55 nv_wo32(ramin, 0xd0, 0x00000000);
58 nv_wo32(ramin, 0xd4, 0x00000000); 56 nv_wo32(ramin, 0xd4, 0x00000000);
59 dev_priv->engine.instmem.flush(dev); 57 nvimem_flush(dev);
60 58
61 atomic_inc(&chan->vm->engref[engine]); 59 nvvm_engref(chan->vm, engine, 1);
62 chan->engctx[engine] = ctx; 60 chan->engctx[engine] = ctx;
63 return 0; 61 return 0;
64} 62}
@@ -84,7 +82,7 @@ nva3_copy_context_del(struct nouveau_channel *chan, int engine)
84 for (i = 0xc0; i <= 0xd4; i += 4) 82 for (i = 0xc0; i <= 0xd4; i += 4)
85 nv_wo32(chan->ramin, i, 0x00000000); 83 nv_wo32(chan->ramin, i, 0x00000000);
86 84
87 atomic_dec(&chan->vm->engref[engine]); 85 nvvm_engref(chan->vm, engine, -1);
88 nouveau_gpuobj_ref(NULL, &ctx); 86 nouveau_gpuobj_ref(NULL, &ctx);
89 chan->engctx[engine] = ctx; 87 chan->engctx[engine] = ctx;
90} 88}
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 926f21c0ebce..f39de5a593d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -26,7 +26,6 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_util.h" 28#include "nouveau_util.h"
29#include <subdev/vm.h>
30#include <core/ramht.h> 29#include <core/ramht.h>
31#include "fuc/nvc0.fuc.h" 30#include "fuc/nvc0.fuc.h"
32 31
@@ -49,7 +48,6 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
49 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine); 48 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
50 struct nvc0_copy_chan *cctx; 49 struct nvc0_copy_chan *cctx;
51 struct drm_device *dev = chan->dev; 50 struct drm_device *dev = chan->dev;
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 struct nouveau_gpuobj *ramin = chan->ramin; 51 struct nouveau_gpuobj *ramin = chan->ramin;
54 int ret; 52 int ret;
55 53
@@ -62,14 +60,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
62 if (ret) 60 if (ret)
63 return ret; 61 return ret;
64 62
65 ret = nouveau_gpuobj_map_vm(cctx->mem, NV_MEM_ACCESS_RW, chan->vm, 63 ret = nouveau_gpuobj_map_vm(cctx->mem, chan->vm, NV_MEM_ACCESS_RW,
66 &cctx->vma); 64 &cctx->vma);
67 if (ret) 65 if (ret)
68 return ret; 66 return ret;
69 67
70 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset)); 68 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
71 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset)); 69 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
72 dev_priv->engine.instmem.flush(dev); 70 nvimem_flush(dev);
73 return 0; 71 return 0;
74} 72}
75 73
@@ -88,7 +86,7 @@ nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
88 struct drm_device *dev = chan->dev; 86 struct drm_device *dev = chan->dev;
89 u32 inst; 87 u32 inst;
90 88
91 inst = (chan->ramin->vinst >> 12); 89 inst = (chan->ramin->addr >> 12);
92 inst |= 0x40000000; 90 inst |= 0x40000000;
93 91
94 /* disable fifo access */ 92 /* disable fifo access */
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 388138946ad9..63051ab0ecca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -25,7 +25,6 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_util.h" 27#include "nouveau_util.h"
28#include <subdev/vm.h>
29#include <core/ramht.h> 28#include <core/ramht.h>
30 29
31struct nv84_crypt_engine { 30struct nv84_crypt_engine {
@@ -36,7 +35,6 @@ static int
36nv84_crypt_context_new(struct nouveau_channel *chan, int engine) 35nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
37{ 36{
38 struct drm_device *dev = chan->dev; 37 struct drm_device *dev = chan->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 struct nouveau_gpuobj *ramin = chan->ramin; 38 struct nouveau_gpuobj *ramin = chan->ramin;
41 struct nouveau_gpuobj *ctx; 39 struct nouveau_gpuobj *ctx;
42 int ret; 40 int ret;
@@ -49,14 +47,14 @@ nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
49 return ret; 47 return ret;
50 48
51 nv_wo32(ramin, 0xa0, 0x00190000); 49 nv_wo32(ramin, 0xa0, 0x00190000);
52 nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1); 50 nv_wo32(ramin, 0xa4, ctx->addr + ctx->size - 1);
53 nv_wo32(ramin, 0xa8, ctx->vinst); 51 nv_wo32(ramin, 0xa8, ctx->addr);
54 nv_wo32(ramin, 0xac, 0); 52 nv_wo32(ramin, 0xac, 0);
55 nv_wo32(ramin, 0xb0, 0); 53 nv_wo32(ramin, 0xb0, 0);
56 nv_wo32(ramin, 0xb4, 0); 54 nv_wo32(ramin, 0xb4, 0);
57 dev_priv->engine.instmem.flush(dev); 55 nvimem_flush(dev);
58 56
59 atomic_inc(&chan->vm->engref[engine]); 57 nvvm_engref(chan->vm, engine, 1);
60 chan->engctx[engine] = ctx; 58 chan->engctx[engine] = ctx;
61 return 0; 59 return 0;
62} 60}
@@ -68,7 +66,7 @@ nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
68 struct drm_device *dev = chan->dev; 66 struct drm_device *dev = chan->dev;
69 u32 inst; 67 u32 inst;
70 68
71 inst = (chan->ramin->vinst >> 12); 69 inst = (chan->ramin->addr >> 12);
72 inst |= 0x80000000; 70 inst |= 0x80000000;
73 71
74 /* mark context as invalid if still on the hardware, not 72 /* mark context as invalid if still on the hardware, not
@@ -84,7 +82,7 @@ nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
84 82
85 nouveau_gpuobj_ref(NULL, &ctx); 83 nouveau_gpuobj_ref(NULL, &ctx);
86 84
87 atomic_dec(&chan->vm->engref[engine]); 85 nvvm_engref(chan->vm, engine, -1);
88 chan->engctx[engine] = NULL; 86 chan->engctx[engine] = NULL;
89} 87}
90 88
@@ -93,7 +91,6 @@ nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
93 u32 handle, u16 class) 91 u32 handle, u16 class)
94{ 92{
95 struct drm_device *dev = chan->dev; 93 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_gpuobj *obj = NULL; 94 struct nouveau_gpuobj *obj = NULL;
98 int ret; 95 int ret;
99 96
@@ -104,7 +101,7 @@ nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
104 obj->class = class; 101 obj->class = class;
105 102
106 nv_wo32(obj, 0x00, class); 103 nv_wo32(obj, 0x00, class);
107 dev_priv->engine.instmem.flush(dev); 104 nvimem_flush(dev);
108 105
109 ret = nouveau_ramht_insert(chan, handle, obj); 106 ret = nouveau_ramht_insert(chan, handle, obj);
110 nouveau_gpuobj_ref(NULL, &obj); 107 nouveau_gpuobj_ref(NULL, &obj);
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 563f60d1ce6e..c9adc1b8a7db 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,7 +26,6 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_util.h" 28#include "nouveau_util.h"
29#include <subdev/vm.h>
30#include <core/ramht.h> 29#include <core/ramht.h>
31 30
32#include "fuc/nv98.fuc.h" 31#include "fuc/nv98.fuc.h"
@@ -43,7 +42,6 @@ static int
43nv98_crypt_context_new(struct nouveau_channel *chan, int engine) 42nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
44{ 43{
45 struct drm_device *dev = chan->dev; 44 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nv98_crypt_priv *priv = nv_engine(dev, engine); 45 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
48 struct nv98_crypt_chan *cctx; 46 struct nv98_crypt_chan *cctx;
49 int ret; 47 int ret;
@@ -52,7 +50,7 @@ nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
52 if (!cctx) 50 if (!cctx)
53 return -ENOMEM; 51 return -ENOMEM;
54 52
55 atomic_inc(&chan->vm->engref[engine]); 53 nvvm_engref(chan->vm, engine, 1);
56 54
57 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC | 55 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
58 NVOBJ_FLAG_ZERO_FREE, &cctx->mem); 56 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
@@ -60,12 +58,12 @@ nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
60 goto error; 58 goto error;
61 59
62 nv_wo32(chan->ramin, 0xa0, 0x00190000); 60 nv_wo32(chan->ramin, 0xa0, 0x00190000);
63 nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1); 61 nv_wo32(chan->ramin, 0xa4, cctx->mem->addr + cctx->mem->size - 1);
64 nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst); 62 nv_wo32(chan->ramin, 0xa8, cctx->mem->addr);
65 nv_wo32(chan->ramin, 0xac, 0x00000000); 63 nv_wo32(chan->ramin, 0xac, 0x00000000);
66 nv_wo32(chan->ramin, 0xb0, 0x00000000); 64 nv_wo32(chan->ramin, 0xb0, 0x00000000);
67 nv_wo32(chan->ramin, 0xb4, 0x00000000); 65 nv_wo32(chan->ramin, 0xb4, 0x00000000);
68 dev_priv->engine.instmem.flush(dev); 66 nvimem_flush(dev);
69 67
70error: 68error:
71 if (ret) 69 if (ret)
@@ -84,7 +82,7 @@ nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
84 82
85 nouveau_gpuobj_ref(NULL, &cctx->mem); 83 nouveau_gpuobj_ref(NULL, &cctx->mem);
86 84
87 atomic_dec(&chan->vm->engref[engine]); 85 nvvm_engref(chan->vm, engine, -1);
88 chan->engctx[engine] = NULL; 86 chan->engctx[engine] = NULL;
89 kfree(cctx); 87 kfree(cctx);
90} 88}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ba76cf094a6b..6ab7eb0dd9bb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -32,8 +32,6 @@
32#include <core/ramht.h> 32#include <core/ramht.h>
33#include "nouveau_software.h" 33#include "nouveau_software.h"
34 34
35#include <core/subdev/instmem/nv04.h>
36
37static struct ramfc_desc { 35static struct ramfc_desc {
38 unsigned bits:6; 36 unsigned bits:6;
39 unsigned ctxs:5; 37 unsigned ctxs:5;
@@ -120,7 +118,7 @@ nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
120 /* initialise default fifo context */ 118 /* initialise default fifo context */
121 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base); 119 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
122 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base); 120 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
123 nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->pinst >> 4); 121 nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->addr >> 4);
124 nv_wo32(priv->ramfc, fctx->ramfc + 0x10, 122 nv_wo32(priv->ramfc, fctx->ramfc + 0x10,
125 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 123 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
126 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 124 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -203,9 +201,9 @@ nv04_fifo_init(struct drm_device *dev, int engine)
203 201
204 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 202 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
205 ((dev_priv->ramht->bits - 9) << 16) | 203 ((dev_priv->ramht->bits - 9) << 16) |
206 (dev_priv->ramht->gpuobj->pinst >> 8)); 204 (dev_priv->ramht->gpuobj->addr >> 8));
207 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8); 205 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
208 nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->pinst >> 8); 206 nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
209 207
210 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); 208 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
211 209
@@ -486,15 +484,14 @@ int
486nv04_fifo_create(struct drm_device *dev) 484nv04_fifo_create(struct drm_device *dev)
487{ 485{
488 struct drm_nouveau_private *dev_priv = dev->dev_private; 486 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
490 struct nv04_fifo_priv *priv; 487 struct nv04_fifo_priv *priv;
491 488
492 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 489 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
493 if (!priv) 490 if (!priv)
494 return -ENOMEM; 491 return -ENOMEM;
495 492
496 nouveau_gpuobj_ref(imem->ramro, &priv->ramro); 493 nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
497 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); 494 nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
498 495
499 priv->base.base.destroy = nv04_fifo_destroy; 496 priv->base.base.destroy = nv04_fifo_destroy;
500 priv->base.base.init = nv04_fifo_init; 497 priv->base.base.init = nv04_fifo_init;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 0da287caf43f..2d38fa88f9c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -31,8 +31,6 @@
31#include "nouveau_util.h" 31#include "nouveau_util.h"
32#include <core/ramht.h> 32#include <core/ramht.h>
33 33
34#include <core/subdev/instmem/nv04.h>
35
36static struct ramfc_desc { 34static struct ramfc_desc {
37 unsigned bits:6; 35 unsigned bits:6;
38 unsigned ctxs:5; 36 unsigned ctxs:5;
@@ -91,7 +89,7 @@ nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
91 /* initialise default fifo context */ 89 /* initialise default fifo context */
92 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base); 90 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
93 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base); 91 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
94 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4); 92 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
95 nv_wo32(priv->ramfc, fctx->ramfc + 0x14, 93 nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
96 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 94 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
97 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 95 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -115,15 +113,14 @@ int
115nv10_fifo_create(struct drm_device *dev) 113nv10_fifo_create(struct drm_device *dev)
116{ 114{
117 struct drm_nouveau_private *dev_priv = dev->dev_private; 115 struct drm_nouveau_private *dev_priv = dev->dev_private;
118 struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
119 struct nv10_fifo_priv *priv; 116 struct nv10_fifo_priv *priv;
120 117
121 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 118 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
122 if (!priv) 119 if (!priv)
123 return -ENOMEM; 120 return -ENOMEM;
124 121
125 nouveau_gpuobj_ref(imem->ramro, &priv->ramro); 122 nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
126 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); 123 nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
127 124
128 priv->base.base.destroy = nv04_fifo_destroy; 125 priv->base.base.destroy = nv04_fifo_destroy;
129 priv->base.base.init = nv04_fifo_init; 126 priv->base.base.init = nv04_fifo_init;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index 99b88e0ef452..2f700a15e286 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -31,8 +31,6 @@
31#include "nouveau_util.h" 31#include "nouveau_util.h"
32#include <core/ramht.h> 32#include <core/ramht.h>
33 33
34#include <core/subdev/instmem/nv04.h>
35
36static struct ramfc_desc { 34static struct ramfc_desc {
37 unsigned bits:6; 35 unsigned bits:6;
38 unsigned ctxs:5; 36 unsigned ctxs:5;
@@ -96,7 +94,7 @@ nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
96 /* initialise default fifo context */ 94 /* initialise default fifo context */
97 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base); 95 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
98 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base); 96 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
99 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4); 97 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
100 nv_wo32(priv->ramfc, fctx->ramfc + 0x14, 98 nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
101 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 99 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
102 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 100 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -131,10 +129,10 @@ nv17_fifo_init(struct drm_device *dev, int engine)
131 129
132 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 130 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
133 ((dev_priv->ramht->bits - 9) << 16) | 131 ((dev_priv->ramht->bits - 9) << 16) |
134 (dev_priv->ramht->gpuobj->pinst >> 8)); 132 (dev_priv->ramht->gpuobj->addr >> 8));
135 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8); 133 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
136 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 | 134 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
137 priv->ramfc->pinst >> 8); 135 priv->ramfc->addr >> 8);
138 136
139 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); 137 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
140 138
@@ -157,15 +155,14 @@ int
157nv17_fifo_create(struct drm_device *dev) 155nv17_fifo_create(struct drm_device *dev)
158{ 156{
159 struct drm_nouveau_private *dev_priv = dev->dev_private; 157 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
161 struct nv17_fifo_priv *priv; 158 struct nv17_fifo_priv *priv;
162 159
163 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 160 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
164 if (!priv) 161 if (!priv)
165 return -ENOMEM; 162 return -ENOMEM;
166 163
167 nouveau_gpuobj_ref(imem->ramro, &priv->ramro); 164 nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
168 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); 165 nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
169 166
170 priv->base.base.destroy = nv04_fifo_destroy; 167 priv->base.base.destroy = nv04_fifo_destroy;
171 priv->base.base.init = nv17_fifo_init; 168 priv->base.base.init = nv17_fifo_init;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index df53b9f27208..65a670f92a07 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -31,8 +31,6 @@
31#include "nouveau_util.h" 31#include "nouveau_util.h"
32#include <core/ramht.h> 32#include <core/ramht.h>
33 33
34#include <core/subdev/instmem/nv04.h>
35
36static struct ramfc_desc { 34static struct ramfc_desc {
37 unsigned bits:6; 35 unsigned bits:6;
38 unsigned ctxs:5; 36 unsigned ctxs:5;
@@ -104,7 +102,7 @@ nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
104 /* initialise default fifo context */ 102 /* initialise default fifo context */
105 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base); 103 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
106 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base); 104 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
107 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4); 105 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
108 nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 | 106 nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 |
109 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 107 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
110 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 108 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -144,8 +142,8 @@ nv40_fifo_init(struct drm_device *dev, int engine)
144 142
145 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 143 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
146 ((dev_priv->ramht->bits - 9) << 16) | 144 ((dev_priv->ramht->bits - 9) << 16) |
147 (dev_priv->ramht->gpuobj->pinst >> 8)); 145 (dev_priv->ramht->gpuobj->addr >> 8));
148 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8); 146 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
149 147
150 switch (dev_priv->chipset) { 148 switch (dev_priv->chipset) {
151 case 0x47: 149 case 0x47:
@@ -163,7 +161,7 @@ nv40_fifo_init(struct drm_device *dev, int engine)
163 default: 161 default:
164 nv_wr32(dev, 0x002230, 0x00000000); 162 nv_wr32(dev, 0x002230, 0x00000000);
165 nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 + 163 nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 +
166 priv->ramfc->pinst) >> 16) | 164 priv->ramfc->addr) >> 16) |
167 0x00030000); 165 0x00030000);
168 break; 166 break;
169 } 167 }
@@ -189,15 +187,14 @@ int
189nv40_fifo_create(struct drm_device *dev) 187nv40_fifo_create(struct drm_device *dev)
190{ 188{
191 struct drm_nouveau_private *dev_priv = dev->dev_private; 189 struct drm_nouveau_private *dev_priv = dev->dev_private;
192 struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
193 struct nv40_fifo_priv *priv; 190 struct nv40_fifo_priv *priv;
194 191
195 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 192 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
196 if (!priv) 193 if (!priv)
197 return -ENOMEM; 194 return -ENOMEM;
198 195
199 nouveau_gpuobj_ref(imem->ramro, &priv->ramro); 196 nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
200 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); 197 nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
201 198
202 priv->base.base.destroy = nv04_fifo_destroy; 199 priv->base.base.destroy = nv04_fifo_destroy;
203 priv->base.base.init = nv40_fifo_init; 200 priv->base.base.init = nv40_fifo_init;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 2309871704e7..7b5b1592bf61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -29,7 +29,6 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include <engine/fifo.h> 30#include <engine/fifo.h>
31#include <core/ramht.h> 31#include <core/ramht.h>
32#include <subdev/vm.h>
33 32
34struct nv50_fifo_priv { 33struct nv50_fifo_priv {
35 struct nouveau_fifo_priv base; 34 struct nouveau_fifo_priv base;
@@ -45,7 +44,6 @@ void
45nv50_fifo_playlist_update(struct drm_device *dev) 44nv50_fifo_playlist_update(struct drm_device *dev)
46{ 45{
47 struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); 46 struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
48 struct drm_nouveau_private *dev_priv = dev->dev_private;
49 struct nouveau_gpuobj *cur; 47 struct nouveau_gpuobj *cur;
50 int i, p; 48 int i, p;
51 49
@@ -57,9 +55,9 @@ nv50_fifo_playlist_update(struct drm_device *dev)
57 nv_wo32(cur, p++ * 4, i); 55 nv_wo32(cur, p++ * 4, i);
58 } 56 }
59 57
60 dev_priv->engine.instmem.flush(dev); 58 nvimem_flush(dev);
61 59
62 nv_wr32(dev, 0x0032f4, cur->vinst >> 12); 60 nv_wr32(dev, 0x0032f4, cur->addr >> 12);
63 nv_wr32(dev, 0x0032ec, p); 61 nv_wr32(dev, 0x0032ec, p);
64 nv_wr32(dev, 0x002500, 0x00000101); 62 nv_wr32(dev, 0x002500, 0x00000101);
65} 63}
@@ -72,14 +70,14 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
72 struct drm_device *dev = chan->dev; 70 struct drm_device *dev = chan->dev;
73 struct drm_nouveau_private *dev_priv = dev->dev_private; 71 struct drm_nouveau_private *dev_priv = dev->dev_private;
74 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; 72 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
75 u64 instance = chan->ramin->vinst >> 12; 73 u64 instance = chan->ramin->addr >> 12;
76 unsigned long flags; 74 unsigned long flags;
77 int ret = 0, i; 75 int ret = 0, i;
78 76
79 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 77 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
80 if (!fctx) 78 if (!fctx)
81 return -ENOMEM; 79 return -ENOMEM;
82 atomic_inc(&chan->vm->engref[engine]); 80 nvvm_engref(chan->vm, engine, 1);
83 81
84 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 82 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
85 NV50_USER(chan->id), PAGE_SIZE); 83 NV50_USER(chan->id), PAGE_SIZE);
@@ -93,7 +91,7 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
93 nv_wo32(chan->ramin, 0x3c, 0x403f6078); 91 nv_wo32(chan->ramin, 0x3c, 0x403f6078);
94 nv_wo32(chan->ramin, 0x40, 0x00000000); 92 nv_wo32(chan->ramin, 0x40, 0x00000000);
95 nv_wo32(chan->ramin, 0x44, 0x01003fff); 93 nv_wo32(chan->ramin, 0x44, 0x01003fff);
96 nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4); 94 nv_wo32(chan->ramin, 0x48, chan->pushbuf->node->offset >> 4);
97 nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset)); 95 nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
98 nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) | 96 nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
99 drm_order(chan->dma.ib_max + 1) << 16); 97 drm_order(chan->dma.ib_max + 1) << 16);
@@ -102,9 +100,9 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
102 nv_wo32(chan->ramin, 0x7c, 0x30000001); 100 nv_wo32(chan->ramin, 0x7c, 0x30000001);
103 nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) | 101 nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
104 (4 << 24) /* SEARCH_FULL */ | 102 (4 << 24) /* SEARCH_FULL */ |
105 (chan->ramht->gpuobj->cinst >> 4)); 103 (chan->ramht->gpuobj->node->offset >> 4));
106 104
107 dev_priv->engine.instmem.flush(dev); 105 nvimem_flush(dev);
108 106
109 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 107 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
110 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance); 108 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
@@ -141,7 +139,7 @@ nv50_fifo_kickoff(struct nouveau_channel *chan)
141 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001); 139 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
142 140
143 /* do the kickoff... */ 141 /* do the kickoff... */
144 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); 142 nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
145 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) { 143 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
146 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); 144 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
147 done = false; 145 done = false;
@@ -177,7 +175,7 @@ nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
177 chan->user = NULL; 175 chan->user = NULL;
178 } 176 }
179 177
180 atomic_dec(&chan->vm->engref[engine]); 178 nvvm_engref(chan->vm, engine, -1);
181 chan->engctx[engine] = NULL; 179 chan->engctx[engine] = NULL;
182 kfree(fctx); 180 kfree(fctx);
183} 181}
@@ -200,7 +198,7 @@ nv50_fifo_init(struct drm_device *dev, int engine)
200 for (i = 0; i < 128; i++) { 198 for (i = 0; i < 128; i++) {
201 struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 199 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
202 if (chan && chan->engctx[engine]) 200 if (chan && chan->engctx[engine])
203 instance = 0x80000000 | chan->ramin->vinst >> 12; 201 instance = 0x80000000 | chan->ramin->addr >> 12;
204 else 202 else
205 instance = 0x00000000; 203 instance = 0x00000000;
206 nv_wr32(dev, 0x002600 + (i * 4), instance); 204 nv_wr32(dev, 0x002600 + (i * 4), instance);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index f505d1ed8866..63a4941e285c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -29,7 +29,6 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include <engine/fifo.h> 30#include <engine/fifo.h>
31#include <core/ramht.h> 31#include <core/ramht.h>
32#include <subdev/vm.h>
33 32
34struct nv84_fifo_priv { 33struct nv84_fifo_priv {
35 struct nouveau_fifo_priv base; 34 struct nouveau_fifo_priv base;
@@ -58,7 +57,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
58 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 57 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
59 if (!fctx) 58 if (!fctx)
60 return -ENOMEM; 59 return -ENOMEM;
61 atomic_inc(&chan->vm->engref[engine]); 60 nvvm_engref(chan->vm, engine, 1);
62 61
63 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 62 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
64 NV50_USER(chan->id), PAGE_SIZE); 63 NV50_USER(chan->id), PAGE_SIZE);
@@ -72,7 +71,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
72 if (ret) 71 if (ret)
73 goto error; 72 goto error;
74 73
75 instance = fctx->ramfc->vinst >> 8; 74 instance = fctx->ramfc->addr >> 8;
76 75
77 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache); 76 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
78 if (ret) 77 if (ret)
@@ -81,7 +80,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
81 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078); 80 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
82 nv_wo32(fctx->ramfc, 0x40, 0x00000000); 81 nv_wo32(fctx->ramfc, 0x40, 0x00000000);
83 nv_wo32(fctx->ramfc, 0x44, 0x01003fff); 82 nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
84 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4); 83 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->node->offset >> 4);
85 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset)); 84 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
86 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) | 85 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
87 drm_order(chan->dma.ib_max + 1) << 16); 86 drm_order(chan->dma.ib_max + 1) << 16);
@@ -90,14 +89,14 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
90 nv_wo32(fctx->ramfc, 0x7c, 0x30000001); 89 nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
91 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | 90 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
92 (4 << 24) /* SEARCH_FULL */ | 91 (4 << 24) /* SEARCH_FULL */ |
93 (chan->ramht->gpuobj->cinst >> 4)); 92 (chan->ramht->gpuobj->node->offset >> 4));
94 nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10); 93 nv_wo32(fctx->ramfc, 0x88, fctx->cache->addr >> 10);
95 nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12); 94 nv_wo32(fctx->ramfc, 0x98, chan->ramin->addr >> 12);
96 95
97 nv_wo32(chan->ramin, 0x00, chan->id); 96 nv_wo32(chan->ramin, 0x00, chan->id);
98 nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8); 97 nv_wo32(chan->ramin, 0x04, fctx->ramfc->addr >> 8);
99 98
100 dev_priv->engine.instmem.flush(dev); 99 nvimem_flush(dev);
101 100
102 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 101 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
103 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance); 102 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
@@ -127,7 +126,7 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
127 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); 126 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
128 127
129 /* tell any engines on this channel to unload their contexts */ 128 /* tell any engines on this channel to unload their contexts */
130 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); 129 nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
131 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) 130 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
132 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); 131 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
133 132
@@ -145,7 +144,7 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
145 nouveau_gpuobj_ref(NULL, &fctx->ramfc); 144 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
146 nouveau_gpuobj_ref(NULL, &fctx->cache); 145 nouveau_gpuobj_ref(NULL, &fctx->cache);
147 146
148 atomic_dec(&chan->vm->engref[engine]); 147 nvvm_engref(chan->vm, engine, -1);
149 chan->engctx[engine] = NULL; 148 chan->engctx[engine] = NULL;
150 kfree(fctx); 149 kfree(fctx);
151} 150}
@@ -169,7 +168,7 @@ nv84_fifo_init(struct drm_device *dev, int engine)
169 for (i = 0; i < 128; i++) { 168 for (i = 0; i < 128; i++) {
170 struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 169 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
171 if (chan && (fctx = chan->engctx[engine])) 170 if (chan && (fctx = chan->engctx[engine]))
172 instance = 0x80000000 | fctx->ramfc->vinst >> 8; 171 instance = 0x80000000 | fctx->ramfc->addr >> 8;
173 else 172 else
174 instance = 0x00000000; 173 instance = 0x00000000;
175 nv_wr32(dev, 0x002600 + (i * 4), instance); 174 nv_wr32(dev, 0x002600 + (i * 4), instance);
@@ -200,7 +199,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
200 for (i = 0; i < priv->base.channels; i++) { 199 for (i = 0; i < priv->base.channels; i++) {
201 struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 200 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
202 if (chan) 201 if (chan)
203 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); 202 nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
204 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) { 203 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
205 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i); 204 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
206 return -EBUSY; 205 return -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index f60221d9235d..6535a999015d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -48,8 +48,6 @@ struct nvc0_fifo_chan {
48static void 48static void
49nvc0_fifo_playlist_update(struct drm_device *dev) 49nvc0_fifo_playlist_update(struct drm_device *dev)
50{ 50{
51 struct drm_nouveau_private *dev_priv = dev->dev_private;
52 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
53 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); 51 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
54 struct nouveau_gpuobj *cur; 52 struct nouveau_gpuobj *cur;
55 int i, p; 53 int i, p;
@@ -64,9 +62,9 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
64 nv_wo32(cur, p + 4, 0x00000004); 62 nv_wo32(cur, p + 4, 0x00000004);
65 p += 8; 63 p += 8;
66 } 64 }
67 pinstmem->flush(dev); 65 nvimem_flush(dev);
68 66
69 nv_wr32(dev, 0x002270, cur->vinst >> 12); 67 nv_wr32(dev, 0x002270, cur->addr >> 12);
70 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3)); 68 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
71 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000)) 69 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
72 NV_ERROR(dev, "PFIFO - playlist update failed\n"); 70 NV_ERROR(dev, "PFIFO - playlist update failed\n");
@@ -76,11 +74,9 @@ static int
76nvc0_fifo_context_new(struct nouveau_channel *chan, int engine) 74nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
77{ 75{
78 struct drm_device *dev = chan->dev; 76 struct drm_device *dev = chan->dev;
79 struct drm_nouveau_private *dev_priv = dev->dev_private;
80 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
81 struct nvc0_fifo_priv *priv = nv_engine(dev, engine); 77 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
82 struct nvc0_fifo_chan *fctx; 78 struct nvc0_fifo_chan *fctx;
83 u64 usermem = priv->user.mem->vinst + chan->id * 0x1000; 79 u64 usermem = priv->user.mem->addr + chan->id * 0x1000;
84 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; 80 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
85 int ret, i; 81 int ret, i;
86 82
@@ -115,10 +111,10 @@ nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
115 nv_wo32(chan->ramin, 0xb8, 0xf8000000); 111 nv_wo32(chan->ramin, 0xb8, 0xf8000000);
116 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */ 112 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
117 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */ 113 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
118 pinstmem->flush(dev); 114 nvimem_flush(dev);
119 115
120 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 | 116 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
121 (chan->ramin->vinst >> 12)); 117 (chan->ramin->addr >> 12));
122 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001); 118 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
123 nvc0_fifo_playlist_update(dev); 119 nvc0_fifo_playlist_update(dev);
124 120
@@ -198,7 +194,7 @@ nvc0_fifo_init(struct drm_device *dev, int engine)
198 continue; 194 continue;
199 195
200 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 | 196 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
201 (chan->ramin->vinst >> 12)); 197 (chan->ramin->addr >> 12));
202 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001); 198 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
203 } 199 }
204 nvc0_fifo_playlist_update(dev); 200 nvc0_fifo_playlist_update(dev);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 1c06bde1bccf..461fbf62492d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -55,8 +55,6 @@ struct nve0_fifo_chan {
55static void 55static void
56nve0_fifo_playlist_update(struct drm_device *dev, u32 engine) 56nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
57{ 57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
60 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); 58 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
61 struct nve0_fifo_engine *peng = &priv->engine[engine]; 59 struct nve0_fifo_engine *peng = &priv->engine[engine];
62 struct nouveau_gpuobj *cur; 60 struct nouveau_gpuobj *cur;
@@ -84,9 +82,9 @@ nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
84 nv_wo32(cur, p + 4, 0x00000000); 82 nv_wo32(cur, p + 4, 0x00000000);
85 p += 8; 83 p += 8;
86 } 84 }
87 pinstmem->flush(dev); 85 nvimem_flush(dev);
88 86
89 nv_wr32(dev, 0x002270, cur->vinst >> 12); 87 nv_wr32(dev, 0x002270, cur->addr >> 12);
90 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3)); 88 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
91 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) 89 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
92 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine); 90 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
@@ -96,11 +94,9 @@ static int
96nve0_fifo_context_new(struct nouveau_channel *chan, int engine) 94nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
97{ 95{
98 struct drm_device *dev = chan->dev; 96 struct drm_device *dev = chan->dev;
99 struct drm_nouveau_private *dev_priv = dev->dev_private;
100 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
101 struct nve0_fifo_priv *priv = nv_engine(dev, engine); 97 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
102 struct nve0_fifo_chan *fctx; 98 struct nve0_fifo_chan *fctx;
103 u64 usermem = priv->user.mem->vinst + chan->id * 512; 99 u64 usermem = priv->user.mem->addr + chan->id * 512;
104 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; 100 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
105 int ret = 0, i; 101 int ret = 0, i;
106 102
@@ -135,10 +131,10 @@ nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
135 nv_wo32(chan->ramin, 0xe8, chan->id); 131 nv_wo32(chan->ramin, 0xe8, chan->id);
136 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */ 132 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
137 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */ 133 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
138 pinstmem->flush(dev); 134 nvimem_flush(dev);
139 135
140 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 | 136 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
141 (chan->ramin->vinst >> 12)); 137 (chan->ramin->addr >> 12));
142 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); 138 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
143 nve0_fifo_playlist_update(dev, fctx->engine); 139 nve0_fifo_playlist_update(dev, fctx->engine);
144 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); 140 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
@@ -207,7 +203,7 @@ nve0_fifo_init(struct drm_device *dev, int engine)
207 continue; 203 continue;
208 204
209 nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 | 205 nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
210 (chan->ramin->vinst >> 12)); 206 (chan->ramin->addr >> 12));
211 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400); 207 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
212 nve0_fifo_playlist_update(dev, fctx->engine); 208 nve0_fifo_playlist_update(dev, fctx->engine);
213 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400); 209 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index dd31156e3029..0d874b8b18e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -52,7 +52,7 @@ nv20_graph_unload_context(struct drm_device *dev)
52 return 0; 52 return 0;
53 grctx = chan->engctx[NVOBJ_ENGINE_GR]; 53 grctx = chan->engctx[NVOBJ_ENGINE_GR];
54 54
55 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4); 55 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->addr >> 4);
56 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, 56 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
57 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); 57 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
58 58
@@ -437,7 +437,7 @@ nv20_graph_context_new(struct nouveau_channel *chan, int engine)
437 /* CTX_USER */ 437 /* CTX_USER */
438 nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1); 438 nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
439 439
440 nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4); 440 nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->addr >> 4);
441 chan->engctx[engine] = grctx; 441 chan->engctx[engine] = grctx;
442 return 0; 442 return 0;
443} 443}
@@ -505,7 +505,7 @@ nv20_graph_init(struct drm_device *dev, int engine)
505 nv_wr32(dev, NV03_PMC_ENABLE, 505 nv_wr32(dev, NV03_PMC_ENABLE,
506 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); 506 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
507 507
508 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4); 508 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
509 509
510 nv20_graph_rdi(dev); 510 nv20_graph_rdi(dev);
511 511
@@ -592,7 +592,7 @@ nv30_graph_init(struct drm_device *dev, int engine)
592 nv_wr32(dev, NV03_PMC_ENABLE, 592 nv_wr32(dev, NV03_PMC_ENABLE,
593 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); 593 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
594 594
595 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4); 595 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
596 596
597 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 597 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
598 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 598 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index ab3af6d15381..466d21514b2c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -52,16 +52,16 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
52 52
53 /* Initialise default context values */ 53 /* Initialise default context values */
54 nv40_grctx_fill(dev, grctx); 54 nv40_grctx_fill(dev, grctx);
55 nv_wo32(grctx, 0, grctx->vinst); 55 nv_wo32(grctx, 0, grctx->addr);
56 56
57 /* init grctx pointer in ramfc, and on PFIFO if channel is 57 /* init grctx pointer in ramfc, and on PFIFO if channel is
58 * already active there 58 * already active there
59 */ 59 */
60 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 60 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
61 nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4); 61 nv_wo32(chan->ramfc, 0x38, grctx->addr >> 4);
62 nv_mask(dev, 0x002500, 0x00000001, 0x00000000); 62 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
63 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id) 63 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
64 nv_wr32(dev, 0x0032e0, grctx->vinst >> 4); 64 nv_wr32(dev, 0x0032e0, grctx->addr >> 4);
65 nv_mask(dev, 0x002500, 0x00000001, 0x00000001); 65 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
66 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 66 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
67 67
@@ -75,7 +75,7 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
75 struct nouveau_gpuobj *grctx = chan->engctx[engine]; 75 struct nouveau_gpuobj *grctx = chan->engctx[engine];
76 struct drm_device *dev = chan->dev; 76 struct drm_device *dev = chan->dev;
77 struct drm_nouveau_private *dev_priv = dev->dev_private; 77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 u32 inst = 0x01000000 | (grctx->pinst >> 4); 78 u32 inst = 0x01000000 | (grctx->addr >> 4);
79 unsigned long flags; 79 unsigned long flags;
80 80
81 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 81 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -357,7 +357,7 @@ nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
357 continue; 357 continue;
358 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; 358 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
359 359
360 if (grctx && grctx->pinst == inst) 360 if (grctx && grctx->addr == inst)
361 break; 361 break;
362 } 362 }
363 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 363 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index b2e9ad4dda06..28932c4662e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -30,7 +30,6 @@
30#include <engine/fifo.h> 30#include <engine/fifo.h>
31#include <core/ramht.h> 31#include <core/ramht.h>
32#include "nouveau_dma.h" 32#include "nouveau_dma.h"
33#include <subdev/vm.h>
34#include "nv50_evo.h" 33#include "nv50_evo.h"
35 34
36struct nv50_graph_engine { 35struct nv50_graph_engine {
@@ -155,18 +154,18 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
155 154
156 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 155 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
157 nv_wo32(ramin, hdr + 0x00, 0x00190002); 156 nv_wo32(ramin, hdr + 0x00, 0x00190002);
158 nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1); 157 nv_wo32(ramin, hdr + 0x04, grctx->addr + grctx->size - 1);
159 nv_wo32(ramin, hdr + 0x08, grctx->vinst); 158 nv_wo32(ramin, hdr + 0x08, grctx->addr);
160 nv_wo32(ramin, hdr + 0x0c, 0); 159 nv_wo32(ramin, hdr + 0x0c, 0);
161 nv_wo32(ramin, hdr + 0x10, 0); 160 nv_wo32(ramin, hdr + 0x10, 0);
162 nv_wo32(ramin, hdr + 0x14, 0x00010000); 161 nv_wo32(ramin, hdr + 0x14, 0x00010000);
163 162
164 nv50_grctx_fill(dev, grctx); 163 nv50_grctx_fill(dev, grctx);
165 nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12); 164 nv_wo32(grctx, 0x00000, chan->ramin->addr >> 12);
166 165
167 dev_priv->engine.instmem.flush(dev); 166 nvimem_flush(dev);
168 167
169 atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]); 168 nvvm_engref(chan->vm, engine, 1);
170 chan->engctx[NVOBJ_ENGINE_GR] = grctx; 169 chan->engctx[NVOBJ_ENGINE_GR] = grctx;
171 return 0; 170 return 0;
172} 171}
@@ -181,9 +180,9 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
181 180
182 for (i = hdr; i < hdr + 24; i += 4) 181 for (i = hdr; i < hdr + 24; i += 4)
183 nv_wo32(chan->ramin, i, 0); 182 nv_wo32(chan->ramin, i, 0);
184 dev_priv->engine.instmem.flush(dev); 183 nvimem_flush(dev);
185 184
186 atomic_dec(&chan->vm->engref[engine]); 185 nvvm_engref(chan->vm, engine, -1);
187 nouveau_gpuobj_ref(NULL, &grctx); 186 nouveau_gpuobj_ref(NULL, &grctx);
188 chan->engctx[engine] = NULL; 187 chan->engctx[engine] = NULL;
189} 188}
@@ -193,7 +192,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
193 u32 handle, u16 class) 192 u32 handle, u16 class)
194{ 193{
195 struct drm_device *dev = chan->dev; 194 struct drm_device *dev = chan->dev;
196 struct drm_nouveau_private *dev_priv = dev->dev_private;
197 struct nouveau_gpuobj *obj = NULL; 195 struct nouveau_gpuobj *obj = NULL;
198 int ret; 196 int ret;
199 197
@@ -207,7 +205,7 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
207 nv_wo32(obj, 0x04, 0x00000000); 205 nv_wo32(obj, 0x04, 0x00000000);
208 nv_wo32(obj, 0x08, 0x00000000); 206 nv_wo32(obj, 0x08, 0x00000000);
209 nv_wo32(obj, 0x0c, 0x00000000); 207 nv_wo32(obj, 0x0c, 0x00000000);
210 dev_priv->engine.instmem.flush(dev); 208 nvimem_flush(dev);
211 209
212 ret = nouveau_ramht_insert(chan, handle, obj); 210 ret = nouveau_ramht_insert(chan, handle, obj);
213 nouveau_gpuobj_ref(NULL, &obj); 211 nouveau_gpuobj_ref(NULL, &obj);
@@ -723,7 +721,7 @@ nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
723 if (!chan || !chan->ramin) 721 if (!chan || !chan->ramin)
724 continue; 722 continue;
725 723
726 if (inst == chan->ramin->vinst) 724 if (inst == chan->ramin->addr)
727 break; 725 break;
728 } 726 }
729 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 727 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 8d6eb89b8117..b741b038f0fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -65,7 +65,7 @@ nvc0_graph_load_context(struct nouveau_channel *chan)
65 struct drm_device *dev = chan->dev; 65 struct drm_device *dev = chan->dev;
66 66
67 nv_wr32(dev, 0x409840, 0x00000030); 67 nv_wr32(dev, 0x409840, 0x00000030);
68 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); 68 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
69 nv_wr32(dev, 0x409504, 0x00000003); 69 nv_wr32(dev, 0x409504, 0x00000003);
70 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010)) 70 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
71 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n"); 71 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
@@ -90,7 +90,6 @@ nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
90static int 90static int
91nvc0_graph_construct_context(struct nouveau_channel *chan) 91nvc0_graph_construct_context(struct nouveau_channel *chan)
92{ 92{
93 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
94 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 93 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
95 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 94 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
96 struct drm_device *dev = chan->dev; 95 struct drm_device *dev = chan->dev;
@@ -103,7 +102,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
103 102
104 if (!nouveau_ctxfw) { 103 if (!nouveau_ctxfw) {
105 nv_wr32(dev, 0x409840, 0x80000000); 104 nv_wr32(dev, 0x409840, 0x80000000);
106 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); 105 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
107 nv_wr32(dev, 0x409504, 0x00000001); 106 nv_wr32(dev, 0x409504, 0x00000001);
108 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { 107 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
109 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n"); 108 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
@@ -118,7 +117,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
118 nv_wo32(grch->grctx, 0x20, 0); 117 nv_wo32(grch->grctx, 0x20, 0);
119 nv_wo32(grch->grctx, 0x28, 0); 118 nv_wo32(grch->grctx, 0x28, 0);
120 nv_wo32(grch->grctx, 0x2c, 0); 119 nv_wo32(grch->grctx, 0x2c, 0);
121 dev_priv->engine.instmem.flush(dev); 120 nvimem_flush(dev);
122 } 121 }
123 122
124 ret = nvc0_grctx_generate(chan); 123 ret = nvc0_grctx_generate(chan);
@@ -127,7 +126,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
127 126
128 if (!nouveau_ctxfw) { 127 if (!nouveau_ctxfw) {
129 nv_wr32(dev, 0x409840, 0x80000000); 128 nv_wr32(dev, 0x409840, 0x80000000);
130 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); 129 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
131 nv_wr32(dev, 0x409504, 0x00000002); 130 nv_wr32(dev, 0x409504, 0x00000002);
132 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { 131 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
133 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n"); 132 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
@@ -136,7 +135,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
136 goto err; 135 goto err;
137 } 136 }
138 } else { 137 } else {
139 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); 138 ret = nvc0_graph_unload_context_to(dev, chan->ramin->addr);
140 if (ret) 139 if (ret)
141 goto err; 140 goto err;
142 } 141 }
@@ -165,8 +164,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
165 if (ret) 164 if (ret)
166 return ret; 165 return ret;
167 166
168 ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW | 167 ret = nouveau_gpuobj_map_vm(grch->unk408004, chan->vm,
169 NV_MEM_ACCESS_SYS, chan->vm, 168 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
170 &grch->unk408004_vma); 169 &grch->unk408004_vma);
171 if (ret) 170 if (ret)
172 return ret; 171 return ret;
@@ -175,8 +174,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
175 if (ret) 174 if (ret)
176 return ret; 175 return ret;
177 176
178 ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW | 177 ret = nouveau_gpuobj_map_vm(grch->unk40800c, chan->vm,
179 NV_MEM_ACCESS_SYS, chan->vm, 178 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
180 &grch->unk40800c_vma); 179 &grch->unk40800c_vma);
181 if (ret) 180 if (ret)
182 return ret; 181 return ret;
@@ -186,8 +185,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
186 if (ret) 185 if (ret)
187 return ret; 186 return ret;
188 187
189 ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW, 188 ret = nouveau_gpuobj_map_vm(grch->unk418810, chan->vm,
190 chan->vm, &grch->unk418810_vma); 189 NV_MEM_ACCESS_RW, &grch->unk418810_vma);
191 if (ret) 190 if (ret)
192 return ret; 191 return ret;
193 192
@@ -195,9 +194,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
195 if (ret) 194 if (ret)
196 return ret; 195 return ret;
197 196
198 ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW | 197 ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm, NV_MEM_ACCESS_RW |
199 NV_MEM_ACCESS_SYS, chan->vm, 198 NV_MEM_ACCESS_SYS, &grch->mmio_vma);
200 &grch->mmio_vma);
201 if (ret) 199 if (ret)
202 return ret; 200 return ret;
203 201
@@ -268,8 +266,6 @@ static int
268nvc0_graph_context_new(struct nouveau_channel *chan, int engine) 266nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
269{ 267{
270 struct drm_device *dev = chan->dev; 268 struct drm_device *dev = chan->dev;
271 struct drm_nouveau_private *dev_priv = dev->dev_private;
272 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
273 struct nvc0_graph_priv *priv = nv_engine(dev, engine); 269 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
274 struct nvc0_graph_chan *grch; 270 struct nvc0_graph_chan *grch;
275 struct nouveau_gpuobj *grctx; 271 struct nouveau_gpuobj *grctx;
@@ -285,9 +281,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
285 if (ret) 281 if (ret)
286 goto error; 282 goto error;
287 283
288 ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW | 284 ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
289 NV_MEM_ACCESS_SYS, chan->vm, 285 NV_MEM_ACCESS_SYS, &grch->grctx_vma);
290 &grch->grctx_vma);
291 if (ret) 286 if (ret)
292 return ret; 287 return ret;
293 288
@@ -299,7 +294,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
299 294
300 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4); 295 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
301 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset)); 296 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
302 pinstmem->flush(dev); 297 nvimem_flush(dev);
303 298
304 if (!priv->grctx_vals) { 299 if (!priv->grctx_vals) {
305 ret = nvc0_graph_construct_context(chan); 300 ret = nvc0_graph_construct_context(chan);
@@ -324,7 +319,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
324 nv_wo32(grctx, 0x28, 0); 319 nv_wo32(grctx, 0x28, 0);
325 nv_wo32(grctx, 0x2c, 0); 320 nv_wo32(grctx, 0x2c, 0);
326 } 321 }
327 pinstmem->flush(dev); 322 nvimem_flush(dev);
328 return 0; 323 return 0;
329 324
330error: 325error:
@@ -373,8 +368,8 @@ nvc0_graph_init_obj418880(struct drm_device *dev)
373 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000); 368 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
374 for (i = 0; i < 4; i++) 369 for (i = 0; i < 4; i++)
375 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000); 370 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
376 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8); 371 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
377 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8); 372 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
378} 373}
379 374
380static void 375static void
@@ -662,7 +657,7 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
662 if (!chan || !chan->ramin) 657 if (!chan || !chan->ramin)
663 continue; 658 continue;
664 659
665 if (inst == chan->ramin->vinst) 660 if (inst == chan->ramin->addr)
666 break; 661 break;
667 } 662 }
668 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 663 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 5f671a21b8bb..47bda63ddbcd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -63,7 +63,7 @@ nve0_graph_load_context(struct nouveau_channel *chan)
63 struct drm_device *dev = chan->dev; 63 struct drm_device *dev = chan->dev;
64 64
65 nv_wr32(dev, 0x409840, 0x00000030); 65 nv_wr32(dev, 0x409840, 0x00000030);
66 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); 66 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
67 nv_wr32(dev, 0x409504, 0x00000003); 67 nv_wr32(dev, 0x409504, 0x00000003);
68 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010)) 68 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
69 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n"); 69 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
@@ -88,7 +88,6 @@ nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
88static int 88static int
89nve0_graph_construct_context(struct nouveau_channel *chan) 89nve0_graph_construct_context(struct nouveau_channel *chan)
90{ 90{
91 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
92 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 91 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
93 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 92 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
94 struct drm_device *dev = chan->dev; 93 struct drm_device *dev = chan->dev;
@@ -105,13 +104,13 @@ nve0_graph_construct_context(struct nouveau_channel *chan)
105 nv_wo32(grch->grctx, 0x20, 0); 104 nv_wo32(grch->grctx, 0x20, 0);
106 nv_wo32(grch->grctx, 0x28, 0); 105 nv_wo32(grch->grctx, 0x28, 0);
107 nv_wo32(grch->grctx, 0x2c, 0); 106 nv_wo32(grch->grctx, 0x2c, 0);
108 dev_priv->engine.instmem.flush(dev); 107 nvimem_flush(dev);
109 108
110 ret = nve0_grctx_generate(chan); 109 ret = nve0_grctx_generate(chan);
111 if (ret) 110 if (ret)
112 goto err; 111 goto err;
113 112
114 ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst); 113 ret = nve0_graph_unload_context_to(dev, chan->ramin->addr);
115 if (ret) 114 if (ret)
116 goto err; 115 goto err;
117 116
@@ -141,8 +140,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
141 if (ret) 140 if (ret)
142 return ret; 141 return ret;
143 142
144 ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW | 143 ret = nouveau_gpuobj_map_vm(grch->unk408004, chan->vm,
145 NV_MEM_ACCESS_SYS, chan->vm, 144 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
146 &grch->unk408004_vma); 145 &grch->unk408004_vma);
147 if (ret) 146 if (ret)
148 return ret; 147 return ret;
@@ -151,8 +150,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
151 if (ret) 150 if (ret)
152 return ret; 151 return ret;
153 152
154 ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW | 153 ret = nouveau_gpuobj_map_vm(grch->unk40800c, chan->vm,
155 NV_MEM_ACCESS_SYS, chan->vm, 154 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
156 &grch->unk40800c_vma); 155 &grch->unk40800c_vma);
157 if (ret) 156 if (ret)
158 return ret; 157 return ret;
@@ -162,8 +161,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
162 if (ret) 161 if (ret)
163 return ret; 162 return ret;
164 163
165 ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW, 164 ret = nouveau_gpuobj_map_vm(grch->unk418810, chan->vm,
166 chan->vm, &grch->unk418810_vma); 165 NV_MEM_ACCESS_RW, &grch->unk418810_vma);
167 if (ret) 166 if (ret)
168 return ret; 167 return ret;
169 168
@@ -171,8 +170,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
171 if (ret) 170 if (ret)
172 return ret; 171 return ret;
173 172
174 ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW | 173 ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm,
175 NV_MEM_ACCESS_SYS, chan->vm, 174 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
176 &grch->mmio_vma); 175 &grch->mmio_vma);
177 if (ret) 176 if (ret)
178 return ret; 177 return ret;
@@ -221,8 +220,6 @@ static int
221nve0_graph_context_new(struct nouveau_channel *chan, int engine) 220nve0_graph_context_new(struct nouveau_channel *chan, int engine)
222{ 221{
223 struct drm_device *dev = chan->dev; 222 struct drm_device *dev = chan->dev;
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
226 struct nve0_graph_priv *priv = nv_engine(dev, engine); 223 struct nve0_graph_priv *priv = nv_engine(dev, engine);
227 struct nve0_graph_chan *grch; 224 struct nve0_graph_chan *grch;
228 struct nouveau_gpuobj *grctx; 225 struct nouveau_gpuobj *grctx;
@@ -238,9 +235,8 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
238 if (ret) 235 if (ret)
239 goto error; 236 goto error;
240 237
241 ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW | 238 ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
242 NV_MEM_ACCESS_SYS, chan->vm, 239 NV_MEM_ACCESS_SYS, &grch->grctx_vma);
243 &grch->grctx_vma);
244 if (ret) 240 if (ret)
245 return ret; 241 return ret;
246 242
@@ -252,7 +248,7 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
252 248
253 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4); 249 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
254 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset)); 250 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
255 pinstmem->flush(dev); 251 nvimem_flush(dev);
256 252
257 if (!priv->grctx_vals) { 253 if (!priv->grctx_vals) {
258 ret = nve0_graph_construct_context(chan); 254 ret = nve0_graph_construct_context(chan);
@@ -272,7 +268,7 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
272 nv_wo32(grctx, 0x28, 0); 268 nv_wo32(grctx, 0x28, 0);
273 nv_wo32(grctx, 0x2c, 0); 269 nv_wo32(grctx, 0x2c, 0);
274 270
275 pinstmem->flush(dev); 271 nvimem_flush(dev);
276 return 0; 272 return 0;
277 273
278error: 274error:
@@ -321,8 +317,8 @@ nve0_graph_init_obj418880(struct drm_device *dev)
321 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000); 317 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
322 for (i = 0; i < 4; i++) 318 for (i = 0; i < 4; i++)
323 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000); 319 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
324 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8); 320 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
325 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8); 321 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
326} 322}
327 323
328static void 324static void
@@ -591,7 +587,7 @@ nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
591 if (!chan || !chan->ramin) 587 if (!chan || !chan->ramin)
592 continue; 588 continue;
593 589
594 if (inst == chan->ramin->vinst) 590 if (inst == chan->ramin->addr)
595 break; 591 break;
596 } 592 }
597 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 593 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index eb5455fed1bf..a0258c766850 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -74,8 +74,8 @@ nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
74 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 74 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
75 nv_mask(dev, 0x002500, 0x00000001, 0x00000000); 75 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
76 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id) 76 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
77 nv_wr32(dev, 0x00330c, ctx->pinst >> 4); 77 nv_wr32(dev, 0x00330c, ctx->addr >> 4);
78 nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4); 78 nv_wo32(chan->ramfc, 0x54, ctx->addr >> 4);
79 nv_mask(dev, 0x002500, 0x00000001, 0x00000001); 79 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
81 81
@@ -90,7 +90,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
90 struct nouveau_gpuobj *ctx = chan->engctx[engine]; 90 struct nouveau_gpuobj *ctx = chan->engctx[engine];
91 struct drm_device *dev = chan->dev; 91 struct drm_device *dev = chan->dev;
92 unsigned long flags; 92 unsigned long flags;
93 u32 inst = 0x80000000 | (ctx->pinst >> 4); 93 u32 inst = 0x80000000 | (ctx->addr >> 4);
94 94
95 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 95 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
@@ -224,7 +224,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
224 continue; 224 continue;
225 225
226 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG]; 226 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
227 if (ctx && ctx->pinst == inst) 227 if (ctx && ctx->addr == inst)
228 break; 228 break;
229 } 229 }
230 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 230 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 47d37a2f478f..4e3292ed80c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -47,7 +47,6 @@ static int
47nv50_mpeg_context_new(struct nouveau_channel *chan, int engine) 47nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
48{ 48{
49 struct drm_device *dev = chan->dev; 49 struct drm_device *dev = chan->dev;
50 struct drm_nouveau_private *dev_priv = dev->dev_private;
51 struct nouveau_gpuobj *ramin = chan->ramin; 50 struct nouveau_gpuobj *ramin = chan->ramin;
52 struct nouveau_gpuobj *ctx = NULL; 51 struct nouveau_gpuobj *ctx = NULL;
53 int ret; 52 int ret;
@@ -60,15 +59,15 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
60 return ret; 59 return ret;
61 60
62 nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002); 61 nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
63 nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1); 62 nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->addr + ctx->size - 1);
64 nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst); 63 nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->addr);
65 nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0); 64 nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
66 nv_wo32(ramin, CTX_PTR(dev, 0x10), 0); 65 nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
67 nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000); 66 nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
68 67
69 nv_wo32(ctx, 0x70, 0x00801ec1); 68 nv_wo32(ctx, 0x70, 0x00801ec1);
70 nv_wo32(ctx, 0x7c, 0x0000037c); 69 nv_wo32(ctx, 0x7c, 0x0000037c);
71 dev_priv->engine.instmem.flush(dev); 70 nvimem_flush(dev);
72 71
73 chan->engctx[engine] = ctx; 72 chan->engctx[engine] = ctx;
74 return 0; 73 return 0;
@@ -93,7 +92,6 @@ nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
93 u32 handle, u16 class) 92 u32 handle, u16 class)
94{ 93{
95 struct drm_device *dev = chan->dev; 94 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_gpuobj *obj = NULL; 95 struct nouveau_gpuobj *obj = NULL;
98 int ret; 96 int ret;
99 97
@@ -107,7 +105,7 @@ nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
107 nv_wo32(obj, 0x04, 0x00000000); 105 nv_wo32(obj, 0x04, 0x00000000);
108 nv_wo32(obj, 0x08, 0x00000000); 106 nv_wo32(obj, 0x08, 0x00000000);
109 nv_wo32(obj, 0x0c, 0x00000000); 107 nv_wo32(obj, 0x0c, 0x00000000);
110 dev_priv->engine.instmem.flush(dev); 108 nvimem_flush(dev);
111 109
112 ret = nouveau_ramht_insert(chan, handle, obj); 110 ret = nouveau_ramht_insert(chan, handle, obj);
113 nouveau_gpuobj_ref(NULL, &obj); 111 nouveau_gpuobj_ref(NULL, &obj);
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 20d33dcd7f40..384de6deeeea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -25,7 +25,6 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_util.h" 27#include "nouveau_util.h"
28#include <subdev/vm.h>
29#include <core/ramht.h> 28#include <core/ramht.h>
30 29
31struct nv98_ppp_engine { 30struct nv98_ppp_engine {
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index eb7e1960dd81..5e164a684aec 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -25,7 +25,6 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_util.h" 27#include "nouveau_util.h"
28#include <subdev/vm.h>
29#include <core/ramht.h> 28#include <core/ramht.h>
30 29
31/*XXX: This stub is currently used on NV98+ also, as soon as this becomes 30/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
new file mode 100644
index 000000000000..4f4ff4502c3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -0,0 +1,55 @@
1#ifndef __NOUVEAU_BAR_H__
2#define __NOUVEAU_BAR_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/fb.h>
8
9struct nouveau_vma;
10
11struct nouveau_bar {
12 struct nouveau_subdev base;
13
14 int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
15 struct nouveau_mem *, struct nouveau_object **);
16 void __iomem *iomem;
17
18 int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
19 u32 flags, struct nouveau_vma *);
20 int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
21 u32 flags, struct nouveau_vma *);
22 void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
23 void (*flush)(struct nouveau_bar *);
24};
25
26static inline struct nouveau_bar *
27nouveau_bar(void *obj)
28{
29 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
30}
31
32#define nouveau_bar_create(p,e,o,d) \
33 nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
34#define nouveau_bar_init(p) \
35 nouveau_subdev_init(&(p)->base)
36#define nouveau_bar_fini(p,s) \
37 nouveau_subdev_fini(&(p)->base, (s))
38
39int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, int, void **);
41void nouveau_bar_destroy(struct nouveau_bar *);
42
43void _nouveau_bar_dtor(struct nouveau_object *);
44#define _nouveau_bar_init _nouveau_subdev_init
45#define _nouveau_bar_fini _nouveau_subdev_fini
46
47extern struct nouveau_oclass nv50_bar_oclass;
48extern struct nouveau_oclass nvc0_bar_oclass;
49
50int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
51 struct nouveau_mem *, struct nouveau_object **);
52
53void nv84_bar_flush(struct nouveau_bar *);
54
55#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 91dbdc16ac94..f18c1a1e121c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -6,6 +6,7 @@
6#include <core/device.h> 6#include <core/device.h>
7#endif 7#endif
8#include <core/mm.h> 8#include <core/mm.h>
9
9#include <subdev/vm.h> 10#include <subdev/vm.h>
10 11
11/* memory type/access flags, do not match hardware values */ 12/* memory type/access flags, do not match hardware values */
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
new file mode 100644
index 000000000000..2adfcafa4478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -0,0 +1,74 @@
1#ifndef __NOUVEAU_INSTMEM_H__
2#define __NOUVEAU_INSTMEM_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6#include <core/mm.h>
7
8struct nouveau_instobj {
9 struct nouveau_object base;
10 struct list_head head;
11 struct nouveau_mm heap;
12 u32 *suspend;
13 u64 addr;
14 u32 size;
15};
16
17static inline struct nouveau_instobj *
18nv_memobj(void *obj)
19{
20#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
21 if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
22 nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
23#endif
24 return obj;
25}
26
27#define nouveau_instobj_create(p,e,o,d) \
28 nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
29#define nouveau_instobj_init(p) \
30 nouveau_object_init(&(p)->base)
31#define nouveau_instobj_fini(p,s) \
32 nouveau_object_fini(&(p)->base, (s))
33
34int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, int, void **);
36void nouveau_instobj_destroy(struct nouveau_instobj *);
37
38void _nouveau_instobj_dtor(struct nouveau_object *);
39#define _nouveau_instobj_init nouveau_object_init
40#define _nouveau_instobj_fini nouveau_object_fini
41
42struct nouveau_instmem {
43 struct nouveau_subdev base;
44 struct list_head list;
45
46 u32 reserved;
47 int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
48 u32 size, u32 align, struct nouveau_object **);
49};
50
51static inline struct nouveau_instmem *
52nouveau_instmem(void *obj)
53{
54 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
55}
56
57#define nouveau_instmem_create(p,e,o,d) \
58 nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
59#define nouveau_instmem_destroy(p) \
60 nouveau_subdev_destroy(&(p)->base)
61int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
62 struct nouveau_oclass *, int, void **);
63int nouveau_instmem_init(struct nouveau_instmem *);
64int nouveau_instmem_fini(struct nouveau_instmem *, bool);
65
66#define _nouveau_instmem_dtor _nouveau_subdev_dtor
67int _nouveau_instmem_init(struct nouveau_object *);
68int _nouveau_instmem_fini(struct nouveau_object *, bool);
69
70extern struct nouveau_oclass nv04_instmem_oclass;
71extern struct nouveau_oclass nv40_instmem_oclass;
72extern struct nouveau_oclass nv50_instmem_oclass;
73
74#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index 6d3764b416f4..81577bb783e8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -25,10 +25,14 @@
25#ifndef __NOUVEAU_VM_H__ 25#ifndef __NOUVEAU_VM_H__
26#define __NOUVEAU_VM_H__ 26#define __NOUVEAU_VM_H__
27 27
28#ifndef XXX_THIS_IS_A_HACK
29#include <core/object.h>
30#include <core/subdev.h>
31#include <core/device.h>
32#endif
28#include <core/mm.h> 33#include <core/mm.h>
29 34
30struct nouveau_mem; 35#ifndef XXX_THIS_IS_A_HACK
31
32struct nouveau_vm_pgt { 36struct nouveau_vm_pgt {
33 struct nouveau_gpuobj *obj[2]; 37 struct nouveau_gpuobj *obj[2];
34 u32 refcount[2]; 38 u32 refcount[2];
@@ -38,6 +42,10 @@ struct nouveau_vm_pgd {
38 struct list_head head; 42 struct list_head head;
39 struct nouveau_gpuobj *obj; 43 struct nouveau_gpuobj *obj;
40}; 44};
45#endif
46
47struct nouveau_gpuobj;
48struct nouveau_mem;
41 49
42struct nouveau_vma { 50struct nouveau_vma {
43 struct list_head head; 51 struct list_head head;
@@ -49,21 +57,29 @@ struct nouveau_vma {
49}; 57};
50 58
51struct nouveau_vm { 59struct nouveau_vm {
52 struct drm_device *dev; 60 struct nouveau_vmmgr *vmm;
53 struct nouveau_mm mm; 61 struct nouveau_mm mm;
54 int refcount; 62 int refcount;
55 63
56 struct list_head pgd_list; 64 struct list_head pgd_list;
57 atomic_t engref[16]; 65 atomic_t engref[64]; //NVDEV_SUBDEV_NR];
58 66
59 struct nouveau_vm_pgt *pgt; 67 struct nouveau_vm_pgt *pgt;
60 u32 fpde; 68 u32 fpde;
61 u32 lpde; 69 u32 lpde;
70};
71
72#ifndef XXX_THIS_IS_A_HACK
73struct nouveau_vmmgr {
74 struct nouveau_subdev base;
62 75
63 u32 pgt_bits; 76 u32 pgt_bits;
64 u8 spg_shift; 77 u8 spg_shift;
65 u8 lpg_shift; 78 u8 lpg_shift;
66 79
80 int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
81 u64 mm_offset, struct nouveau_vm **);
82
67 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, 83 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
68 struct nouveau_gpuobj *pgt[2]); 84 struct nouveau_gpuobj *pgt[2]);
69 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, 85 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -71,16 +87,48 @@ struct nouveau_vm {
71 u64 phys, u64 delta); 87 u64 phys, u64 delta);
72 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, 88 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
73 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *); 89 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
74
75 void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
76 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
77 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); 90 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
78 void (*flush)(struct nouveau_vm *); 91 void (*flush)(struct nouveau_vm *);
79}; 92};
80 93
81/* nouveau_vm.c */ 94static inline struct nouveau_vmmgr *
82int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, 95nouveau_vmmgr(void *obj)
96{
97 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
98}
99
100#define nouveau_vmmgr_create(p,e,o,i,f,d) \
101 nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
102#define nouveau_vmmgr_destroy(p) \
103 nouveau_subdev_destroy(&(p)->base)
104#define nouveau_vmmgr_init(p) \
105 nouveau_subdev_init(&(p)->base)
106#define nouveau_vmmgr_fini(p,s) \
107 nouveau_subdev_fini(&(p)->base, (s))
108
109#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
110#define _nouveau_vmmgr_init _nouveau_subdev_init
111#define _nouveau_vmmgr_fini _nouveau_subdev_fini
112
113extern struct nouveau_oclass nv04_vmmgr_oclass;
114extern struct nouveau_oclass nv41_vmmgr_oclass;
115extern struct nouveau_oclass nv44_vmmgr_oclass;
116extern struct nouveau_oclass nv50_vmmgr_oclass;
117extern struct nouveau_oclass nvc0_vmmgr_oclass;
118
119int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
83 struct nouveau_vm **); 120 struct nouveau_vm **);
121void nv04_vmmgr_dtor(struct nouveau_object *);
122
123void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
124void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
125
126/* nouveau_vm.c */
127int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
128 u64 mm_offset, u32 block, struct nouveau_vm **);
129int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
130 u64 mm_offset, struct nouveau_vm **);
131#endif
84int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, 132int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
85 struct nouveau_gpuobj *pgd); 133 struct nouveau_gpuobj *pgd);
86int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, 134int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
@@ -93,26 +141,6 @@ void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
93void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, 141void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
94 struct nouveau_mem *); 142 struct nouveau_mem *);
95void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, 143void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
96 struct nouveau_mem *mem); 144 struct nouveau_mem *mem);
97/* nv50_vm.c */
98void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
99 struct nouveau_gpuobj *pgt[2]);
100void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
101 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
102void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
103 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
104void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
105void nv50_vm_flush(struct nouveau_vm *);
106void nv50_vm_flush_engine(struct drm_device *, int engine);
107
108/* nvc0_vm.c */
109void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
110 struct nouveau_gpuobj *pgt[2]);
111void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
112 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
113void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
114 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
115void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
116void nvc0_vm_flush(struct nouveau_vm *);
117 145
118#endif 146#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
new file mode 100644
index 000000000000..cd01c533007a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <subdev/bar.h>
27
28struct nouveau_barobj {
29 struct nouveau_object base;
30 struct nouveau_vma vma;
31 void __iomem *iomem;
32};
33
34static int
35nouveau_barobj_ctor(struct nouveau_object *parent,
36 struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *mem, u32 size,
38 struct nouveau_object **pobject)
39{
40 struct nouveau_bar *bar = (void *)engine;
41 struct nouveau_barobj *barobj;
42 int ret;
43
44 ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
45 *pobject = nv_object(barobj);
46 if (ret)
47 return ret;
48
49 ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
50 if (ret)
51 return ret;
52
53 barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
54 return 0;
55}
56
57static void
58nouveau_barobj_dtor(struct nouveau_object *object)
59{
60 struct nouveau_bar *bar = (void *)object->engine;
61 struct nouveau_barobj *barobj = (void *)object;
62 if (barobj->vma.node)
63 bar->unmap(bar, &barobj->vma);
64 nouveau_object_destroy(&barobj->base);
65}
66
67static u32
68nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
69{
70 struct nouveau_barobj *barobj = (void *)object;
71 return ioread32_native(barobj->iomem + addr);
72}
73
74static void
75nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
76{
77 struct nouveau_barobj *barobj = (void *)object;
78 iowrite32_native(data, barobj->iomem + addr);
79}
80
81static struct nouveau_oclass
82nouveau_barobj_oclass = {
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nouveau_barobj_ctor,
85 .dtor = nouveau_barobj_dtor,
86 .init = nouveau_object_init,
87 .fini = nouveau_object_fini,
88 .rd32 = nouveau_barobj_rd32,
89 .wr32 = nouveau_barobj_wr32,
90 },
91};
92
93int
94nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
95 struct nouveau_mem *mem, struct nouveau_object **pobject)
96{
97 struct nouveau_object *engine = nv_object(bar);
98 return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
99 mem, 0, pobject);
100}
101
102int
103nouveau_bar_create_(struct nouveau_object *parent,
104 struct nouveau_object *engine,
105 struct nouveau_oclass *oclass, int length, void **pobject)
106{
107 struct nouveau_device *device = nv_device(parent);
108 struct nouveau_bar *bar;
109 int ret;
110
111 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
112 "bar", length, pobject);
113 bar = *pobject;
114 if (ret)
115 return ret;
116
117 bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
118 pci_resource_len(device->pdev, 3));
119 return 0;
120}
121
122void
123nouveau_bar_destroy(struct nouveau_bar *bar)
124{
125 if (bar->iomem)
126 iounmap(bar->iomem);
127 nouveau_subdev_destroy(&bar->base);
128}
129
130void
131_nouveau_bar_dtor(struct nouveau_object *object)
132{
133 struct nouveau_bar *bar = (void *)object;
134 nouveau_bar_destroy(bar);
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
new file mode 100644
index 000000000000..c3acf5b70d9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/bar.h>
29#include <subdev/fb.h>
30#include <subdev/vm.h>
31
32struct nv50_bar_priv {
33 struct nouveau_bar base;
34 spinlock_t lock;
35 struct nouveau_gpuobj *mem;
36 struct nouveau_gpuobj *pad;
37 struct nouveau_gpuobj *pgd;
38 struct nouveau_vm *bar1_vm;
39 struct nouveau_gpuobj *bar1;
40 struct nouveau_vm *bar3_vm;
41 struct nouveau_gpuobj *bar3;
42};
43
44static int
45nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
46 u32 flags, struct nouveau_vma *vma)
47{
48 struct nv50_bar_priv *priv = (void *)bar;
49 int ret;
50
51 ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
52 if (ret)
53 return ret;
54
55 nouveau_vm_map(vma, mem);
56 nv50_vm_flush_engine(nv_subdev(bar), 6);
57 return 0;
58}
59
60static int
61nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
62 u32 flags, struct nouveau_vma *vma)
63{
64 struct nv50_bar_priv *priv = (void *)bar;
65 int ret;
66
67 ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
68 if (ret)
69 return ret;
70
71 nouveau_vm_map(vma, mem);
72 nv50_vm_flush_engine(nv_subdev(bar), 6);
73 return 0;
74}
75
76static void
77nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
78{
79 nouveau_vm_unmap(vma);
80 nv50_vm_flush_engine(nv_subdev(bar), 6);
81 nouveau_vm_put(vma);
82}
83
84static void
85nv50_bar_flush(struct nouveau_bar *bar)
86{
87 struct nv50_bar_priv *priv = (void *)bar;
88 unsigned long flags;
89 spin_lock_irqsave(&priv->lock, flags);
90 nv_wr32(priv, 0x00330c, 0x00000001);
91 if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
92 nv_warn(priv, "flush timeout\n");
93 spin_unlock_irqrestore(&priv->lock, flags);
94}
95
96void
97nv84_bar_flush(struct nouveau_bar *bar)
98{
99 struct nv50_bar_priv *priv = (void *)bar;
100 unsigned long flags;
101 spin_lock_irqsave(&priv->lock, flags);
102 nv_wr32(bar, 0x070000, 0x00000001);
103 if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
104 nv_warn(priv, "flush timeout\n");
105 spin_unlock_irqrestore(&priv->lock, flags);
106}
107
108static int
109nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nouveau_device *device = nv_device(parent);
114 struct nouveau_object *heap;
115 struct nouveau_vm *vm;
116 struct nv50_bar_priv *priv;
117 u64 start, limit;
118 int ret;
119
120 ret = nouveau_bar_create(parent, engine, oclass, &priv);
121 *pobject = nv_object(priv);
122 if (ret)
123 return ret;
124
125 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
126 &priv->mem);
127 heap = nv_object(priv->mem);
128 if (ret)
129 return ret;
130
131 ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
132 0x1400 : 0x0200, 0, 0, &priv->pad);
133 if (ret)
134 return ret;
135
136 ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
137 if (ret)
138 return ret;
139
140 /* BAR3 */
141 start = 0x0100000000ULL;
142 limit = start + pci_resource_len(device->pdev, 3);
143
144 ret = nouveau_vm_new(device, start, limit, start, &vm);
145 if (ret)
146 return ret;
147
148 ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
149 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
150 &vm->pgt[0].obj[0]);
151 vm->pgt[0].refcount[0] = 1;
152 if (ret)
153 return ret;
154
155 ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
156 nouveau_vm_ref(NULL, &vm, NULL);
157 if (ret)
158 return ret;
159
160 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
161 if (ret)
162 return ret;
163
164 nv_wo32(priv->bar3, 0x00, 0x7fc00000);
165 nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
166 nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
167 nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
168 upper_32_bits(start));
169 nv_wo32(priv->bar3, 0x10, 0x00000000);
170 nv_wo32(priv->bar3, 0x14, 0x00000000);
171
172 /* BAR1 */
173 start = 0x0000000000ULL;
174 limit = start + pci_resource_len(device->pdev, 1);
175
176 ret = nouveau_vm_new(device, start, limit--, start, &vm);
177 if (ret)
178 return ret;
179
180 ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
181 nouveau_vm_ref(NULL, &vm, NULL);
182 if (ret)
183 return ret;
184
185 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
186 if (ret)
187 return ret;
188
189 nv_wo32(priv->bar1, 0x00, 0x7fc00000);
190 nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
191 nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
192 nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
193 upper_32_bits(start));
194 nv_wo32(priv->bar1, 0x10, 0x00000000);
195 nv_wo32(priv->bar1, 0x14, 0x00000000);
196
197 priv->base.alloc = nouveau_bar_alloc;
198 priv->base.kmap = nv50_bar_kmap;
199 priv->base.umap = nv50_bar_umap;
200 priv->base.unmap = nv50_bar_unmap;
201 if (device->chipset == 0x50)
202 priv->base.flush = nv50_bar_flush;
203 else
204 priv->base.flush = nv84_bar_flush;
205 spin_lock_init(&priv->lock);
206 return 0;
207}
208
209static void
210nv50_bar_dtor(struct nouveau_object *object)
211{
212 struct nv50_bar_priv *priv = (void *)object;
213 nouveau_gpuobj_ref(NULL, &priv->bar1);
214 nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
215 nouveau_gpuobj_ref(NULL, &priv->bar3);
216 if (priv->bar3_vm) {
217 nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
218 nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
219 }
220 nouveau_gpuobj_ref(NULL, &priv->pgd);
221 nouveau_gpuobj_ref(NULL, &priv->pad);
222 nouveau_gpuobj_ref(NULL, &priv->mem);
223 nouveau_bar_destroy(&priv->base);
224}
225
226static int
227nv50_bar_init(struct nouveau_object *object)
228{
229 struct nv50_bar_priv *priv = (void *)object;
230 int ret;
231
232 ret = nouveau_bar_init(&priv->base);
233 if (ret)
234 return ret;
235
236 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
237 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
238 nv50_vm_flush_engine(nv_subdev(priv), 6);
239
240 nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
241 nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
242 nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
243 nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
244 return 0;
245}
246
247static int
248nv50_bar_fini(struct nouveau_object *object, bool suspend)
249{
250 struct nv50_bar_priv *priv = (void *)object;
251 return nouveau_bar_fini(&priv->base, suspend);
252}
253
254struct nouveau_oclass
255nv50_bar_oclass = {
256 .handle = NV_SUBDEV(BAR, 0x50),
257 .ofuncs = &(struct nouveau_ofuncs) {
258 .ctor = nv50_bar_ctor,
259 .dtor = nv50_bar_dtor,
260 .init = nv50_bar_init,
261 .fini = nv50_bar_fini,
262 },
263};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
new file mode 100644
index 000000000000..77a6fb725d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/bar.h>
29#include <subdev/fb.h>
30#include <subdev/vm.h>
31
32struct nvc0_bar_priv {
33 struct nouveau_bar base;
34 spinlock_t lock;
35 struct {
36 struct nouveau_gpuobj *mem;
37 struct nouveau_gpuobj *pgd;
38 struct nouveau_vm *vm;
39 } bar[2];
40};
41
42static int
43nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
44 u32 flags, struct nouveau_vma *vma)
45{
46 struct nvc0_bar_priv *priv = (void *)bar;
47 int ret;
48
49 ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
50 if (ret)
51 return ret;
52
53 nouveau_vm_map(vma, mem);
54 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
55 return 0;
56}
57
58static int
59nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
60 u32 flags, struct nouveau_vma *vma)
61{
62 struct nvc0_bar_priv *priv = (void *)bar;
63 int ret;
64
65 ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
66 mem->page_shift, flags, vma);
67 if (ret)
68 return ret;
69
70 nouveau_vm_map(vma, mem);
71 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
72 return 0;
73}
74
75static void
76nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
77{
78 struct nvc0_bar_priv *priv = (void *)bar;
79 int i = !(vma->vm == priv->bar[0].vm);
80
81 nouveau_vm_unmap(vma);
82 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
83 nouveau_vm_put(vma);
84}
85
86static int
87nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nouveau_device *device = nv_device(parent);
92 struct pci_dev *pdev = device->pdev;
93 struct nvc0_bar_priv *priv;
94 struct nouveau_gpuobj *mem;
95 struct nouveau_vm *vm;
96 int ret;
97
98 ret = nouveau_bar_create(parent, engine, oclass, &priv);
99 *pobject = nv_object(priv);
100 if (ret)
101 return ret;
102
103 /* BAR3 */
104 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
105 mem = priv->bar[0].mem;
106 if (ret)
107 return ret;
108
109 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
110 if (ret)
111 return ret;
112
113 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
114 if (ret)
115 return ret;
116
117 ret = nouveau_gpuobj_new(parent, NULL,
118 (pci_resource_len(pdev, 3) >> 12) * 8,
119 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
120 &vm->pgt[0].obj[0]);
121 vm->pgt[0].refcount[0] = 1;
122 if (ret)
123 return ret;
124
125 ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
126 nouveau_vm_ref(NULL, &vm, NULL);
127 if (ret)
128 return ret;
129
130 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
131 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
132 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
133 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
134
135 /* BAR1 */
136 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
137 mem = priv->bar[1].mem;
138 if (ret)
139 return ret;
140
141 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
142 if (ret)
143 return ret;
144
145 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
146 if (ret)
147 return ret;
148
149 ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
150 nouveau_vm_ref(NULL, &vm, NULL);
151 if (ret)
152 return ret;
153
154 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
155 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
156 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
157 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
158
159 priv->base.alloc = nouveau_bar_alloc;
160 priv->base.kmap = nvc0_bar_kmap;
161 priv->base.umap = nvc0_bar_umap;
162 priv->base.unmap = nvc0_bar_unmap;
163 priv->base.flush = nv84_bar_flush;
164 spin_lock_init(&priv->lock);
165 return 0;
166}
167
168static void
169nvc0_bar_dtor(struct nouveau_object *object)
170{
171 struct nvc0_bar_priv *priv = (void *)object;
172
173 nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
174 nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
175 nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
176
177 if (priv->bar[0].vm) {
178 nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
179 nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
180 }
181 nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
182 nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
183
184 nouveau_bar_destroy(&priv->base);
185}
186
187static int
188nvc0_bar_init(struct nouveau_object *object)
189{
190 struct nvc0_bar_priv *priv = (void *)object;
191 int ret;
192
193 ret = nouveau_bar_init(&priv->base);
194 if (ret)
195 return ret;
196
197 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
198 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
199 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
200
201 nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
202 nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
203 return 0;
204}
205
206struct nouveau_oclass
207nvc0_bar_oclass = {
208 .handle = NV_SUBDEV(BAR, 0xc0),
209 .ofuncs = &(struct nouveau_ofuncs) {
210 .ctor = nvc0_bar_ctor,
211 .dtor = nvc0_bar_dtor,
212 .init = nvc0_bar_init,
213 .fini = _nouveau_bar_fini,
214 },
215};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 66b7156281c9..5173c785b061 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -30,6 +30,8 @@
30#include <subdev/mc.h> 30#include <subdev/mc.h>
31#include <subdev/timer.h> 31#include <subdev/timer.h>
32#include <subdev/fb.h> 32#include <subdev/fb.h>
33#include <subdev/instmem.h>
34#include <subdev/vm.h>
33 35
34int 36int
35nv04_identify(struct nouveau_device *device) 37nv04_identify(struct nouveau_device *device)
@@ -43,6 +45,8 @@ nv04_identify(struct nouveau_device *device)
43 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 45 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
44 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 46 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
45 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass; 47 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
48 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
49 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
46 break; 50 break;
47 case 0x05: 51 case 0x05:
48 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 52 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -52,6 +56,8 @@ nv04_identify(struct nouveau_device *device)
52 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 56 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
53 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 57 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
54 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass; 58 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
59 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
55 break; 61 break;
56 default: 62 default:
57 nv_fatal(device, "unknown RIVA chipset\n"); 63 nv_fatal(device, "unknown RIVA chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index cf81479725ad..c4f2c2d3eaec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -31,6 +31,8 @@
31#include <subdev/mc.h> 31#include <subdev/mc.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
34 36
35int 37int
36nv10_identify(struct nouveau_device *device) 38nv10_identify(struct nouveau_device *device)
@@ -45,6 +47,8 @@ nv10_identify(struct nouveau_device *device)
45 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 47 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
46 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 48 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
47 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 49 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
50 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
51 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
48 break; 52 break;
49 case 0x15: 53 case 0x15:
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 54 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -55,6 +59,8 @@ nv10_identify(struct nouveau_device *device)
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 59 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 60 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 61 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
62 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
63 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
58 break; 64 break;
59 case 0x16: 65 case 0x16:
60 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 66 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -65,6 +71,8 @@ nv10_identify(struct nouveau_device *device)
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 71 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 72 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 73 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
74 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
75 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
68 break; 76 break;
69 case 0x1a: 77 case 0x1a:
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 78 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -75,6 +83,8 @@ nv10_identify(struct nouveau_device *device)
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 83 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 84 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
77 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 85 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
86 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
87 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 break; 88 break;
79 case 0x11: 89 case 0x11:
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 90 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +95,8 @@ nv10_identify(struct nouveau_device *device)
85 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 95 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
86 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
87 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
88 break; 100 break;
89 case 0x17: 101 case 0x17:
90 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 102 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -95,6 +107,8 @@ nv10_identify(struct nouveau_device *device)
95 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 107 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 108 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 109 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
110 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
111 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
98 break; 112 break;
99 case 0x1f: 113 case 0x1f:
100 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -105,6 +119,8 @@ nv10_identify(struct nouveau_device *device)
105 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 119 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
106 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 120 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
107 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 121 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
122 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
123 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
108 break; 124 break;
109 case 0x18: 125 case 0x18:
110 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 126 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -115,6 +131,8 @@ nv10_identify(struct nouveau_device *device)
115 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 131 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
116 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 132 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
117 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 133 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
134 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
135 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
118 break; 136 break;
119 default: 137 default:
120 nv_fatal(device, "unknown Celsius chipset\n"); 138 nv_fatal(device, "unknown Celsius chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index e97280cd43b0..719b72a43e47 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -31,6 +31,8 @@
31#include <subdev/mc.h> 31#include <subdev/mc.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
34 36
35int 37int
36nv20_identify(struct nouveau_device *device) 38nv20_identify(struct nouveau_device *device)
@@ -45,6 +47,8 @@ nv20_identify(struct nouveau_device *device)
45 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 47 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
46 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 48 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
47 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 49 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
50 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
51 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
48 break; 52 break;
49 case 0x25: 53 case 0x25:
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 54 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -55,6 +59,8 @@ nv20_identify(struct nouveau_device *device)
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 59 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 60 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 61 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
62 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
63 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
58 break; 64 break;
59 case 0x28: 65 case 0x28:
60 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 66 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -65,6 +71,8 @@ nv20_identify(struct nouveau_device *device)
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 71 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 72 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 73 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
74 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
75 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
68 break; 76 break;
69 case 0x2a: 77 case 0x2a:
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 78 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -75,6 +83,8 @@ nv20_identify(struct nouveau_device *device)
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 83 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 84 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
77 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 85 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
86 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
87 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 break; 88 break;
79 default: 89 default:
80 nv_fatal(device, "unknown Kelvin chipset\n"); 90 nv_fatal(device, "unknown Kelvin chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index ddd3ab6cb733..0a1a72809d82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -31,6 +31,8 @@
31#include <subdev/mc.h> 31#include <subdev/mc.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
34 36
35int 37int
36nv30_identify(struct nouveau_device *device) 38nv30_identify(struct nouveau_device *device)
@@ -45,6 +47,8 @@ nv30_identify(struct nouveau_device *device)
45 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 47 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
46 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 48 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
47 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 49 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
50 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
51 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
48 break; 52 break;
49 case 0x35: 53 case 0x35:
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 54 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -55,6 +59,8 @@ nv30_identify(struct nouveau_device *device)
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 59 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 60 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 61 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
62 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
63 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
58 break; 64 break;
59 case 0x31: 65 case 0x31:
60 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 66 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -65,6 +71,8 @@ nv30_identify(struct nouveau_device *device)
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 71 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 72 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 73 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
74 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
75 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
68 break; 76 break;
69 case 0x36: 77 case 0x36:
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 78 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -75,6 +83,8 @@ nv30_identify(struct nouveau_device *device)
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 83 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 84 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
77 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 85 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
86 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
87 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 break; 88 break;
79 case 0x34: 89 case 0x34:
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 90 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +95,8 @@ nv30_identify(struct nouveau_device *device)
85 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 95 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
86 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
87 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
88 break; 100 break;
89 default: 101 default:
90 nv_fatal(device, "unknown Rankine chipset\n"); 102 nv_fatal(device, "unknown Rankine chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index c7ea921e0309..5e1ef5e4cf7f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -31,6 +31,8 @@
31#include <subdev/mc.h> 31#include <subdev/mc.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
34 36
35int 37int
36nv40_identify(struct nouveau_device *device) 38nv40_identify(struct nouveau_device *device)
@@ -45,6 +47,8 @@ nv40_identify(struct nouveau_device *device)
45 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 47 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
46 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 48 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
47 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 49 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
50 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
51 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
48 break; 52 break;
49 case 0x41: 53 case 0x41:
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 54 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -55,6 +59,8 @@ nv40_identify(struct nouveau_device *device)
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 59 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 60 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 61 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
62 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
63 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
58 break; 64 break;
59 case 0x42: 65 case 0x42:
60 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 66 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -65,6 +71,8 @@ nv40_identify(struct nouveau_device *device)
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 71 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 72 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 73 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
74 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
75 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
68 break; 76 break;
69 case 0x43: 77 case 0x43:
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 78 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -75,6 +83,8 @@ nv40_identify(struct nouveau_device *device)
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 83 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 84 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
77 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 85 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
86 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
87 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 break; 88 break;
79 case 0x45: 89 case 0x45:
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 90 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +95,8 @@ nv40_identify(struct nouveau_device *device)
85 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 95 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
86 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
87 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
88 break; 100 break;
89 case 0x47: 101 case 0x47:
90 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 102 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -95,6 +107,8 @@ nv40_identify(struct nouveau_device *device)
95 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 107 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 108 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 109 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
110 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
111 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
98 break; 112 break;
99 case 0x49: 113 case 0x49:
100 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -105,6 +119,8 @@ nv40_identify(struct nouveau_device *device)
105 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 119 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
106 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 120 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
107 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 121 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
122 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
123 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
108 break; 124 break;
109 case 0x4b: 125 case 0x4b:
110 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 126 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -115,6 +131,8 @@ nv40_identify(struct nouveau_device *device)
115 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 131 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
116 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 132 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
117 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 133 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
134 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
135 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
118 break; 136 break;
119 case 0x44: 137 case 0x44:
120 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 138 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -125,6 +143,8 @@ nv40_identify(struct nouveau_device *device)
125 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 143 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
126 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 144 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
127 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 145 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
146 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
147 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
128 break; 148 break;
129 case 0x46: 149 case 0x46:
130 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 150 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -135,6 +155,8 @@ nv40_identify(struct nouveau_device *device)
135 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 155 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
136 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 156 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
137 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 157 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
158 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
159 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
138 break; 160 break;
139 case 0x4a: 161 case 0x4a:
140 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 162 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -145,6 +167,8 @@ nv40_identify(struct nouveau_device *device)
145 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 167 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
146 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 168 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
147 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 169 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
170 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
171 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
148 break; 172 break;
149 case 0x4c: 173 case 0x4c:
150 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 174 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -155,6 +179,8 @@ nv40_identify(struct nouveau_device *device)
155 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 179 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
156 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 180 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
157 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 181 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
182 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
183 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
158 break; 184 break;
159 case 0x4e: 185 case 0x4e:
160 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 186 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -165,6 +191,8 @@ nv40_identify(struct nouveau_device *device)
165 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 191 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 192 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 193 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
194 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
195 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
168 break; 196 break;
169 case 0x63: 197 case 0x63:
170 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 198 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -175,6 +203,8 @@ nv40_identify(struct nouveau_device *device)
175 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 203 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
176 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 204 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
177 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 205 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
206 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
207 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
178 break; 208 break;
179 case 0x67: 209 case 0x67:
180 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 210 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -185,6 +215,8 @@ nv40_identify(struct nouveau_device *device)
185 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 215 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
186 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 216 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
187 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 217 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
218 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
219 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
188 break; 220 break;
189 case 0x68: 221 case 0x68:
190 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 222 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -195,6 +227,8 @@ nv40_identify(struct nouveau_device *device)
195 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 227 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
196 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 228 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
197 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 229 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
230 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
231 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
198 break; 232 break;
199 default: 233 default:
200 nv_fatal(device, "unknown Curie chipset\n"); 234 nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index e60cdf26ebf9..5e86a2f6ad8a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -31,6 +31,9 @@
31#include <subdev/mc.h> 31#include <subdev/mc.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
36#include <subdev/bar.h>
34 37
35int 38int
36nv50_identify(struct nouveau_device *device) 39nv50_identify(struct nouveau_device *device)
@@ -45,6 +48,9 @@ nv50_identify(struct nouveau_device *device)
45 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 48 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
46 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 49 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
47 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 50 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
51 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
52 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
53 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
48 break; 54 break;
49 case 0x84: 55 case 0x84:
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 56 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -55,6 +61,9 @@ nv50_identify(struct nouveau_device *device)
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 61 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 62 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 63 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
64 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
65 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
66 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
58 break; 67 break;
59 case 0x86: 68 case 0x86:
60 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 69 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -65,6 +74,9 @@ nv50_identify(struct nouveau_device *device)
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 74 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 75 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 76 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
77 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
78 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
79 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
68 break; 80 break;
69 case 0x92: 81 case 0x92:
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 82 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -75,6 +87,9 @@ nv50_identify(struct nouveau_device *device)
75 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 87 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 88 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
77 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 89 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
90 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
91 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
92 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
78 break; 93 break;
79 case 0x94: 94 case 0x94:
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 95 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +100,9 @@ nv50_identify(struct nouveau_device *device)
85 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 100 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
86 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 101 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
87 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 102 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
103 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
104 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
105 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
88 break; 106 break;
89 case 0x96: 107 case 0x96:
90 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 108 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -95,6 +113,9 @@ nv50_identify(struct nouveau_device *device)
95 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; 113 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 114 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 115 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
116 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
117 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
118 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
98 break; 119 break;
99 case 0x98: 120 case 0x98:
100 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 121 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -105,6 +126,9 @@ nv50_identify(struct nouveau_device *device)
105 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 126 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
106 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 127 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
107 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 128 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
129 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
130 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
131 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
108 break; 132 break;
109 case 0xa0: 133 case 0xa0:
110 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 134 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -115,6 +139,9 @@ nv50_identify(struct nouveau_device *device)
115 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 139 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
116 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 140 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
117 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 141 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
142 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
143 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
144 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
118 break; 145 break;
119 case 0xaa: 146 case 0xaa:
120 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -125,6 +152,9 @@ nv50_identify(struct nouveau_device *device)
125 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 152 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
126 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 153 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
127 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 154 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
155 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
156 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
157 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
128 break; 158 break;
129 case 0xac: 159 case 0xac:
130 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 160 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -135,6 +165,9 @@ nv50_identify(struct nouveau_device *device)
135 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 165 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
136 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
137 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
168 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
169 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
170 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
138 break; 171 break;
139 case 0xa3: 172 case 0xa3:
140 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 173 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -145,6 +178,9 @@ nv50_identify(struct nouveau_device *device)
145 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 178 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
146 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 179 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
147 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 180 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
181 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
182 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
183 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
148 break; 184 break;
149 case 0xa5: 185 case 0xa5:
150 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 186 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -155,6 +191,9 @@ nv50_identify(struct nouveau_device *device)
155 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 191 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
156 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 192 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
157 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 193 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
194 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
195 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
196 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
158 break; 197 break;
159 case 0xa8: 198 case 0xa8:
160 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 199 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -165,6 +204,9 @@ nv50_identify(struct nouveau_device *device)
165 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 204 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 205 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 206 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
207 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
208 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
209 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
168 break; 210 break;
169 case 0xaf: 211 case 0xaf:
170 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 212 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -175,6 +217,9 @@ nv50_identify(struct nouveau_device *device)
175 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; 217 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
176 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
177 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; 219 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
222 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
178 break; 223 break;
179 default: 224 default:
180 nv_fatal(device, "unknown Tesla chipset\n"); 225 nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 8de67307eea7..87f4e16379c6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -32,6 +32,9 @@
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/ltcg.h> 34#include <subdev/ltcg.h>
35#include <subdev/instmem.h>
36#include <subdev/vm.h>
37#include <subdev/bar.h>
35 38
36int 39int
37nvc0_identify(struct nouveau_device *device) 40nvc0_identify(struct nouveau_device *device)
@@ -47,6 +50,9 @@ nvc0_identify(struct nouveau_device *device)
47 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 50 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
48 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 51 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
49 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 52 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
53 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
54 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
55 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
50 break; 56 break;
51 case 0xc4: 57 case 0xc4:
52 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -58,6 +64,9 @@ nvc0_identify(struct nouveau_device *device)
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 64 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 65 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 66 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
61 break; 70 break;
62 case 0xc3: 71 case 0xc3:
63 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 72 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -69,6 +78,9 @@ nvc0_identify(struct nouveau_device *device)
69 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
70 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 79 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
71 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 80 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
81 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
82 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
83 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
72 break; 84 break;
73 case 0xce: 85 case 0xce:
74 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -80,6 +92,9 @@ nvc0_identify(struct nouveau_device *device)
80 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
81 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 93 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
82 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 94 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
95 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
96 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
97 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
83 break; 98 break;
84 case 0xcf: 99 case 0xcf:
85 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 100 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -91,6 +106,9 @@ nvc0_identify(struct nouveau_device *device)
91 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 106 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
92 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 107 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
93 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 108 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
110 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
111 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
94 break; 112 break;
95 case 0xc1: 113 case 0xc1:
96 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -102,6 +120,9 @@ nvc0_identify(struct nouveau_device *device)
102 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 120 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
103 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 121 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
104 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 122 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
123 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
124 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
125 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
105 break; 126 break;
106 case 0xc8: 127 case 0xc8:
107 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 128 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -113,6 +134,9 @@ nvc0_identify(struct nouveau_device *device)
113 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 134 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
114 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 135 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
115 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 136 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
137 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
138 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
139 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
116 break; 140 break;
117 case 0xd9: 141 case 0xd9:
118 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 142 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -124,6 +148,9 @@ nvc0_identify(struct nouveau_device *device)
124 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 148 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
125 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 149 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
126 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 150 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
151 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
152 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
153 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
127 break; 154 break;
128 default: 155 default:
129 nv_fatal(device, "unknown Fermi chipset\n"); 156 nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 919a10280d7e..ab8346b8bde0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -32,6 +32,9 @@
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33#include <subdev/fb.h> 33#include <subdev/fb.h>
34#include <subdev/ltcg.h> 34#include <subdev/ltcg.h>
35#include <subdev/instmem.h>
36#include <subdev/vm.h>
37#include <subdev/bar.h>
35 38
36int 39int
37nve0_identify(struct nouveau_device *device) 40nve0_identify(struct nouveau_device *device)
@@ -47,6 +50,9 @@ nve0_identify(struct nouveau_device *device)
47 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 50 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
48 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 51 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
49 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 52 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
53 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
54 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
55 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
50 break; 56 break;
51 case 0xe7: 57 case 0xe7:
52 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -58,6 +64,9 @@ nve0_identify(struct nouveau_device *device)
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 64 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; 65 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; 66 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
61 break; 70 break;
62 default: 71 default:
63 nv_fatal(device, "unknown Kepler chipset\n"); 72 nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 84aa71c47128..347a496fcad8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -50,54 +50,14 @@ nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
50static void 50static void
51nv40_fb_init_gart(struct nv40_fb_priv *priv) 51nv40_fb_init_gart(struct nv40_fb_priv *priv)
52{ 52{
53#if 0 53 nv_wr32(priv, 0x100800, 0x00000001);
54 struct nouveau_gpuobj *gart = ndev->gart_info.sg_ctxdma;
55
56 if (ndev->gart_info.type != NOUVEAU_GART_HW) {
57#endif
58 nv_wr32(priv, 0x100800, 0x00000001);
59#if 0
60 return;
61 }
62
63 nv_wr32(ndev, 0x100800, gart->pinst | 0x00000002);
64 nv_mask(ndev, 0x10008c, 0x00000100, 0x00000100);
65 nv_wr32(ndev, 0x100820, 0x00000000);
66#endif
67} 54}
68 55
69static void 56static void
70nv44_fb_init_gart(struct nv40_fb_priv *priv) 57nv44_fb_init_gart(struct nv40_fb_priv *priv)
71{ 58{
72#if 0 59 nv_wr32(priv, 0x100850, 0x80000000);
73 struct nouveau_gpuobj *gart = ndev->gart_info.sg_ctxdma; 60 nv_wr32(priv, 0x100800, 0x00000001);
74 u32 vinst;
75
76 if (ndev->gart_info.type != NOUVEAU_GART_HW) {
77#endif
78 nv_wr32(priv, 0x100850, 0x80000000);
79 nv_wr32(priv, 0x100800, 0x00000001);
80#if 0
81 return;
82 }
83
84 /* calculate vram address of this PRAMIN block, object
85 * must be allocated on 512KiB alignment, and not exceed
86 * a total size of 512KiB for this to work correctly
87 */
88 vinst = nv_rd32(ndev, 0x10020c);
89 vinst -= ((gart->pinst >> 19) + 1) << 19;
90
91 nv_wr32(ndev, 0x100850, 0x80000000);
92 nv_wr32(ndev, 0x100818, ndev->gart_info.dummy.addr);
93
94 nv_wr32(ndev, 0x100804, ndev->gart_info.aper_size);
95 nv_wr32(ndev, 0x100850, 0x00008000);
96 nv_mask(ndev, 0x10008c, 0x00000200, 0x00000200);
97 nv_wr32(ndev, 0x100820, 0x00000000);
98 nv_wr32(ndev, 0x10082c, 0x00000001);
99 nv_wr32(ndev, 0x100800, vinst | 0x00000010);
100#endif
101} 61}
102 62
103static int 63static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
new file mode 100644
index 000000000000..1188227ca6aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/instmem.h>
26
27int
28nouveau_instobj_create_(struct nouveau_object *parent,
29 struct nouveau_object *engine,
30 struct nouveau_oclass *oclass,
31 int length, void **pobject)
32{
33 struct nouveau_instmem *imem = (void *)engine;
34 struct nouveau_instobj *iobj;
35 int ret;
36
37 ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
38 length, pobject);
39 iobj = *pobject;
40 if (ret)
41 return ret;
42
43 list_add(&iobj->head, &imem->list);
44 return 0;
45}
46
47void
48nouveau_instobj_destroy(struct nouveau_instobj *iobj)
49{
50 if (iobj->head.prev)
51 list_del(&iobj->head);
52 return nouveau_object_destroy(&iobj->base);
53}
54
55void
56_nouveau_instobj_dtor(struct nouveau_object *object)
57{
58 struct nouveau_instobj *iobj = (void *)object;
59 return nouveau_instobj_destroy(iobj);
60}
61
62int
63nouveau_instmem_create_(struct nouveau_object *parent,
64 struct nouveau_object *engine,
65 struct nouveau_oclass *oclass,
66 int length, void **pobject)
67{
68 struct nouveau_instmem *imem;
69 int ret;
70
71 ret = nouveau_subdev_create_(parent, engine, oclass, 0,
72 "INSTMEM", "instmem", length, pobject);
73 imem = *pobject;
74 if (ret)
75 return ret;
76
77 INIT_LIST_HEAD(&imem->list);
78 return 0;
79}
80
81int
82nouveau_instmem_init(struct nouveau_instmem *imem)
83{
84 struct nouveau_instobj *iobj;
85 int ret, i;
86
87 ret = nouveau_subdev_init(&imem->base);
88 if (ret)
89 return ret;
90
91 list_for_each_entry(iobj, &imem->list, head) {
92 if (iobj->suspend) {
93 for (i = 0; i < iobj->size; i += 4)
94 nv_wo32(iobj, i, iobj->suspend[i / 4]);
95 vfree(iobj->suspend);
96 iobj->suspend = NULL;
97 }
98 }
99
100 return 0;
101}
102
103int
104nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
105{
106 struct nouveau_instobj *iobj;
107 int i;
108
109 if (suspend) {
110 list_for_each_entry(iobj, &imem->list, head) {
111 iobj->suspend = vmalloc(iobj->size);
112 if (iobj->suspend) {
113 for (i = 0; i < iobj->size; i += 4)
114 iobj->suspend[i / 4] = nv_ro32(iobj, i);
115 } else
116 return -ENOMEM;
117 }
118 }
119
120 return nouveau_subdev_fini(&imem->base, suspend);
121}
122
123int
124_nouveau_instmem_init(struct nouveau_object *object)
125{
126 struct nouveau_instmem *imem = (void *)object;
127 return nouveau_instmem_init(imem);
128}
129
130int
131_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
132{
133 struct nouveau_instmem *imem = (void *)object;
134 return nouveau_instmem_fini(imem, suspend);
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index 46b6963b0937..f44f0f096689 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -1,141 +1,199 @@
1#include "drmP.h" 1/*
2#include "drm.h" 2 * Copyright 2012 Red Hat Inc.
3 3 *
4#include "nouveau_drv.h" 4 * Permission is hereby granted, free of charge, to any person obtaining a
5#include <engine/fifo.h> 5 * copy of this software and associated documentation files (the "Software"),
6#include <core/ramht.h> 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
7 26
8#include "nv04.h" 27#include "nv04.h"
9 28
29static int
30nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
31 struct nouveau_oclass *oclass, void *data, u32 size,
32 struct nouveau_object **pobject)
33{
34 struct nv04_instmem_priv *priv = (void *)engine;
35 struct nv04_instobj_priv *node;
36 int ret, align;
37
38 align = (unsigned long)data;
39 if (!align)
40 align = 1;
41
42 ret = nouveau_instobj_create(parent, engine, oclass, &node);
43 *pobject = nv_object(node);
44 if (ret)
45 return ret;
46
47 ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
48 if (ret)
49 return ret;
50
51 node->base.addr = node->mem->offset;
52 node->base.size = node->mem->length;
53 return 0;
54}
55
56static void
57nv04_instobj_dtor(struct nouveau_object *object)
58{
59 struct nv04_instmem_priv *priv = (void *)object->engine;
60 struct nv04_instobj_priv *node = (void *)object;
61 nouveau_mm_free(&priv->heap, &node->mem);
62 nouveau_instobj_destroy(&node->base);
63}
64
65static u32
66nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
67{
68 struct nv04_instobj_priv *node = (void *)object;
69 return nv_ro32(object->engine, node->mem->offset + addr);
70}
71
72static void
73nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
74{
75 struct nv04_instobj_priv *node = (void *)object;
76 nv_wo32(object->engine, node->mem->offset + addr, data);
77}
78
79static struct nouveau_oclass
80nv04_instobj_oclass = {
81 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv04_instobj_ctor,
83 .dtor = nv04_instobj_dtor,
84 .init = _nouveau_instobj_init,
85 .fini = _nouveau_instobj_fini,
86 .rd32 = nv04_instobj_rd32,
87 .wr32 = nv04_instobj_wr32,
88 },
89};
90
10int 91int
11nv04_instmem_init(struct drm_device *dev) 92nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
93 u32 size, u32 align, struct nouveau_object **pobject)
12{ 94{
13 struct drm_nouveau_private *dev_priv = dev->dev_private; 95 struct nouveau_object *engine = nv_object(imem);
14 struct nv04_instmem_priv *priv; 96 struct nv04_instmem_priv *priv = (void *)(imem);
15 int ret; 97 int ret;
16 98
17 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 99 ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
18 if (!priv) 100 (void *)(unsigned long)align, size, pobject);
19 return -ENOMEM; 101 if (ret)
20 dev_priv->engine.instmem.priv = priv; 102 return ret;
103
104 /* INSTMEM itself creates objects to reserve (and preserve across
105 * suspend/resume) various fixed data locations, each one of these
106 * takes a reference on INSTMEM itself, causing it to never be
107 * freed. We drop all the self-references here to avoid this.
108 */
109 if (unlikely(!priv->created))
110 atomic_dec(&engine->refcount);
111
112 return 0;
113}
21 114
22 /* PRAMIN aperture maps over the end of vram, reserve the space */ 115static int
23 dev_priv->ramin_available = true; 116nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
24 dev_priv->ramin_rsvd_vram = 512 * 1024; 117 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject)
119{
120 struct nv04_instmem_priv *priv;
121 int ret;
25 122
26 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_rsvd_vram); 123 ret = nouveau_instmem_create(parent, engine, oclass, &priv);
124 *pobject = nv_object(priv);
27 if (ret) 125 if (ret)
28 return ret; 126 return ret;
29 127
30 /* 0x00000-0x10000: reserve for probable vbios image */ 128 /* PRAMIN aperture maps over the end of VRAM, reserve it */
31 ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0, 0, &priv->vbios); 129 priv->base.reserved = 512 * 1024;
130 priv->base.alloc = nv04_instmem_alloc;
131
132 ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
32 if (ret) 133 if (ret)
33 return ret; 134 return ret;
34 135
35 /* 0x10000-0x18000: reserve for RAMHT */ 136 /* 0x00000-0x10000: reserve for probable vbios image */
36 ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, NVOBJ_FLAG_ZERO_ALLOC, 137 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
37 &priv->ramht);
38 if (ret) 138 if (ret)
39 return ret; 139 return ret;
40 140
41 /* 0x18000-0x18200: reserve for RAMRO */ 141 /* 0x10000-0x18000: reserve for RAMHT */
42 ret = nouveau_gpuobj_new(dev, NULL, 0x00200, 0, 0, &priv->ramro); 142 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
143 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
43 if (ret) 144 if (ret)
44 return ret; 145 return ret;
45 146
46 /* 0x18200-0x18a00: reserve for RAMFC (enough for 32 nv30 channels) */ 147 /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
47 ret = nouveau_gpuobj_new(dev, NULL, 0x00800, 0, NVOBJ_FLAG_ZERO_ALLOC, 148 ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
48 &priv->ramfc); 149 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
49 if (ret) 150 if (ret)
50 return ret; 151 return ret;
51 152
52 ret = nouveau_ramht_new(dev, priv->ramht, &dev_priv->ramht); 153 /* 0x18800-0x18a00: reserve for RAMRO */
154 ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
53 if (ret) 155 if (ret)
54 return ret; 156 return ret;
55 157
158 priv->created = true;
56 return 0; 159 return 0;
57} 160}
58 161
59void 162void
60nv04_instmem_takedown(struct drm_device *dev) 163nv04_instmem_dtor(struct nouveau_object *object)
61{ 164{
62 struct drm_nouveau_private *dev_priv = dev->dev_private; 165 struct nv04_instmem_priv *priv = (void *)object;
63 struct nv04_instmem_priv *priv = dev_priv->engine.instmem.priv;
64
65 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
66 nouveau_gpuobj_ref(NULL, &priv->ramfc); 166 nouveau_gpuobj_ref(NULL, &priv->ramfc);
67 nouveau_gpuobj_ref(NULL, &priv->ramro); 167 nouveau_gpuobj_ref(NULL, &priv->ramro);
68 nouveau_gpuobj_ref(NULL, &priv->ramht); 168 nouveau_gpuobj_ref(NULL, &priv->ramht);
69 169 nouveau_gpuobj_ref(NULL, &priv->vbios);
70 if (drm_mm_initialized(&dev_priv->ramin_heap)) 170 nouveau_mm_fini(&priv->heap);
71 drm_mm_takedown(&dev_priv->ramin_heap); 171 if (priv->iomem)
72 172 iounmap(priv->iomem);
73 kfree(priv); 173 nouveau_instmem_destroy(&priv->base);
74 dev_priv->engine.instmem.priv = NULL;
75} 174}
76 175
77int 176static u32
78nv04_instmem_suspend(struct drm_device *dev) 177nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
79{
80 return 0;
81}
82
83void
84nv04_instmem_resume(struct drm_device *dev)
85{ 178{
179 return nv_rd32(object, 0x700000 + addr);
86} 180}
87 181
88int 182static void
89nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, 183nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
90 u32 size, u32 align)
91{ 184{
92 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 185 return nv_wr32(object, 0x700000 + addr, data);
93 struct drm_mm_node *ramin = NULL;
94
95 do {
96 if (drm_mm_pre_get(&dev_priv->ramin_heap))
97 return -ENOMEM;
98
99 spin_lock(&dev_priv->ramin_lock);
100 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
101 if (ramin == NULL) {
102 spin_unlock(&dev_priv->ramin_lock);
103 return -ENOMEM;
104 }
105
106 ramin = drm_mm_get_block_atomic(ramin, size, align);
107 spin_unlock(&dev_priv->ramin_lock);
108 } while (ramin == NULL);
109
110 gpuobj->node = ramin;
111 gpuobj->vinst = ramin->start;
112 return 0;
113} 186}
114 187
115void 188struct nouveau_oclass
116nv04_instmem_put(struct nouveau_gpuobj *gpuobj) 189nv04_instmem_oclass = {
117{ 190 .handle = NV_SUBDEV(INSTMEM, 0x04),
118 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 191 .ofuncs = &(struct nouveau_ofuncs) {
119 192 .ctor = nv04_instmem_ctor,
120 spin_lock(&dev_priv->ramin_lock); 193 .dtor = nv04_instmem_dtor,
121 drm_mm_put_block(gpuobj->node); 194 .init = _nouveau_instmem_init,
122 gpuobj->node = NULL; 195 .fini = _nouveau_instmem_fini,
123 spin_unlock(&dev_priv->ramin_lock); 196 .rd32 = nv04_instmem_rd32,
124} 197 .wr32 = nv04_instmem_wr32,
125 198 },
126int 199};
127nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
128{
129 gpuobj->pinst = gpuobj->vinst;
130 return 0;
131}
132
133void
134nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
135{
136}
137
138void
139nv04_instmem_flush(struct drm_device *dev)
140{
141}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index a8c1104a83da..b2f82f9e4e7f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -1,11 +1,32 @@
1#ifndef __NV04_INSTMEM_H__ 1#ifndef __NV04_INSTMEM_H__
2#define __NV04_INSTMEM_H__ 2#define __NV04_INSTMEM_H__
3 3
4#include <core/gpuobj.h>
5#include <core/mm.h>
6
7#include <subdev/instmem.h>
8
4struct nv04_instmem_priv { 9struct nv04_instmem_priv {
10 struct nouveau_instmem base;
11 bool created;
12
13 void __iomem *iomem;
14 struct nouveau_mm heap;
15
5 struct nouveau_gpuobj *vbios; 16 struct nouveau_gpuobj *vbios;
6 struct nouveau_gpuobj *ramht; 17 struct nouveau_gpuobj *ramht;
7 struct nouveau_gpuobj *ramro; 18 struct nouveau_gpuobj *ramro;
8 struct nouveau_gpuobj *ramfc; 19 struct nouveau_gpuobj *ramfc;
9}; 20};
10 21
22struct nv04_instobj_priv {
23 struct nouveau_instobj base;
24 struct nouveau_mm_node *mem;
25};
26
27void nv04_instmem_dtor(struct nouveau_object *);
28
29int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
30 u32 size, u32 align, struct nouveau_object **pobject);
31
11#endif 32#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 7c938ae8ec93..6a22160324c1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -1,165 +1,139 @@
1#include "drmP.h" 1/*
2#include "drm.h" 2 * Copyright 2012 Red Hat Inc.
3 3 *
4#include "nouveau_drv.h" 4 * Permission is hereby granted, free of charge, to any person obtaining a
5#include <engine/fifo.h> 5 * copy of this software and associated documentation files (the "Software"),
6#include <core/ramht.h> 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
7 24
8#include "nv04.h" 25#include "nv04.h"
9 26
10int nv40_instmem_init(struct drm_device *dev) 27static inline int
28nv44_graph_class(struct nv04_instmem_priv *priv)
11{ 29{
12 struct drm_nouveau_private *dev_priv = dev->dev_private; 30 if ((nv_device(priv)->chipset & 0xf0) == 0x60)
31 return 1;
32 return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
33}
34
35static int
36nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *data, u32 size,
38 struct nouveau_object **pobject)
39{
40 struct nouveau_device *device = nv_device(parent);
41 struct pci_dev *pdev = device->pdev;
13 struct nv04_instmem_priv *priv; 42 struct nv04_instmem_priv *priv;
14 u32 vs, rsvd; 43 int ret, bar, vs;
15 int ret;
16 44
17 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 45 ret = nouveau_instmem_create(parent, engine, oclass, &priv);
18 if (!priv) 46 *pobject = nv_object(priv);
19 return -ENOMEM; 47 if (ret)
20 dev_priv->engine.instmem.priv = priv; 48 return ret;
49
50 /* map bar */
51 if (pci_resource_len(pdev, 2))
52 bar = 2;
53 else
54 bar = 3;
55
56 priv->iomem = ioremap(pci_resource_start(pdev, bar),
57 pci_resource_len(pdev, bar));
58 if (!priv->iomem) {
59 nv_error(priv, "unable to map PRAMIN BAR\n");
60 return -EFAULT;
61 }
21 62
22 /* PRAMIN aperture maps over the end of vram, reserve enough space 63 /* PRAMIN aperture maps over the end of vram, reserve enough space
23 * to fit graphics contexts for every channel, the magics come 64 * to fit graphics contexts for every channel, the magics come
24 * from engine/graph/nv40.c 65 * from engine/graph/nv40.c
25 */ 66 */
26 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8); 67 vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
27 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs; 68 if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
28 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs; 69 else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
29 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; 70 else if (nv44_graph_class(priv)) priv->base.reserved = 0x4980 * vs;
30 else rsvd = 0x4a40 * vs; 71 else priv->base.reserved = 0x4a40 * vs;
31 rsvd += 16 * 1024; 72 priv->base.reserved += 16 * 1024;
32 rsvd *= 32; /* per-channel */ 73 priv->base.reserved *= 32; /* per-channel */
33 rsvd += 512 * 1024; /* pci(e)gart table */ 74 priv->base.reserved += 512 * 1024; /* pci(e)gart table */
34 rsvd += 512 * 1024; /* object storage */ 75 priv->base.reserved += 512 * 1024; /* object storage */
35 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); 76
36 dev_priv->ramin_available = true; 77 priv->base.reserved = round_up(priv->base.reserved, 4096);
37 78 priv->base.alloc = nv04_instmem_alloc;
38 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_rsvd_vram); 79
80 ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
39 if (ret) 81 if (ret)
40 return ret; 82 return ret;
41 83
42 /* 0x00000-0x10000: reserve for probable vbios image */ 84 /* 0x00000-0x10000: reserve for probable vbios image */
43 ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0, 0, &priv->vbios); 85 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
44 if (ret) 86 if (ret)
45 return ret; 87 return ret;
46 88
47 /* 0x10000-0x18000: reserve for RAMHT */ 89 /* 0x10000-0x18000: reserve for RAMHT */
48 ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, NVOBJ_FLAG_ZERO_ALLOC, 90 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
49 &priv->ramht); 91 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
50 if (ret) 92 if (ret)
51 return ret; 93 return ret;
52 94
53 /* 0x18000-0x18200: reserve for RAMRO 95 /* 0x18000-0x18200: reserve for RAMRO
54 * 0x18200-0x20000: padding 96 * 0x18200-0x20000: padding
55 */ 97 */
56 ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, 0, &priv->ramro); 98 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
57 if (ret) 99 if (ret)
58 return ret; 100 return ret;
59 101
60 /* 0x20000-0x21000: reserve for RAMFC 102 /* 0x20000-0x21000: reserve for RAMFC
61 * 0x21000-0x40000: padding + some unknown stuff (see below) 103 * 0x21000-0x40000: padding and some unknown crap
62 *
63 * It appears something is controlled by 0x2220/0x2230 on certain
64 * NV4x chipsets as well as RAMFC. When 0x2230 == 0 ("new style"
65 * control) the upper 16-bits of 0x2220 points at this other
66 * mysterious table that's clobbering important things.
67 *
68 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
69 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
70 */ 104 */
71 ret = nouveau_gpuobj_new(dev, NULL, 0x20000, 0, NVOBJ_FLAG_ZERO_ALLOC, 105 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
72 &priv->ramfc); 106 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
73 if (ret)
74 return ret;
75
76 ret = nouveau_ramht_new(dev, priv->ramht, &dev_priv->ramht);
77 if (ret) 107 if (ret)
78 return ret; 108 return ret;
79 109
110 priv->created = true;
80 return 0; 111 return 0;
81} 112}
82 113
83void 114static u32
84nv40_instmem_takedown(struct drm_device *dev) 115nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
85{ 116{
86 struct drm_nouveau_private *dev_priv = dev->dev_private; 117 struct nv04_instmem_priv *priv = (void *)object;
87 struct nv04_instmem_priv *priv = dev_priv->engine.instmem.priv; 118 return ioread32_native(priv->iomem + addr);
88
89 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
90 nouveau_gpuobj_ref(NULL, &priv->ramfc);
91 nouveau_gpuobj_ref(NULL, &priv->ramro);
92 nouveau_gpuobj_ref(NULL, &priv->ramht);
93
94 if (drm_mm_initialized(&dev_priv->ramin_heap))
95 drm_mm_takedown(&dev_priv->ramin_heap);
96
97 kfree(priv);
98 dev_priv->engine.instmem.priv = NULL;
99}
100
101int
102nv40_instmem_suspend(struct drm_device *dev)
103{
104 return 0;
105} 119}
106 120
107void 121static void
108nv40_instmem_resume(struct drm_device *dev) 122nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
109{ 123{
124 struct nv04_instmem_priv *priv = (void *)object;
125 iowrite32_native(data, priv->iomem + addr);
110} 126}
111 127
112int 128struct nouveau_oclass
113nv40_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, 129nv40_instmem_oclass = {
114 u32 size, u32 align) 130 .handle = NV_SUBDEV(INSTMEM, 0x40),
115{ 131 .ofuncs = &(struct nouveau_ofuncs) {
116 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 132 .ctor = nv40_instmem_ctor,
117 struct drm_mm_node *ramin = NULL; 133 .dtor = nv04_instmem_dtor,
118 134 .init = _nouveau_instmem_init,
119 do { 135 .fini = _nouveau_instmem_fini,
120 if (drm_mm_pre_get(&dev_priv->ramin_heap)) 136 .rd32 = nv40_instmem_rd32,
121 return -ENOMEM; 137 .wr32 = nv40_instmem_wr32,
122 138 },
123 spin_lock(&dev_priv->ramin_lock); 139};
124 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
125 if (ramin == NULL) {
126 spin_unlock(&dev_priv->ramin_lock);
127 return -ENOMEM;
128 }
129
130 ramin = drm_mm_get_block_atomic(ramin, size, align);
131 spin_unlock(&dev_priv->ramin_lock);
132 } while (ramin == NULL);
133
134 gpuobj->node = ramin;
135 gpuobj->vinst = ramin->start;
136 return 0;
137}
138
139void
140nv40_instmem_put(struct nouveau_gpuobj *gpuobj)
141{
142 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
143
144 spin_lock(&dev_priv->ramin_lock);
145 drm_mm_put_block(gpuobj->node);
146 gpuobj->node = NULL;
147 spin_unlock(&dev_priv->ramin_lock);
148}
149
150int
151nv40_instmem_map(struct nouveau_gpuobj *gpuobj)
152{
153 gpuobj->pinst = gpuobj->vinst;
154 return 0;
155}
156
157void
158nv40_instmem_unmap(struct nouveau_gpuobj *gpuobj)
159{
160}
161
162void
163nv40_instmem_flush(struct drm_device *dev)
164{
165}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index f88530b96ec3..27ef0891d10b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -1,390 +1,172 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * All Rights Reserved. 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
5 * 10 *
6 * Permission is hereby granted, free of charge, to any person obtaining 11 * The above copyright notice and this permission notice shall be included in
7 * a copy of this software and associated documentation files (the 12 * all copies or substantial portions of the Software.
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 * 13 *
14 * The above copyright notice and this permission notice (including the 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * next paragraph) shall be included in all copies or substantial 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * portions of the Software. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 21 *
22 * Authors: Ben Skeggs
26 */ 23 */
27 24
28#include "drmP.h" 25#include <subdev/instmem.h>
29#include "drm.h" 26#include <subdev/fb.h>
30
31#include "nouveau_drv.h"
32#include <subdev/vm.h>
33 27
34#define BAR1_VM_BASE 0x0020000000ULL 28#include <core/mm.h>
35#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
36#define BAR3_VM_BASE 0x0000000000ULL
37#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
38 29
39struct nv50_instmem_priv { 30struct nv50_instmem_priv {
40 uint32_t save1700[5]; /* 0x1700->0x1710 */ 31 struct nouveau_instmem base;
41 32 spinlock_t lock;
42 struct nouveau_gpuobj *bar1_dmaobj; 33 u64 addr;
43 struct nouveau_gpuobj *bar3_dmaobj;
44}; 34};
45 35
46static void 36struct nv50_instobj_priv {
47nv50_channel_del(struct nouveau_channel **pchan) 37 struct nouveau_instobj base;
48{ 38 struct nouveau_mem *mem;
49 struct nouveau_channel *chan; 39};
50
51 chan = *pchan;
52 *pchan = NULL;
53 if (!chan)
54 return;
55
56 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
57 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
58 if (drm_mm_initialized(&chan->ramin_heap))
59 drm_mm_takedown(&chan->ramin_heap);
60 nouveau_gpuobj_ref(NULL, &chan->ramin);
61 kfree(chan);
62}
63 40
64static int 41static int
65nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, 42nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_channel **pchan) 43 struct nouveau_oclass *oclass, void *data, u32 size,
67{ 44 struct nouveau_object **pobject)
68 struct drm_nouveau_private *dev_priv = dev->dev_private;
69 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
70 struct nouveau_channel *chan;
71 int ret, i;
72
73 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
74 if (!chan)
75 return -ENOMEM;
76 chan->dev = dev;
77
78 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
79 if (ret) {
80 nv50_channel_del(&chan);
81 return ret;
82 }
83
84 ret = drm_mm_init(&chan->ramin_heap, pgd, chan->ramin->size - pgd);
85 if (ret) {
86 nv50_channel_del(&chan);
87 return ret;
88 }
89
90 ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
91 if (ret) {
92 nv50_channel_del(&chan);
93 return ret;
94 }
95
96 for (i = 0; i < 0x4000; i += 8) {
97 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
98 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
99 }
100
101 ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
102 if (ret) {
103 nv50_channel_del(&chan);
104 return ret;
105 }
106
107 *pchan = chan;
108 return 0;
109}
110
111int
112nv50_instmem_init(struct drm_device *dev)
113{ 45{
114 struct drm_nouveau_private *dev_priv = dev->dev_private; 46 struct nouveau_fb *pfb = nouveau_fb(parent);
115 struct nv50_instmem_priv *priv; 47 struct nv50_instobj_priv *node;
116 struct nouveau_channel *chan; 48 u32 align = (unsigned long)data;
117 struct nouveau_vm *vm; 49 int ret;
118 int ret, i;
119 u32 tmp;
120
121 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
122 if (!priv)
123 return -ENOMEM;
124 dev_priv->engine.instmem.priv = priv;
125
126 /* Save state, will restore at takedown. */
127 for (i = 0x1700; i <= 0x1710; i += 4)
128 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
129
130 /* Global PRAMIN heap */
131 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
132 if (ret) {
133 NV_ERROR(dev, "Failed to init RAMIN heap\n");
134 goto error;
135 }
136
137 /* BAR3 */
138 ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
139 &dev_priv->bar3_vm);
140 if (ret)
141 goto error;
142
143 ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
144 0x1000, NVOBJ_FLAG_DONT_MAP |
145 NVOBJ_FLAG_ZERO_ALLOC,
146 &dev_priv->bar3_vm->pgt[0].obj[0]);
147 if (ret)
148 goto error;
149 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
150
151 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
152
153 ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
154 if (ret)
155 goto error;
156 dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
157
158 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
159 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
160 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
161 &priv->bar3_dmaobj);
162 if (ret)
163 goto error;
164
165 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
166 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
167 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
168
169 dev_priv->engine.instmem.flush(dev);
170 dev_priv->ramin_available = true;
171
172 tmp = nv_ro32(chan->ramin, 0);
173 nv_wo32(chan->ramin, 0, ~tmp);
174 if (nv_ro32(chan->ramin, 0) != ~tmp) {
175 NV_ERROR(dev, "PRAMIN readback failed\n");
176 ret = -EIO;
177 goto error;
178 }
179 nv_wo32(chan->ramin, 0, tmp);
180
181 /* BAR1 */
182 ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
183 if (ret)
184 goto error;
185 50
186 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd); 51 size = max((size + 4095) & ~4095, (u32)4096);
187 if (ret) 52 align = max((align + 4095) & ~4095, (u32)4096);
188 goto error;
189 nouveau_vm_ref(NULL, &vm, NULL);
190 53
191 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE, 54 ret = nouveau_instobj_create(parent, engine, oclass, &node);
192 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, 55 *pobject = nv_object(node);
193 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
194 &priv->bar1_dmaobj);
195 if (ret) 56 if (ret)
196 goto error; 57 return ret;
197
198 nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
199 for (i = 0; i < 8; i++)
200 nv_wr32(dev, 0x1900 + (i*4), 0);
201 58
202 /* Create shared channel VM, space is reserved at the beginning 59 ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem);
203 * to catch "NULL pointer" references
204 */
205 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
206 &dev_priv->chan_vm);
207 if (ret) 60 if (ret)
208 return ret; 61 return ret;
209 62
63 node->base.addr = node->mem->offset;
64 node->base.size = node->mem->size << 12;
65 node->mem->page_shift = 12;
210 return 0; 66 return 0;
211
212error:
213 nv50_instmem_takedown(dev);
214 return ret;
215} 67}
216 68
217void 69static void
218nv50_instmem_takedown(struct drm_device *dev) 70nv50_instobj_dtor(struct nouveau_object *object)
219{ 71{
220 struct drm_nouveau_private *dev_priv = dev->dev_private; 72 struct nv50_instobj_priv *node = (void *)object;
221 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 73 struct nouveau_fb *pfb = nouveau_fb(object);
222 struct nouveau_channel *chan = dev_priv->channels.ptr[0]; 74 pfb->ram.put(pfb, &node->mem);
223 int i; 75 nouveau_instobj_destroy(&node->base);
224
225 NV_DEBUG(dev, "\n");
226
227 if (!priv)
228 return;
229
230 dev_priv->ramin_available = false;
231
232 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
233
234 for (i = 0x1700; i <= 0x1710; i += 4)
235 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
236
237 nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
238 nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
239
240 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
241 dev_priv->channels.ptr[127] = 0;
242 nv50_channel_del(&dev_priv->channels.ptr[0]);
243
244 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
245 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
246
247 if (drm_mm_initialized(&dev_priv->ramin_heap))
248 drm_mm_takedown(&dev_priv->ramin_heap);
249
250 dev_priv->engine.instmem.priv = NULL;
251 kfree(priv);
252} 76}
253 77
254int 78static u32
255nv50_instmem_suspend(struct drm_device *dev) 79nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
256{ 80{
257 struct drm_nouveau_private *dev_priv = dev->dev_private; 81 struct nv50_instmem_priv *priv = (void *)object->engine;
258 82 struct nv50_instobj_priv *node = (void *)object;
259 dev_priv->ramin_available = false; 83 unsigned long flags;
260 return 0; 84 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
85 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
86 u32 data;
87
88 spin_lock_irqsave(&priv->lock, flags);
89 if (unlikely(priv->addr != base)) {
90 nv_wr32(priv, 0x001700, base >> 16);
91 priv->addr = base;
92 }
93 data = nv_rd32(priv, 0x700000 + addr);
94 spin_unlock_irqrestore(&priv->lock, flags);
95 return data;
261} 96}
262 97
263void 98static void
264nv50_instmem_resume(struct drm_device *dev) 99nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
265{ 100{
266 struct drm_nouveau_private *dev_priv = dev->dev_private; 101 struct nv50_instmem_priv *priv = (void *)object->engine;
267 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 102 struct nv50_instobj_priv *node = (void *)object;
268 struct nouveau_channel *chan = dev_priv->channels.ptr[0]; 103 unsigned long flags;
269 int i; 104 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
270 105 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
271 /* Poke the relevant regs, and pray it works :) */
272 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
273 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
274 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
275 NV50_PUNK_BAR_CFG_BASE_VALID);
276 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
277 NV50_PUNK_BAR1_CTXDMA_VALID);
278 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
279 NV50_PUNK_BAR3_CTXDMA_VALID);
280
281 for (i = 0; i < 8; i++)
282 nv_wr32(dev, 0x1900 + (i*4), 0);
283 106
284 dev_priv->ramin_available = true; 107 spin_lock_irqsave(&priv->lock, flags);
108 if (unlikely(priv->addr != base)) {
109 nv_wr32(priv, 0x001700, base >> 16);
110 priv->addr = base;
111 }
112 nv_wr32(priv, 0x700000 + addr, data);
113 spin_unlock_irqrestore(&priv->lock, flags);
285} 114}
286 115
287struct nv50_gpuobj_node { 116static struct nouveau_oclass
288 struct nouveau_mem *vram; 117nv50_instobj_oclass = {
289 struct nouveau_vma chan_vma; 118 .ofuncs = &(struct nouveau_ofuncs) {
290 u32 align; 119 .ctor = nv50_instobj_ctor,
120 .dtor = nv50_instobj_dtor,
121 .init = _nouveau_instobj_init,
122 .fini = _nouveau_instobj_fini,
123 .rd32 = nv50_instobj_rd32,
124 .wr32 = nv50_instobj_wr32,
125 },
291}; 126};
292 127
293int 128static int
294nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, 129nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
295 u32 size, u32 align) 130 u32 size, u32 align, struct nouveau_object **pobject)
296{
297 struct drm_device *dev = gpuobj->dev;
298 struct nv50_gpuobj_node *node = NULL;
299 int ret;
300
301 node = kzalloc(sizeof(*node), GFP_KERNEL);
302 if (!node)
303 return -ENOMEM;
304 node->align = align;
305
306 size = (size + 4095) & ~4095;
307 align = max(align, (u32)4096);
308
309 ret = nvfb_vram_get(dev, size, align, 0, 0x800, &node->vram);
310 if (ret) {
311 kfree(node);
312 return ret;
313 }
314
315 gpuobj->vinst = node->vram->offset;
316 gpuobj->size = size;
317 gpuobj->node = node;
318 return 0;
319}
320
321void
322nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
323{ 131{
324 struct drm_device *dev = gpuobj->dev; 132 struct nouveau_object *engine = nv_object(imem);
325 struct nv50_gpuobj_node *node; 133 return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
326 134 (void *)(unsigned long)align, size, pobject);
327 node = gpuobj->node;
328 gpuobj->node = NULL;
329
330 if (node->chan_vma.node) {
331 nouveau_vm_unmap(&node->chan_vma);
332 nouveau_vm_put(&node->chan_vma);
333 }
334 nvfb_vram_put(dev, &node->vram);
335 kfree(node);
336} 135}
337 136
338int 137static int
339nv50_instmem_map(struct nouveau_gpuobj *gpuobj) 138nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
139 struct nouveau_oclass *oclass, void *data, u32 size,
140 struct nouveau_object **pobject)
340{ 141{
341 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 142 struct nv50_instmem_priv *priv;
342 struct nv50_gpuobj_node *node = gpuobj->node;
343 int ret; 143 int ret;
344 144
345 ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, 145 ret = nouveau_instmem_create(parent, engine, oclass, &priv);
346 NV_MEM_ACCESS_RW, &node->vram->bar_vma); 146 *pobject = nv_object(priv);
347 if (ret) 147 if (ret)
348 return ret; 148 return ret;
349 149
350 nouveau_vm_map(&node->vram->bar_vma, node->vram); 150 spin_lock_init(&priv->lock);
351 gpuobj->pinst = node->vram->bar_vma.offset; 151 priv->base.alloc = nv50_instmem_alloc;
352 return 0; 152 return 0;
353} 153}
354 154
355void 155static int
356nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) 156nv50_instmem_fini(struct nouveau_object *object, bool suspend)
357{ 157{
358 struct nv50_gpuobj_node *node = gpuobj->node; 158 struct nv50_instmem_priv *priv = (void *)object;
359 159 priv->addr = ~0ULL;
360 if (node->vram->bar_vma.node) { 160 return nouveau_instmem_fini(&priv->base, suspend);
361 nouveau_vm_unmap(&node->vram->bar_vma);
362 nouveau_vm_put(&node->vram->bar_vma);
363 }
364} 161}
365 162
366void 163struct nouveau_oclass
367nv50_instmem_flush(struct drm_device *dev) 164nv50_instmem_oclass = {
368{ 165 .handle = NV_SUBDEV(INSTMEM, 0x50),
369 struct drm_nouveau_private *dev_priv = dev->dev_private; 166 .ofuncs = &(struct nouveau_ofuncs) {
370 unsigned long flags; 167 .ctor = nv50_instmem_ctor,
371 168 .dtor = _nouveau_instmem_dtor,
372 spin_lock_irqsave(&dev_priv->vm_lock, flags); 169 .init = _nouveau_instmem_init,
373 nv_wr32(dev, 0x00330c, 0x00000001); 170 .fini = nv50_instmem_fini,
374 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 171 },
375 NV_ERROR(dev, "PRAMIN flush timeout\n"); 172};
376 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
377}
378
379void
380nv84_instmem_flush(struct drm_device *dev)
381{
382 struct drm_nouveau_private *dev_priv = dev->dev_private;
383 unsigned long flags;
384
385 spin_lock_irqsave(&dev_priv->vm_lock, flags);
386 nv_wr32(dev, 0x070000, 0x00000001);
387 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
388 NV_ERROR(dev, "PRAMIN flush timeout\n");
389 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
390}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nvc0.c
deleted file mode 100644
index 0ce986910cbf..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nvc0.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include <subdev/vm.h>
29
30struct nvc0_instmem_priv {
31 struct nouveau_gpuobj *bar1_pgd;
32 struct nouveau_channel *bar1;
33 struct nouveau_gpuobj *bar3_pgd;
34 struct nouveau_channel *bar3;
35};
36
37int
38nvc0_instmem_suspend(struct drm_device *dev)
39{
40 struct drm_nouveau_private *dev_priv = dev->dev_private;
41
42 dev_priv->ramin_available = false;
43 return 0;
44}
45
46void
47nvc0_instmem_resume(struct drm_device *dev)
48{
49 struct drm_nouveau_private *dev_priv = dev->dev_private;
50 struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
51
52 nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
53 nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
54 nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
55 dev_priv->ramin_available = true;
56}
57
58static void
59nvc0_channel_del(struct nouveau_channel **pchan)
60{
61 struct nouveau_channel *chan;
62
63 chan = *pchan;
64 *pchan = NULL;
65 if (!chan)
66 return;
67
68 nouveau_vm_ref(NULL, &chan->vm, NULL);
69 if (drm_mm_initialized(&chan->ramin_heap))
70 drm_mm_takedown(&chan->ramin_heap);
71 nouveau_gpuobj_ref(NULL, &chan->ramin);
72 kfree(chan);
73}
74
75static int
76nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
77 struct nouveau_channel **pchan,
78 struct nouveau_gpuobj *pgd, u64 vm_size)
79{
80 struct nouveau_channel *chan;
81 int ret;
82
83 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
84 if (!chan)
85 return -ENOMEM;
86 chan->dev = dev;
87
88 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
89 if (ret) {
90 nvc0_channel_del(&chan);
91 return ret;
92 }
93
94 ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
95 if (ret) {
96 nvc0_channel_del(&chan);
97 return ret;
98 }
99
100 ret = nouveau_vm_ref(vm, &chan->vm, NULL);
101 if (ret) {
102 nvc0_channel_del(&chan);
103 return ret;
104 }
105
106 nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
107 nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
108 nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
109 nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
110
111 *pchan = chan;
112 return 0;
113}
114
115int
116nvc0_instmem_init(struct drm_device *dev)
117{
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
120 struct pci_dev *pdev = dev->pdev;
121 struct nvc0_instmem_priv *priv;
122 struct nouveau_vm *vm = NULL;
123 int ret;
124
125 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
126 if (!priv)
127 return -ENOMEM;
128 pinstmem->priv = priv;
129
130 /* BAR3 VM */
131 ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
132 &dev_priv->bar3_vm);
133 if (ret)
134 goto error;
135
136 ret = nouveau_gpuobj_new(dev, NULL,
137 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
138 NVOBJ_FLAG_DONT_MAP |
139 NVOBJ_FLAG_ZERO_ALLOC,
140 &dev_priv->bar3_vm->pgt[0].obj[0]);
141 if (ret)
142 goto error;
143 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
144
145 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
146
147 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
148 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
149 if (ret)
150 goto error;
151
152 ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
153 if (ret)
154 goto error;
155 nouveau_vm_ref(NULL, &vm, NULL);
156
157 ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
158 priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
159 if (ret)
160 goto error;
161
162 /* BAR1 VM */
163 ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
164 if (ret)
165 goto error;
166
167 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
168 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
169 if (ret)
170 goto error;
171
172 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
173 if (ret)
174 goto error;
175 nouveau_vm_ref(NULL, &vm, NULL);
176
177 ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
178 priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
179 if (ret)
180 goto error;
181
182 /* channel vm */
183 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
184 &dev_priv->chan_vm);
185 if (ret)
186 goto error;
187
188 nvc0_instmem_resume(dev);
189 return 0;
190error:
191 nvc0_instmem_takedown(dev);
192 return ret;
193}
194
195void
196nvc0_instmem_takedown(struct drm_device *dev)
197{
198 struct drm_nouveau_private *dev_priv = dev->dev_private;
199 struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
200 struct nouveau_vm *vm = NULL;
201
202 nvc0_instmem_suspend(dev);
203
204 nv_wr32(dev, 0x1704, 0x00000000);
205 nv_wr32(dev, 0x1714, 0x00000000);
206
207 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
208
209 nvc0_channel_del(&priv->bar1);
210 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
211 nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
212
213 nvc0_channel_del(&priv->bar3);
214 nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
215 nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
216 nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
217 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
218 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
219
220 dev_priv->engine.instmem.priv = NULL;
221 kfree(priv);
222}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 9fb858a21abb..b92b3d47c69c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -22,22 +22,24 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/gpuobj.h>
26#include "nouveau_drv.h"
27#include <core/mm.h> 26#include <core/mm.h>
27
28#include <subdev/fb.h>
28#include <subdev/vm.h> 29#include <subdev/vm.h>
29 30
30void 31void
31nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node) 32nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
32{ 33{
33 struct nouveau_vm *vm = vma->vm; 34 struct nouveau_vm *vm = vma->vm;
35 struct nouveau_vmmgr *vmm = vm->vmm;
34 struct nouveau_mm_node *r; 36 struct nouveau_mm_node *r;
35 int big = vma->node->type != vm->spg_shift; 37 int big = vma->node->type != vmm->spg_shift;
36 u32 offset = vma->node->offset + (delta >> 12); 38 u32 offset = vma->node->offset + (delta >> 12);
37 u32 bits = vma->node->type - 12; 39 u32 bits = vma->node->type - 12;
38 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 40 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
39 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 41 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
40 u32 max = 1 << (vm->pgt_bits - bits); 42 u32 max = 1 << (vmm->pgt_bits - bits);
41 u32 end, len; 43 u32 end, len;
42 44
43 delta = 0; 45 delta = 0;
@@ -53,7 +55,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
53 end = max; 55 end = max;
54 len = end - pte; 56 len = end - pte;
55 57
56 vm->map(vma, pgt, node, pte, len, phys, delta); 58 vmm->map(vma, pgt, node, pte, len, phys, delta);
57 59
58 num -= len; 60 num -= len;
59 pte += len; 61 pte += len;
@@ -67,7 +69,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
67 } 69 }
68 } 70 }
69 71
70 vm->flush(vm); 72 vmm->flush(vm);
71} 73}
72 74
73void 75void
@@ -81,13 +83,14 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
81 struct nouveau_mem *mem) 83 struct nouveau_mem *mem)
82{ 84{
83 struct nouveau_vm *vm = vma->vm; 85 struct nouveau_vm *vm = vma->vm;
84 int big = vma->node->type != vm->spg_shift; 86 struct nouveau_vmmgr *vmm = vm->vmm;
87 int big = vma->node->type != vmm->spg_shift;
85 u32 offset = vma->node->offset + (delta >> 12); 88 u32 offset = vma->node->offset + (delta >> 12);
86 u32 bits = vma->node->type - 12; 89 u32 bits = vma->node->type - 12;
87 u32 num = length >> vma->node->type; 90 u32 num = length >> vma->node->type;
88 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 91 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
89 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 92 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
90 u32 max = 1 << (vm->pgt_bits - bits); 93 u32 max = 1 << (vmm->pgt_bits - bits);
91 unsigned m, sglen; 94 unsigned m, sglen;
92 u32 end, len; 95 u32 end, len;
93 int i; 96 int i;
@@ -105,7 +108,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
105 for (m = 0; m < len; m++) { 108 for (m = 0; m < len; m++) {
106 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 109 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
107 110
108 vm->map_sg(vma, pgt, mem, pte, 1, &addr); 111 vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
109 num--; 112 num--;
110 pte++; 113 pte++;
111 114
@@ -120,7 +123,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
120 for (; m < sglen; m++) { 123 for (; m < sglen; m++) {
121 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 124 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
122 125
123 vm->map_sg(vma, pgt, mem, pte, 1, &addr); 126 vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
124 num--; 127 num--;
125 pte++; 128 pte++;
126 if (num == 0) 129 if (num == 0)
@@ -130,7 +133,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
130 133
131 } 134 }
132finish: 135finish:
133 vm->flush(vm); 136 vmm->flush(vm);
134} 137}
135 138
136void 139void
@@ -138,14 +141,15 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
138 struct nouveau_mem *mem) 141 struct nouveau_mem *mem)
139{ 142{
140 struct nouveau_vm *vm = vma->vm; 143 struct nouveau_vm *vm = vma->vm;
144 struct nouveau_vmmgr *vmm = vm->vmm;
141 dma_addr_t *list = mem->pages; 145 dma_addr_t *list = mem->pages;
142 int big = vma->node->type != vm->spg_shift; 146 int big = vma->node->type != vmm->spg_shift;
143 u32 offset = vma->node->offset + (delta >> 12); 147 u32 offset = vma->node->offset + (delta >> 12);
144 u32 bits = vma->node->type - 12; 148 u32 bits = vma->node->type - 12;
145 u32 num = length >> vma->node->type; 149 u32 num = length >> vma->node->type;
146 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 150 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
147 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 151 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
148 u32 max = 1 << (vm->pgt_bits - bits); 152 u32 max = 1 << (vmm->pgt_bits - bits);
149 u32 end, len; 153 u32 end, len;
150 154
151 while (num) { 155 while (num) {
@@ -156,7 +160,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
156 end = max; 160 end = max;
157 len = end - pte; 161 len = end - pte;
158 162
159 vm->map_sg(vma, pgt, mem, pte, len, list); 163 vmm->map_sg(vma, pgt, mem, pte, len, list);
160 164
161 num -= len; 165 num -= len;
162 pte += len; 166 pte += len;
@@ -167,20 +171,21 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
167 } 171 }
168 } 172 }
169 173
170 vm->flush(vm); 174 vmm->flush(vm);
171} 175}
172 176
173void 177void
174nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) 178nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
175{ 179{
176 struct nouveau_vm *vm = vma->vm; 180 struct nouveau_vm *vm = vma->vm;
177 int big = vma->node->type != vm->spg_shift; 181 struct nouveau_vmmgr *vmm = vm->vmm;
182 int big = vma->node->type != vmm->spg_shift;
178 u32 offset = vma->node->offset + (delta >> 12); 183 u32 offset = vma->node->offset + (delta >> 12);
179 u32 bits = vma->node->type - 12; 184 u32 bits = vma->node->type - 12;
180 u32 num = length >> vma->node->type; 185 u32 num = length >> vma->node->type;
181 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 186 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
182 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 187 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
183 u32 max = 1 << (vm->pgt_bits - bits); 188 u32 max = 1 << (vmm->pgt_bits - bits);
184 u32 end, len; 189 u32 end, len;
185 190
186 while (num) { 191 while (num) {
@@ -191,7 +196,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
191 end = max; 196 end = max;
192 len = end - pte; 197 len = end - pte;
193 198
194 vm->unmap(pgt, pte, len); 199 vmm->unmap(pgt, pte, len);
195 200
196 num -= len; 201 num -= len;
197 pte += len; 202 pte += len;
@@ -201,7 +206,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
201 } 206 }
202 } 207 }
203 208
204 vm->flush(vm); 209 vmm->flush(vm);
205} 210}
206 211
207void 212void
@@ -213,6 +218,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
213static void 218static void
214nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde) 219nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
215{ 220{
221 struct nouveau_vmmgr *vmm = vm->vmm;
216 struct nouveau_vm_pgd *vpgd; 222 struct nouveau_vm_pgd *vpgd;
217 struct nouveau_vm_pgt *vpgt; 223 struct nouveau_vm_pgt *vpgt;
218 struct nouveau_gpuobj *pgt; 224 struct nouveau_gpuobj *pgt;
@@ -227,7 +233,7 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
227 vpgt->obj[big] = NULL; 233 vpgt->obj[big] = NULL;
228 234
229 list_for_each_entry(vpgd, &vm->pgd_list, head) { 235 list_for_each_entry(vpgd, &vm->pgd_list, head) {
230 vm->map_pgt(vpgd->obj, pde, vpgt->obj); 236 vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
231 } 237 }
232 238
233 mutex_unlock(&vm->mm.mutex); 239 mutex_unlock(&vm->mm.mutex);
@@ -239,18 +245,19 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
239static int 245static int
240nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) 246nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
241{ 247{
248 struct nouveau_vmmgr *vmm = vm->vmm;
242 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 249 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
243 struct nouveau_vm_pgd *vpgd; 250 struct nouveau_vm_pgd *vpgd;
244 struct nouveau_gpuobj *pgt; 251 struct nouveau_gpuobj *pgt;
245 int big = (type != vm->spg_shift); 252 int big = (type != vmm->spg_shift);
246 u32 pgt_size; 253 u32 pgt_size;
247 int ret; 254 int ret;
248 255
249 pgt_size = (1 << (vm->pgt_bits + 12)) >> type; 256 pgt_size = (1 << (vmm->pgt_bits + 12)) >> type;
250 pgt_size *= 8; 257 pgt_size *= 8;
251 258
252 mutex_unlock(&vm->mm.mutex); 259 mutex_unlock(&vm->mm.mutex);
253 ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, 260 ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
254 NVOBJ_FLAG_ZERO_ALLOC, &pgt); 261 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
255 mutex_lock(&vm->mm.mutex); 262 mutex_lock(&vm->mm.mutex);
256 if (unlikely(ret)) 263 if (unlikely(ret))
@@ -266,7 +273,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
266 273
267 vpgt->obj[big] = pgt; 274 vpgt->obj[big] = pgt;
268 list_for_each_entry(vpgd, &vm->pgd_list, head) { 275 list_for_each_entry(vpgd, &vm->pgd_list, head) {
269 vm->map_pgt(vpgd->obj, pde, vpgt->obj); 276 vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
270 } 277 }
271 278
272 return 0; 279 return 0;
@@ -276,6 +283,7 @@ int
276nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, 283nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
277 u32 access, struct nouveau_vma *vma) 284 u32 access, struct nouveau_vma *vma)
278{ 285{
286 struct nouveau_vmmgr *vmm = vm->vmm;
279 u32 align = (1 << page_shift) >> 12; 287 u32 align = (1 << page_shift) >> 12;
280 u32 msize = size >> 12; 288 u32 msize = size >> 12;
281 u32 fpde, lpde, pde; 289 u32 fpde, lpde, pde;
@@ -289,11 +297,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
289 return ret; 297 return ret;
290 } 298 }
291 299
292 fpde = (vma->node->offset >> vm->pgt_bits); 300 fpde = (vma->node->offset >> vmm->pgt_bits);
293 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; 301 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
294 for (pde = fpde; pde <= lpde; pde++) { 302 for (pde = fpde; pde <= lpde; pde++) {
295 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 303 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
296 int big = (vma->node->type != vm->spg_shift); 304 int big = (vma->node->type != vmm->spg_shift);
297 305
298 if (likely(vpgt->refcount[big])) { 306 if (likely(vpgt->refcount[big])) {
299 vpgt->refcount[big]++; 307 vpgt->refcount[big]++;
@@ -321,90 +329,67 @@ void
321nouveau_vm_put(struct nouveau_vma *vma) 329nouveau_vm_put(struct nouveau_vma *vma)
322{ 330{
323 struct nouveau_vm *vm = vma->vm; 331 struct nouveau_vm *vm = vma->vm;
332 struct nouveau_vmmgr *vmm = vm->vmm;
324 u32 fpde, lpde; 333 u32 fpde, lpde;
325 334
326 if (unlikely(vma->node == NULL)) 335 if (unlikely(vma->node == NULL))
327 return; 336 return;
328 fpde = (vma->node->offset >> vm->pgt_bits); 337 fpde = (vma->node->offset >> vmm->pgt_bits);
329 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; 338 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
330 339
331 mutex_lock(&vm->mm.mutex); 340 mutex_lock(&vm->mm.mutex);
332 nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde); 341 nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
333 nouveau_mm_free(&vm->mm, &vma->node); 342 nouveau_mm_free(&vm->mm, &vma->node);
334 mutex_unlock(&vm->mm.mutex); 343 mutex_unlock(&vm->mm.mutex);
335} 344}
336 345
337int 346int
338nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, 347nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
339 struct nouveau_vm **pvm) 348 u64 mm_offset, u32 block, struct nouveau_vm **pvm)
340{ 349{
341 struct drm_nouveau_private *dev_priv = dev->dev_private;
342 struct nouveau_vm *vm; 350 struct nouveau_vm *vm;
343 u64 mm_length = (offset + length) - mm_offset; 351 u64 mm_length = (offset + length) - mm_offset;
344 u32 block, pgt_bits;
345 int ret; 352 int ret;
346 353
347 vm = kzalloc(sizeof(*vm), GFP_KERNEL); 354 vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
348 if (!vm) 355 if (!vm)
349 return -ENOMEM; 356 return -ENOMEM;
350 357
351 if (dev_priv->card_type == NV_50) { 358 INIT_LIST_HEAD(&vm->pgd_list);
352 vm->map_pgt = nv50_vm_map_pgt; 359 vm->vmm = vmm;
353 vm->map = nv50_vm_map; 360 vm->refcount = 1;
354 vm->map_sg = nv50_vm_map_sg; 361 vm->fpde = offset >> (vmm->pgt_bits + 12);
355 vm->unmap = nv50_vm_unmap; 362 vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
356 vm->flush = nv50_vm_flush;
357 vm->spg_shift = 12;
358 vm->lpg_shift = 16;
359
360 pgt_bits = 29;
361 block = (1 << pgt_bits);
362 if (length < block)
363 block = length;
364
365 } else
366 if (dev_priv->card_type >= NV_C0) {
367 vm->map_pgt = nvc0_vm_map_pgt;
368 vm->map = nvc0_vm_map;
369 vm->map_sg = nvc0_vm_map_sg;
370 vm->unmap = nvc0_vm_unmap;
371 vm->flush = nvc0_vm_flush;
372 vm->spg_shift = 12;
373 vm->lpg_shift = 17;
374 pgt_bits = 27;
375 block = 4096;
376 } else {
377 kfree(vm);
378 return -ENOSYS;
379 }
380 363
381 vm->fpde = offset >> pgt_bits; 364 vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
382 vm->lpde = (offset + length - 1) >> pgt_bits;
383 vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
384 if (!vm->pgt) { 365 if (!vm->pgt) {
385 kfree(vm); 366 kfree(vm);
386 return -ENOMEM; 367 return -ENOMEM;
387 } 368 }
388 369
389 INIT_LIST_HEAD(&vm->pgd_list);
390 vm->dev = dev;
391 vm->refcount = 1;
392 vm->pgt_bits = pgt_bits - 12;
393
394 ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, 370 ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
395 block >> 12); 371 block >> 12);
396 if (ret) { 372 if (ret) {
373 kfree(vm->pgt);
397 kfree(vm); 374 kfree(vm);
398 return ret; 375 return ret;
399 } 376 }
400 377
401 *pvm = vm;
402 return 0; 378 return 0;
403} 379}
404 380
381int
382nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
383 u64 mm_offset, struct nouveau_vm **pvm)
384{
385 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
386 return vmm->create(vmm, offset, length, mm_offset, pvm);
387}
388
405static int 389static int
406nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) 390nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
407{ 391{
392 struct nouveau_vmmgr *vmm = vm->vmm;
408 struct nouveau_vm_pgd *vpgd; 393 struct nouveau_vm_pgd *vpgd;
409 int i; 394 int i;
410 395
@@ -419,7 +404,7 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
419 404
420 mutex_lock(&vm->mm.mutex); 405 mutex_lock(&vm->mm.mutex);
421 for (i = vm->fpde; i <= vm->lpde; i++) 406 for (i = vm->fpde; i <= vm->lpde; i++)
422 vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); 407 vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
423 list_add(&vpgd->head, &vm->pgd_list); 408 list_add(&vpgd->head, &vm->pgd_list);
424 mutex_unlock(&vm->mm.mutex); 409 mutex_unlock(&vm->mm.mutex);
425 return 0; 410 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
new file mode 100644
index 000000000000..6475c0201d01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include "nv04.h"
28
29#define NV04_PDMA_SIZE (128 * 1024 * 1024)
30#define NV04_PDMA_PAGE ( 4 * 1024)
31
32/*******************************************************************************
33 * VM map/unmap callbacks
34 ******************************************************************************/
35
36static void
37nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
38 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
39{
40 pte = 0x00008 + (pte * 4);
41 while (cnt) {
42 u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
43 u32 phys = (u32)*list++;
44 while (cnt && page--) {
45 nv_wo32(pgt, pte, phys | 3);
46 phys += NV04_PDMA_PAGE;
47 pte += 4;
48 cnt -= 1;
49 }
50 }
51}
52
53static void
54nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
55{
56 pte = 0x00008 + (pte * 4);
57 while (cnt--) {
58 nv_wo32(pgt, pte, 0x00000000);
59 pte += 4;
60 }
61}
62
/* Intentionally empty: no TLB-flush register sequence is issued for the
 * nv04 PCIGART path — presumably none is required on this hardware
 * (TODO confirm).
 */
static void
nv04_vm_flush(struct nouveau_vm *vm)
{
}
67
68/*******************************************************************************
69 * VM object
70 ******************************************************************************/
71
/* VM creation hook for nv04-family GART managers.  Always fails: the
 * only VM these managers expose is the single GART VM built in the
 * constructor; callers cannot create additional address spaces.
 */
int
nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
	       struct nouveau_vm **pvm)
{
	return -EINVAL;
}
78
79/*******************************************************************************
80 * VMMGR subdev
81 ******************************************************************************/
82
83static int
84nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
85 struct nouveau_oclass *oclass, void *data, u32 size,
86 struct nouveau_object **pobject)
87{
88 struct nv04_vmmgr_priv *priv;
89 struct nouveau_gpuobj *dma;
90 int ret;
91
92 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
93 "pcigart", &priv);
94 *pobject = nv_object(priv);
95 if (ret)
96 return ret;
97
98 priv->base.create = nv04_vm_create;
99 priv->base.pgt_bits = 32 - 12;
100 priv->base.spg_shift = 12;
101 priv->base.lpg_shift = 12;
102 priv->base.map_sg = nv04_vm_map_sg;
103 priv->base.unmap = nv04_vm_unmap;
104 priv->base.flush = nv04_vm_flush;
105
106 ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
107 &priv->vm);
108 if (ret)
109 return ret;
110
111 ret = nouveau_gpuobj_new(parent, NULL,
112 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
113 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
114 &priv->vm->pgt[0].obj[0]);
115 dma = priv->vm->pgt[0].obj[0];
116 priv->vm->pgt[0].refcount[0] = 1;
117 if (ret)
118 return ret;
119
120 nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
121 nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
122 return 0;
123}
124
/* Tear down an nv04-family GART VM manager (shared by nv04/nv41/nv44
 * oclasses).  Drops the page-table object and VM references, releases
 * the dummy page and its DMA mapping if one was allocated (nv44), then
 * destroys the base vmmgr object.  Order matters: the page table must
 * be released before the VM that owns its slot.
 */
void
nv04_vmmgr_dtor(struct nouveau_object *object)
{
	struct nv04_vmmgr_priv *priv = (void *)object;
	if (priv->vm) {
		nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->vm, NULL);
	}
	if (priv->page) {
		pci_unmap_page(nv_device(priv)->pdev, priv->null,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->page);
	}
	nouveau_vmmgr_destroy(&priv->base);
}
140
/* Object class for the nv04 PCIGART VM manager subdev. */
struct nouveau_oclass
nv04_vmmgr_oclass = {
	.handle = NV_SUBDEV(VM, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_vmmgr_ctor,
		.dtor = nv04_vmmgr_dtor,
		.init = _nouveau_vmmgr_init,
		.fini = _nouveau_vmmgr_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
new file mode 100644
index 000000000000..530930320bc4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -0,0 +1,13 @@
1#ifndef __NV04_VMMGR_PRIV__
2#define __NV04_VMMGR_PRIV__
3
4#include <subdev/vm.h>
5
/* Private state shared by the nv04/nv41/nv44 GART VM managers. */
struct nv04_vmmgr_priv {
	struct nouveau_vmmgr base;	/* common vmmgr, must be first member */
	struct nouveau_vm *vm;		/* the single GART address space */
	struct page *page;		/* dummy page backing unmapped PTEs (nv44 only) */
	dma_addr_t null;		/* DMA address of the dummy page, 0 if none */
};
12
13#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
new file mode 100644
index 000000000000..f0367703dff0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/vm.h>
29
30#include "nv04.h"
31
32#define NV41_GART_SIZE (512 * 1024 * 1024)
33#define NV41_GART_PAGE ( 4 * 1024)
34
35/*******************************************************************************
36 * VM map/unmap callbacks
37 ******************************************************************************/
38
39static void
40nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
41 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
42{
43 pte = pte * 4;
44 while (cnt) {
45 u32 page = PAGE_SIZE / NV41_GART_PAGE;
46 u64 phys = (u64)*list++;
47 while (cnt && page--) {
48 nv_wo32(pgt, pte, (phys >> 7) | 1);
49 phys += NV41_GART_PAGE;
50 pte += 4;
51 cnt -= 1;
52 }
53 }
54}
55
56static void
57nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
58{
59 pte = pte * 4;
60 while (cnt--) {
61 nv_wo32(pgt, pte, 0x00000000);
62 pte += 4;
63 }
64}
65
66static void
67nv41_vm_flush(struct nouveau_vm *vm)
68{
69 struct nv04_vm_priv *priv = (void *)vm->vmm;
70
71 mutex_lock(&nv_subdev(priv)->mutex);
72 nv_wr32(priv, 0x100810, 0x00000022);
73 if (!nv_wait(priv, 0x100810, 0x00000100, 0x00000100)) {
74 nv_warn(priv, "flush timeout, 0x%08x\n",
75 nv_rd32(priv, 0x100810));
76 }
77 nv_wr32(priv, 0x100810, 0x00000000);
78 mutex_unlock(&nv_subdev(priv)->mutex);
79}
80
81/*******************************************************************************
82 * VMMGR subdev
83 ******************************************************************************/
84
85static int
86nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
87 struct nouveau_oclass *oclass, void *data, u32 size,
88 struct nouveau_object **pobject)
89{
90 struct nv04_vmmgr_priv *priv;
91 int ret;
92
93 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
94 "pciegart", &priv);
95 *pobject = nv_object(priv);
96 if (ret)
97 return ret;
98
99 priv->base.create = nv04_vm_create;
100 priv->base.pgt_bits = 32 - 12;
101 priv->base.spg_shift = 12;
102 priv->base.lpg_shift = 12;
103 priv->base.map_sg = nv41_vm_map_sg;
104 priv->base.unmap = nv41_vm_unmap;
105 priv->base.flush = nv41_vm_flush;
106
107 ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
108 &priv->vm);
109 if (ret)
110 return ret;
111
112 ret = nouveau_gpuobj_new(parent, NULL,
113 (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
114 16, NVOBJ_FLAG_ZERO_ALLOC,
115 &priv->vm->pgt[0].obj[0]);
116 priv->vm->pgt[0].refcount[0] = 1;
117 if (ret)
118 return ret;
119
120 return 0;
121}
122
/* Enable the nv41 PCIEGART: initialise the base vmmgr, then point the
 * hardware at the page-table object and enable GART translation.  The
 * register sequence is order-sensitive; do not reorder.
 */
static int
nv41_vmmgr_init(struct nouveau_object *object)
{
	struct nv04_vmmgr_priv *priv = (void *)object;
	struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
	int ret;

	ret = nouveau_vmmgr_init(&priv->base);
	if (ret)
		return ret;

	/* page-table base address, bit 1 presumably an enable/valid flag
	 * (TODO confirm) */
	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x100820, 0x00000000);
	return 0;
}
139
/* Object class for the nv41 PCIEGART VM manager subdev; shares the
 * destructor with nv04.
 */
struct nouveau_oclass
nv41_vmmgr_oclass = {
	.handle = NV_SUBDEV(VM, 0x41),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv41_vmmgr_ctor,
		.dtor = nv04_vmmgr_dtor,
		.init = nv41_vmmgr_init,
		.fini = _nouveau_vmmgr_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
new file mode 100644
index 000000000000..d17f76120bcd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/vm.h>
29
30#include "nv04.h"
31
32#define NV44_GART_SIZE (512 * 1024 * 1024)
33#define NV44_GART_PAGE ( 4 * 1024)
34
35/*******************************************************************************
36 * VM map/unmap callbacks
37 ******************************************************************************/
38
/* Flush a range of the nv44 GART TLB.  "base" and "size" are derived
 * from the first PTE index and PTE count by the callers; the exact
 * hardware interpretation of 0x100814/0x100808 is taken from observed
 * usage — TODO confirm against hardware docs.  Waits for bit 0 of
 * 0x100808 to signal completion, logging on timeout.
 */
static void
nv44_vm_flush_priv(struct nv04_vmmgr_priv *priv, u32 base, u32 size)
{
	nv_wr32(priv, 0x100814, (size - 1) << 12);
	nv_wr32(priv, 0x100808, base | 0x20);
	if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
		nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
	nv_wr32(priv, 0x100808, 0x00000000);
}
48
/* Read-modify-write up to one 4-PTE group of the nv44 GART page table.
 *
 * nv44 packs four 27-bit page addresses into four 32-bit words (108
 * bits of address data), so a single PTE cannot be written in
 * isolation: the whole 16-byte group containing "pte" is read, the
 * requested entries are spliced in, and the group is written back.
 *
 * If "list" is NULL the entries are pointed at the dummy page ("null")
 * instead — used when unmapping.  "cnt" must not cross a group
 * boundary; callers ensure pte+cnt stays within one group of 4.
 * Bit 30 is set in the last word of the group — purpose not evident
 * from this code (TODO confirm).
 */
static void
nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
	     dma_addr_t *list, u32 pte, u32 cnt)
{
	u32 base = (pte << 2) & ~0x0000000f;	/* byte offset of the group */
	u32 tmp[4];

	/* load the current 4-word group */
	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (null >> 12);
		/* each 27-bit address straddles word boundaries depending on
		 * its slot within the group */
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	/* write the group back */
	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
93
/* Map "cnt" pages from "list" into the nv44 GART, starting at "pte".
 *
 * The page table packs 4 PTEs per 16-byte group (see nv44_vm_fill), so
 * an unaligned head and a partial tail are handled via the
 * read-modify-write fill helper, while full groups in the middle are
 * written directly.  Finishes with a TLB flush over the touched range.
 */
static void
nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
	u32 base = pte << 12;	/* flush-range base, saved before pte advances */
	u32 size = cnt;
	u32 tmp[4];
	int i;

	/* unaligned head: fill up to the next group boundary */
	if (pte & 3) {
		u32 max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, priv->null, list, pte, part);
		pte += part;
		list += part;
		cnt -= part;
	}

	/* full groups: pack four 27-bit page addresses into four words */
	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
		nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
		cnt -= 4;
	}

	/* partial tail group */
	if (cnt)
		nv44_vm_fill(pgt, priv->null, list, pte, cnt);
	nv44_vm_flush_priv(priv, base, size);
}
127
/* Unmap "cnt" PTEs starting at "pte" from the nv44 GART.
 *
 * Partial groups at the head/tail are redirected to the dummy page via
 * nv44_vm_fill (list == NULL); full groups are simply zeroed.  Finishes
 * with a TLB flush over the touched range.
 */
static void
nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
	u32 base = pte << 12;	/* flush-range base, saved before pte advances */
	u32 size = cnt;

	/* unaligned head: point entries at the dummy page */
	if (pte & 3) {
		u32 max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_vm_fill(pgt, priv->null, NULL, pte, part);
		pte += part;
		cnt -= part;
	}

	/* full groups can be cleared outright */
	while (cnt >= 4) {
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		cnt -= 4;
	}

	/* partial tail group */
	if (cnt)
		nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
	nv44_vm_flush_priv(priv, base, size);
}
155
/* Intentionally empty: nv44 flushes the TLB per-range inside the
 * map/unmap paths (nv44_vm_flush_priv), so the generic VM flush hook
 * has nothing left to do.
 */
static void
nv44_vm_flush(struct nouveau_vm *vm)
{
}
160
161/*******************************************************************************
162 * VMMGR subdev
163 ******************************************************************************/
164
165static int
166nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
167 struct nouveau_oclass *oclass, void *data, u32 size,
168 struct nouveau_object **pobject)
169{
170 struct nouveau_device *device = nv_device(parent);
171 struct nv04_vmmgr_priv *priv;
172 int ret;
173
174 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
175 "pciegart", &priv);
176 *pobject = nv_object(priv);
177 if (ret)
178 return ret;
179
180 priv->base.create = nv04_vm_create;
181 priv->base.pgt_bits = 32 - 12;
182 priv->base.spg_shift = 12;
183 priv->base.lpg_shift = 12;
184 priv->base.map_sg = nv44_vm_map_sg;
185 priv->base.unmap = nv44_vm_unmap;
186 priv->base.flush = nv44_vm_flush;
187
188 priv->page = alloc_page(GFP_DMA32 | GFP_KERNEL);
189 if (priv->page) {
190 priv->null = pci_map_page(device->pdev, priv->page, 0,
191 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
192 if (pci_dma_mapping_error(device->pdev, priv->null)) {
193 __free_page(priv->page);
194 priv->page = NULL;
195 priv->null = 0;
196 }
197 }
198
199 if (!priv->page)
200 nv_warn(priv, "unable to allocate dummy page\n");
201
202 ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
203 &priv->vm);
204 if (ret)
205 return ret;
206
207 ret = nouveau_gpuobj_new(parent, NULL,
208 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
209 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
210 &priv->vm->pgt[0].obj[0]);
211 priv->vm->pgt[0].refcount[0] = 1;
212 if (ret)
213 return ret;
214
215 return 0;
216}
217
/* Enable the nv44 PCIEGART: initialise the base vmmgr, compute the VRAM
 * address of the PRAMIN block holding the page table, and program the
 * GART registers.  The register sequence is order-sensitive; do not
 * reorder.
 */
static int
nv44_vmmgr_init(struct nouveau_object *object)
{
	struct nv04_vmmgr_priv *priv = (void *)object;
	struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
	u32 addr;
	int ret;

	ret = nouveau_vmmgr_init(&priv->base);
	if (ret)
		return ret;

	/* calculate vram address of this PRAMIN block, object must be
	 * allocated on 512KiB alignment, and not exceed a total size
	 * of 512KiB for this to work correctly
	 */
	addr = nv_rd32(priv, 0x10020c);
	addr -= ((gart->addr >> 19) + 1) << 19;

	nv_wr32(priv, 0x100850, 0x80000000);
	nv_wr32(priv, 0x100818, priv->null);	/* dummy-page DMA address */
	nv_wr32(priv, 0x100804, NV44_GART_SIZE);
	nv_wr32(priv, 0x100850, 0x00008000);
	nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
	nv_wr32(priv, 0x100820, 0x00000000);
	nv_wr32(priv, 0x10082c, 0x00000001);
	nv_wr32(priv, 0x100800, addr | 0x00000010);
	return 0;
}
247
/* Object class for the nv44 PCIEGART VM manager subdev; shares the
 * destructor with nv04.
 */
struct nouveau_oclass
nv44_vmmgr_oclass = {
	.handle = NV_SUBDEV(VM, 0x44),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv44_vmmgr_ctor,
		.dtor = nv04_vmmgr_dtor,
		.init = nv44_vmmgr_init,
		.fini = _nouveau_vmmgr_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 7e46826de0e9..6e9bcd212cfc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -22,11 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/device.h>
26#include <core/gpuobj.h>
26 27
27#include "nouveau_drv.h" 28#include <subdev/timer.h>
29#include <subdev/fb.h>
28#include <subdev/vm.h> 30#include <subdev/vm.h>
29 31
32struct nv50_vmmgr_priv {
33 struct nouveau_vmmgr base;
34 spinlock_t lock;
35};
36
30void 37void
31nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, 38nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
32 struct nouveau_gpuobj *pgt[2]) 39 struct nouveau_gpuobj *pgt[2])
@@ -35,11 +42,11 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
35 u32 coverage = 0; 42 u32 coverage = 0;
36 43
37 if (pgt[0]) { 44 if (pgt[0]) {
38 phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */ 45 phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
39 coverage = (pgt[0]->size >> 3) << 12; 46 coverage = (pgt[0]->size >> 3) << 12;
40 } else 47 } else
41 if (pgt[1]) { 48 if (pgt[1]) {
42 phys = 0x00000001 | pgt[1]->vinst; /* present */ 49 phys = 0x00000001 | pgt[1]->addr; /* present */
43 coverage = (pgt[1]->size >> 3) << 16; 50 coverage = (pgt[1]->size >> 3) << 16;
44 } 51 }
45 52
@@ -73,15 +80,14 @@ void
73nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, 80nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
74 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) 81 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
75{ 82{
76 struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
77 u32 comp = (mem->memtype & 0x180) >> 7; 83 u32 comp = (mem->memtype & 0x180) >> 7;
78 u32 block, target; 84 u32 block, target;
79 int i; 85 int i;
80 86
81 /* IGPs don't have real VRAM, re-target to stolen system memory */ 87 /* IGPs don't have real VRAM, re-target to stolen system memory */
82 target = 0; 88 target = 0;
83 if (nvfb_vram_sys_base(dev_priv->dev)) { 89 if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
84 phys += nvfb_vram_sys_base(dev_priv->dev); 90 phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
85 target = 3; 91 target = 3;
86 } 92 }
87 93
@@ -145,33 +151,81 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
145void 151void
146nv50_vm_flush(struct nouveau_vm *vm) 152nv50_vm_flush(struct nouveau_vm *vm)
147{ 153{
148 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 154 struct nouveau_engine *engine;
149 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
150 int i; 155 int i;
151 156
152 pinstmem->flush(vm->dev); 157#if 0
153 158 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
154 /* BAR */ 159 if (atomic_read(&vm->engref[i])) {
155 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) { 160 engine = nouveau_engine(vm->vmm, i);
156 nv50_vm_flush_engine(vm->dev, 6); 161 if (engine && engine->tlb_flush)
157 return; 162 engine->tlb_flush(engine);
158 } 163 }
159
160 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
161 if (atomic_read(&vm->engref[i]))
162 dev_priv->eng[i]->tlb_flush(vm->dev, i);
163 } 164 }
165#else
166 nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x06); /* bar */
167 nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x05); /* fifo */
168 nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x00); /* gr */
169#endif
164} 170}
165 171
166void 172void
167nv50_vm_flush_engine(struct drm_device *dev, int engine) 173nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
168{ 174{
169 struct drm_nouveau_private *dev_priv = dev->dev_private; 175 struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
170 unsigned long flags; 176 unsigned long flags;
171 177
172 spin_lock_irqsave(&dev_priv->vm_lock, flags); 178 spin_lock_irqsave(&priv->lock, flags);
173 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 179 nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
174 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 180 if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
175 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 181 nv_error(subdev, "vm flush timeout: engine %d\n", engine);
176 spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 182 spin_unlock_irqrestore(&priv->lock, flags);
177} 183}
184
185static int
186nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
187 u64 mm_offset, struct nouveau_vm **pvm)
188{
189 u32 block = (1 << (vmm->pgt_bits + 12));
190 if (block > length)
191 block = length;
192
193 return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
194}
195
196static int
197nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
198 struct nouveau_oclass *oclass, void *data, u32 size,
199 struct nouveau_object **pobject)
200{
201 struct nv50_vmmgr_priv *priv;
202 int ret;
203
204 ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
205 *pobject = nv_object(priv);
206 if (ret)
207 return ret;
208
209 priv->base.pgt_bits = 29 - 12;
210 priv->base.spg_shift = 12;
211 priv->base.lpg_shift = 16;
212 priv->base.create = nv50_vm_create;
213 priv->base.map_pgt = nv50_vm_map_pgt;
214 priv->base.map = nv50_vm_map;
215 priv->base.map_sg = nv50_vm_map_sg;
216 priv->base.unmap = nv50_vm_unmap;
217 priv->base.flush = nv50_vm_flush;
218 spin_lock_init(&priv->lock);
219 return 0;
220}
221
222struct nouveau_oclass
223nv50_vmmgr_oclass = {
224 .handle = NV_SUBDEV(VM, 0x50),
225 .ofuncs = &(struct nouveau_ofuncs) {
226 .ctor = nv50_vmmgr_ctor,
227 .dtor = _nouveau_vmmgr_dtor,
228 .init = _nouveau_vmmgr_init,
229 .fini = _nouveau_vmmgr_fini,
230 },
231};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 734877a9aa09..a0bc0f678d12 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -22,11 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/device.h>
26#include <core/gpuobj.h>
26 27
27#include "nouveau_drv.h" 28#include <subdev/timer.h>
29#include <subdev/fb.h>
28#include <subdev/vm.h> 30#include <subdev/vm.h>
29 31
32struct nvc0_vmmgr_priv {
33 struct nouveau_vmmgr base;
34 spinlock_t lock;
35};
36
30void 37void
31nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, 38nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
32 struct nouveau_gpuobj *pgt[2]) 39 struct nouveau_gpuobj *pgt[2])
@@ -34,9 +41,9 @@ nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
34 u32 pde[2] = { 0, 0 }; 41 u32 pde[2] = { 0, 0 };
35 42
36 if (pgt[0]) 43 if (pgt[0])
37 pde[1] = 0x00000001 | (pgt[0]->vinst >> 8); 44 pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
38 if (pgt[1]) 45 if (pgt[1])
39 pde[0] = 0x00000001 | (pgt[1]->vinst >> 8); 46 pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
40 47
41 nv_wo32(pgd, (index * 8) + 0, pde[0]); 48 nv_wo32(pgd, (index * 8) + 0, pde[0]);
42 nv_wo32(pgd, (index * 8) + 4, pde[1]); 49 nv_wo32(pgd, (index * 8) + 4, pde[1]);
@@ -100,37 +107,81 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
100} 107}
101 108
102void 109void
103nvc0_vm_flush(struct nouveau_vm *vm) 110nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
104{ 111{
105 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 112 struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
106 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
107 struct drm_device *dev = vm->dev;
108 struct nouveau_vm_pgd *vpgd;
109 unsigned long flags; 113 unsigned long flags;
110 u32 engine;
111 114
112 engine = 1; 115 /* looks like maybe a "free flush slots" counter, the
113 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) 116 * faster you write to 0x100cbc to more it decreases
114 engine |= 4; 117 */
118 spin_lock_irqsave(&priv->lock, flags);
119 if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
120 nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
121 nv_rd32(subdev, 0x100c80), type);
122 }
123
124 nv_wr32(subdev, 0x100cb8, addr >> 8);
125 nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
115 126
116 pinstmem->flush(vm->dev); 127 /* wait for flush to be queued? */
128 if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
129 nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
130 nv_rd32(subdev, 0x100c80), type);
131 }
132 spin_unlock_irqrestore(&priv->lock, flags);
133}
134
135void
136nvc0_vm_flush(struct nouveau_vm *vm)
137{
138 struct nouveau_vm_pgd *vpgd;
117 139
118 spin_lock_irqsave(&dev_priv->vm_lock, flags);
119 list_for_each_entry(vpgd, &vm->pgd_list, head) { 140 list_for_each_entry(vpgd, &vm->pgd_list, head) {
120 /* looks like maybe a "free flush slots" counter, the 141 nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
121 * faster you write to 0x100cbc to more it decreases
122 */
123 if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
124 NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
125 nv_rd32(dev, 0x100c80), engine);
126 }
127 nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
128 nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
129 /* wait for flush to be queued? */
130 if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
131 NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
132 nv_rd32(dev, 0x100c80), engine);
133 }
134 } 142 }
135 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
136} 143}
144
145static int
146nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
147 u64 mm_offset, struct nouveau_vm **pvm)
148{
149 return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
150}
151
152static int
153nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
154 struct nouveau_oclass *oclass, void *data, u32 size,
155 struct nouveau_object **pobject)
156{
157 struct nvc0_vmmgr_priv *priv;
158 int ret;
159
160 ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
161 *pobject = nv_object(priv);
162 if (ret)
163 return ret;
164
165 priv->base.pgt_bits = 27 - 12;
166 priv->base.spg_shift = 12;
167 priv->base.lpg_shift = 17;
168 priv->base.create = nvc0_vm_create;
169 priv->base.map_pgt = nvc0_vm_map_pgt;
170 priv->base.map = nvc0_vm_map;
171 priv->base.map_sg = nvc0_vm_map_sg;
172 priv->base.unmap = nvc0_vm_unmap;
173 priv->base.flush = nvc0_vm_flush;
174 spin_lock_init(&priv->lock);
175 return 0;
176}
177
178struct nouveau_oclass
179nvc0_vmmgr_oclass = {
180 .handle = NV_SUBDEV(VM, 0xc0),
181 .ofuncs = &(struct nouveau_ofuncs) {
182 .ctor = nvc0_vmmgr_ctor,
183 .dtor = _nouveau_vmmgr_dtor,
184 .init = _nouveau_vmmgr_init,
185 .fini = _nouveau_vmmgr_fini,
186 },
187};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f63785c2aae0..9f5696a1fbb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -34,7 +34,6 @@
34#include "nouveau_drv.h" 34#include "nouveau_drv.h"
35#include "nouveau_dma.h" 35#include "nouveau_dma.h"
36#include <core/mm.h> 36#include <core/mm.h>
37#include <subdev/vm.h>
38#include "nouveau_fence.h" 37#include "nouveau_fence.h"
39#include <core/ramht.h> 38#include <core/ramht.h>
40 39
@@ -114,9 +113,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
114 nvbo->bo.bdev = &dev_priv->ttm.bdev; 113 nvbo->bo.bdev = &dev_priv->ttm.bdev;
115 114
116 nvbo->page_shift = 12; 115 nvbo->page_shift = 12;
117 if (dev_priv->bar1_vm) { 116 if (dev_priv->chan_vm) {
118 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) 117 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
119 nvbo->page_shift = dev_priv->bar1_vm->lpg_shift; 118 nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
120 } 119 }
121 120
122 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 121 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -420,6 +419,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
420 if (dev_priv->card_type >= NV_50) 419 if (dev_priv->card_type >= NV_50)
421 man->func = &nouveau_gart_manager; 420 man->func = &nouveau_gart_manager;
422 else 421 else
422 if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
423 man->func = &nv04_gart_manager;
424 else
423 man->func = &ttm_bo_manager_func; 425 man->func = &ttm_bo_manager_func;
424 switch (dev_priv->gart_info.type) { 426 switch (dev_priv->gart_info.type) {
425 case NOUVEAU_GART_AGP: 427 case NOUVEAU_GART_AGP:
@@ -1044,7 +1046,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1044 nouveau_vm_map(vma, new_mem->mm_node); 1046 nouveau_vm_map(vma, new_mem->mm_node);
1045 } else 1047 } else
1046 if (new_mem && new_mem->mem_type == TTM_PL_TT && 1048 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1047 nvbo->page_shift == vma->vm->spg_shift) { 1049 nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
1048 if (((struct nouveau_mem *)new_mem->mm_node)->sg) 1050 if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1049 nouveau_vm_map_sg_table(vma, 0, new_mem-> 1051 nouveau_vm_map_sg_table(vma, 0, new_mem->
1050 num_pages << PAGE_SHIFT, 1052 num_pages << PAGE_SHIFT,
@@ -1184,40 +1186,19 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1184#endif 1186#endif
1185 break; 1187 break;
1186 case TTM_PL_VRAM: 1188 case TTM_PL_VRAM:
1187 { 1189 mem->bus.offset = mem->start << PAGE_SHIFT;
1188 struct nouveau_mem *node = mem->mm_node; 1190 mem->bus.base = pci_resource_start(dev->pdev, 1);
1189 u8 page_shift; 1191 mem->bus.is_iomem = true;
1190 1192 if (dev_priv->card_type >= NV_50) {
1191 if (!dev_priv->bar1_vm) { 1193 struct nouveau_mem *node = mem->mm_node;
1192 mem->bus.offset = mem->start << PAGE_SHIFT;
1193 mem->bus.base = pci_resource_start(dev->pdev, 1);
1194 mem->bus.is_iomem = true;
1195 break;
1196 }
1197
1198 if (dev_priv->card_type >= NV_C0)
1199 page_shift = node->page_shift;
1200 else
1201 page_shift = 12;
1202 1194
1203 ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 1195 ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
1204 page_shift, NV_MEM_ACCESS_RW, 1196 &node->bar_vma);
1205 &node->bar_vma); 1197 if (ret)
1206 if (ret) 1198 return ret;
1207 return ret;
1208 1199
1209 nouveau_vm_map(&node->bar_vma, node); 1200 mem->bus.offset = node->bar_vma.offset;
1210 if (ret) {
1211 nouveau_vm_put(&node->bar_vma);
1212 return ret;
1213 } 1201 }
1214
1215 mem->bus.offset = node->bar_vma.offset;
1216 if (dev_priv->card_type == NV_50) /*XXX*/
1217 mem->bus.offset -= 0x0020000000ULL;
1218 mem->bus.base = pci_resource_start(dev->pdev, 1);
1219 mem->bus.is_iomem = true;
1220 }
1221 break; 1202 break;
1222 default: 1203 default:
1223 return -EINVAL; 1204 return -EINVAL;
@@ -1231,14 +1212,13 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1231 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1212 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
1232 struct nouveau_mem *node = mem->mm_node; 1213 struct nouveau_mem *node = mem->mm_node;
1233 1214
1234 if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM) 1215 if (mem->mem_type != TTM_PL_VRAM)
1235 return; 1216 return;
1236 1217
1237 if (!node->bar_vma.node) 1218 if (!node->bar_vma.node)
1238 return; 1219 return;
1239 1220
1240 nouveau_vm_unmap(&node->bar_vma); 1221 nvbar_unmap(dev_priv->dev, &node->bar_vma);
1241 nouveau_vm_put(&node->bar_vma);
1242} 1222}
1243 1223
1244static int 1224static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 1dd5232a6d75..b7c2423a2536 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -145,6 +145,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
145 /* allocate hw channel id */ 145 /* allocate hw channel id */
146 spin_lock_irqsave(&dev_priv->channels.lock, flags); 146 spin_lock_irqsave(&dev_priv->channels.lock, flags);
147 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { 147 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
148 if ( dev_priv->card_type == NV_50 && chan->id == 0)
149 continue;
150
148 if (!dev_priv->channels.ptr[chan->id]) { 151 if (!dev_priv->channels.ptr[chan->id]) {
149 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]); 152 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
150 break; 153 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_compat.c b/drivers/gpu/drm/nouveau/nouveau_compat.c
index 3d65c1763311..0403f2b94fa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_compat.c
+++ b/drivers/gpu/drm/nouveau/nouveau_compat.c
@@ -11,6 +11,8 @@
11#include <subdev/mc.h> 11#include <subdev/mc.h>
12#include <subdev/timer.h> 12#include <subdev/timer.h>
13#include <subdev/fb.h> 13#include <subdev/fb.h>
14#include <subdev/bar.h>
15#include <subdev/vm.h>
14 16
15void *nouveau_newpriv(struct drm_device *); 17void *nouveau_newpriv(struct drm_device *);
16 18
@@ -438,3 +440,146 @@ nv50_fb_vm_trap(struct drm_device *dev, int disp)
438 struct nouveau_drm *drm = nouveau_newpriv(dev); 440 struct nouveau_drm *drm = nouveau_newpriv(dev);
439 nv50_fb_trap(nouveau_fb(drm->device), disp); 441 nv50_fb_trap(nouveau_fb(drm->device), disp);
440} 442}
443
444#include <core/subdev/instmem/nv04.h>
445
446struct nouveau_gpuobj *
447nvimem_ramro(struct drm_device *dev)
448{
449 struct nouveau_drm *drm = nouveau_newpriv(dev);
450 struct nv04_instmem_priv *imem = (void *)nouveau_instmem(drm->device);
451 return imem->ramro;
452}
453
454struct nouveau_gpuobj *
455nvimem_ramfc(struct drm_device *dev)
456{
457 struct nouveau_drm *drm = nouveau_newpriv(dev);
458 struct nv04_instmem_priv *imem = (void *)nouveau_instmem(drm->device);
459 return imem->ramfc;
460}
461
462int _nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_gpuobj *par,
463 int size, int align, u32 flags,
464 struct nouveau_gpuobj **pobj)
465{
466 struct nouveau_drm *drm = nouveau_newpriv(dev);
467 int ret;
468
469 if (!par)
470 flags |= NVOBJ_FLAG_HEAP;
471
472 ret = nouveau_gpuobj_new(drm->device, nv_object(par), size, align,
473 flags, pobj);
474 if (ret)
475 return ret;
476
477 (*pobj)->dev = dev;
478 return 0;
479}
480
481u32 nv_ri32(struct drm_device *dev , u32 addr)
482{
483 struct nouveau_drm *drm = nouveau_newpriv(dev);
484 struct nouveau_instmem *imem = nouveau_instmem(drm->device);
485 return nv_ro32(imem, addr);
486}
487
488void nv_wi32(struct drm_device *dev, u32 addr, u32 data)
489{
490 struct nouveau_drm *drm = nouveau_newpriv(dev);
491 struct nouveau_instmem *imem = nouveau_instmem(drm->device);
492 nv_wo32(imem, addr, data);
493}
494
495u32 nvimem_reserved(struct drm_device *dev)
496{
497 struct nouveau_drm *drm = nouveau_newpriv(dev);
498 struct nouveau_instmem *imem = nouveau_instmem(drm->device);
499 return imem->reserved;
500}
501
502int
503nvbar_map(struct drm_device *dev, struct nouveau_mem *mem, u32 flags,
504 struct nouveau_vma *vma)
505{
506 struct nouveau_drm *drm = nouveau_newpriv(dev);
507 struct nouveau_bar *bar = nouveau_bar(drm->device);
508 return bar->umap(bar, mem, flags, vma);
509}
510
511void
512nvbar_unmap(struct drm_device *dev, struct nouveau_vma *vma)
513{
514 struct nouveau_drm *drm = nouveau_newpriv(dev);
515 struct nouveau_bar *bar = nouveau_bar(drm->device);
516 bar->unmap(bar, vma);
517}
518
519int
520nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
521 struct nouveau_vma *vma)
522{
523 struct nouveau_drm *drm = nouveau_newpriv(gpuobj->dev);
524 struct nouveau_bar *bar = nouveau_bar(drm->device);
525 struct nouveau_instobj *iobj = (void *)
526 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
527 struct nouveau_mem **mem = (void *)(iobj + 1);
528 struct nouveau_mem *node = *mem;
529
530 return bar->umap(bar, node, flags, vma);
531}
532
533void
534nvimem_flush(struct drm_device *dev)
535{
536}
537
538void _nv50_vm_flush_engine(struct drm_device *dev, int engine)
539{
540 struct nouveau_drm *drm = nouveau_newpriv(dev);
541 nv50_vm_flush_engine(nv_subdev(drm->device), engine);
542}
543
544int _nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length,
545 u64 mm_offset, struct nouveau_vm **pvm)
546{
547 struct nouveau_drm *drm = nouveau_newpriv(dev);
548 return nouveau_vm_new(nv_device(drm->device), offset, length, mm_offset, pvm);
549}
550
551#include <core/subdev/vm/nv04.h>
552struct nouveau_vm *
553nv04vm_ref(struct drm_device *dev)
554{
555 struct nouveau_drm *drm = nouveau_newpriv(dev);
556 struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
557 struct nv04_vmmgr_priv *priv = (void *)vmm;
558 return priv->vm;
559}
560
561struct nouveau_gpuobj *
562nv04vm_refdma(struct drm_device *dev)
563{
564 struct nouveau_gpuobj *gpuobj = NULL;
565 nouveau_gpuobj_ref(nv04vm_ref(dev)->pgt[0].obj[0], &gpuobj);
566 return gpuobj;
567}
568
569void
570nvvm_engref(struct nouveau_vm *vm, int eng, int ref)
571{
572 atomic_add(ref, &vm->engref[eng]);
573}
574
575int
576nvvm_spg_shift(struct nouveau_vm *vm)
577{
578 return vm->vmm->spg_shift;
579}
580
581int
582nvvm_lpg_shift(struct nouveau_vm *vm)
583{
584 return vm->vmm->lpg_shift;
585}
diff --git a/drivers/gpu/drm/nouveau/nouveau_compat.h b/drivers/gpu/drm/nouveau/nouveau_compat.h
index d047a2046959..d691b2535c72 100644
--- a/drivers/gpu/drm/nouveau/nouveau_compat.h
+++ b/drivers/gpu/drm/nouveau/nouveau_compat.h
@@ -82,4 +82,46 @@ int nvfb_vram_rank_B(struct drm_device *);
82 82
83void nv50_fb_vm_trap(struct drm_device *, int); 83void nv50_fb_vm_trap(struct drm_device *, int);
84 84
85struct nouveau_gpuobj *nvimem_ramro(struct drm_device *);
86struct nouveau_gpuobj *nvimem_ramfc(struct drm_device *);
87
88int _nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_gpuobj *par,
89 int size, int align, u32 flags,
90 struct nouveau_gpuobj **pboj);
91
92u32 nv_ri32(struct drm_device *, u32);
93void nv_wi32(struct drm_device *, u32, u32);
94u32 nvimem_reserved(struct drm_device *);
95
96void nvimem_flush(struct drm_device *);
97
98void _nv50_vm_flush_engine(struct drm_device *dev, int engine);
99
100int _nouveau_vm_new(struct drm_device *, u64 offset, u64 length,
101 u64 mm_offset, struct nouveau_vm **);
102
103struct nouveau_vma;
104int nouveau_gpuobj_map_bar(struct nouveau_gpuobj *, u32, struct nouveau_vma *);
105
106int
107nvbar_map(struct drm_device *dev, struct nouveau_mem *mem, u32 flags,
108 struct nouveau_vma *vma);
109void
110nvbar_unmap(struct drm_device *dev, struct nouveau_vma *vma);
111
112struct nouveau_vm *
113nv04vm_ref(struct drm_device *dev);
114
115struct nouveau_gpuobj *
116nv04vm_refdma(struct drm_device *dev);
117
118void
119nvvm_engref(struct nouveau_vm *, int, int);
120
121int
122nvvm_spg_shift(struct nouveau_vm *);
123
124int
125nvvm_lpg_shift(struct nouveau_vm *);
126
85#endif 127#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 3f660a94dd18..561bc00c4851 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -152,7 +152,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
152{ 152{
153 struct drm_device *dev = pci_get_drvdata(pdev); 153 struct drm_device *dev = pci_get_drvdata(pdev);
154 struct drm_nouveau_private *dev_priv = dev->dev_private; 154 struct drm_nouveau_private *dev_priv = dev->dev_private;
155 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
156 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 155 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
157 struct nouveau_channel *chan; 156 struct nouveau_channel *chan;
158 struct drm_crtc *crtc; 157 struct drm_crtc *crtc;
@@ -204,20 +203,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
204 } 203 }
205 } 204 }
206 205
207 ret = pinstmem->suspend(dev);
208 if (ret) {
209 NV_ERROR(dev, "... failed: %d\n", ret);
210 goto out_abort;
211 }
212
213 NV_INFO(dev, "Suspending GPU objects...\n");
214 ret = nouveau_gpuobj_suspend(dev);
215 if (ret) {
216 NV_ERROR(dev, "... failed: %d\n", ret);
217 pinstmem->resume(dev);
218 goto out_abort;
219 }
220
221 return 0; 206 return 0;
222 207
223out_abort: 208out_abort:
@@ -247,11 +232,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
247 if (ret) 232 if (ret)
248 return ret; 233 return ret;
249 234
250 NV_INFO(dev, "Restoring GPU objects...\n");
251 nouveau_gpuobj_resume(dev);
252
253 NV_INFO(dev, "Reinitialising engines...\n"); 235 NV_INFO(dev, "Reinitialising engines...\n");
254 engine->instmem.resume(dev);
255 for (i = 0; i < NVOBJ_ENGINE_NR; i++) { 236 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
256 if (dev_priv->eng[i]) 237 if (dev_priv->eng[i])
257 dev_priv->eng[i]->init(dev, i); 238 dev_priv->eng[i]->init(dev, i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index fad668793fae..f62732dd30ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,7 +46,9 @@
46#include "ttm/ttm_module.h" 46#include "ttm/ttm_module.h"
47 47
48#define XXX_THIS_IS_A_HACK 48#define XXX_THIS_IS_A_HACK
49#include <subdev/vm.h>
49#include <subdev/fb.h> 50#include <subdev/fb.h>
51#include <core/gpuobj.h>
50 52
51enum blah { 53enum blah {
52 NV_MEM_TYPE_UNKNOWN = 0, 54 NV_MEM_TYPE_UNKNOWN = 0,
@@ -83,11 +85,20 @@ nouveau_fpriv(struct drm_file *file_priv)
83 85
84struct nouveau_grctx; 86struct nouveau_grctx;
85struct nouveau_mem; 87struct nouveau_mem;
86#include <subdev/vm.h>
87 88
88#include <subdev/bios/pll.h> 89#include <subdev/bios/pll.h>
89#include "nouveau_compat.h" 90#include "nouveau_compat.h"
90 91
92#define nouveau_gpuobj_new(d,c,s,a,f,o) \
93 _nouveau_gpuobj_new((d), (c) ? ((struct nouveau_channel *)(c))->ramin : NULL, \
94 (s), (a), (f), (o))
95
96#define nouveau_vm_new(d,o,l,m,v) \
97 _nouveau_vm_new((d), (o), (l), (m), (v))
98
99#define nv50_vm_flush_engine(d,e) \
100 _nv50_vm_flush_engine((d), (e))
101
91#define MAX_NUM_DCB_ENTRIES 16 102#define MAX_NUM_DCB_ENTRIES 16
92 103
93#define NOUVEAU_MAX_CHANNEL_NR 4096 104#define NOUVEAU_MAX_CHANNEL_NR 4096
@@ -172,34 +183,6 @@ enum nouveau_flags {
172#define NVOBJ_ENGINE_NR 16 183#define NVOBJ_ENGINE_NR 16
173#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/ 184#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
174 185
175#define NVOBJ_FLAG_DONT_MAP (1 << 0)
176#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
177#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
178
179#define NVOBJ_CINST_GLOBAL 0xdeadbeef
180
181struct nouveau_gpuobj {
182 struct drm_device *dev;
183 struct kref refcount;
184 struct list_head list;
185
186 void *node;
187 u32 *suspend;
188
189 uint32_t flags;
190
191 u32 size;
192 u32 pinst; /* PRAMIN BAR offset */
193 u32 cinst; /* Channel offset */
194 u64 vinst; /* VRAM address */
195
196 uint32_t engine;
197 uint32_t class;
198
199 void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
200 void *priv;
201};
202
203struct nouveau_page_flip_state { 186struct nouveau_page_flip_state {
204 struct list_head head; 187 struct list_head head;
205 struct drm_pending_vblank_event *event; 188 struct drm_pending_vblank_event *event;
@@ -259,7 +242,6 @@ struct nouveau_channel {
259 242
260 /* Objects */ 243 /* Objects */
261 struct nouveau_gpuobj *ramin; /* Private instmem */ 244 struct nouveau_gpuobj *ramin; /* Private instmem */
262 struct drm_mm ramin_heap; /* Private PRAMIN heap */
263 struct nouveau_ramht *ramht; /* Hash table */ 245 struct nouveau_ramht *ramht; /* Hash table */
264 246
265 /* GPU object info for stuff used in-kernel (mm_enabled) */ 247 /* GPU object info for stuff used in-kernel (mm_enabled) */
@@ -301,23 +283,6 @@ struct nouveau_exec_engine {
301 void (*tlb_flush)(struct drm_device *, int engine); 283 void (*tlb_flush)(struct drm_device *, int engine);
302}; 284};
303 285
304struct nouveau_instmem_engine {
305 void *priv;
306
307 int (*init)(struct drm_device *dev);
308 void (*takedown)(struct drm_device *dev);
309 int (*suspend)(struct drm_device *dev);
310 void (*resume)(struct drm_device *dev);
311
312 int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
313 u32 size, u32 align);
314 void (*put)(struct nouveau_gpuobj *);
315 int (*map)(struct nouveau_gpuobj *);
316 void (*unmap)(struct nouveau_gpuobj *);
317
318 void (*flush)(struct drm_device *);
319};
320
321struct nouveau_display_engine { 286struct nouveau_display_engine {
322 void *priv; 287 void *priv;
323 int (*early_init)(struct drm_device *); 288 int (*early_init)(struct drm_device *);
@@ -499,7 +464,6 @@ struct nouveau_pm_engine {
499}; 464};
500 465
501struct nouveau_engine { 466struct nouveau_engine {
502 struct nouveau_instmem_engine instmem;
503 struct nouveau_display_engine display; 467 struct nouveau_display_engine display;
504 struct nouveau_pm_engine pm; 468 struct nouveau_pm_engine pm;
505}; 469};
@@ -599,14 +563,7 @@ struct drm_nouveau_private {
599 int flags; 563 int flags;
600 u32 crystal; 564 u32 crystal;
601 565
602 spinlock_t ramin_lock;
603 void __iomem *ramin;
604 u32 ramin_size;
605 u32 ramin_base;
606 bool ramin_available;
607 struct drm_mm ramin_heap;
608 struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR]; 566 struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
609 struct list_head gpuobj_list;
610 struct list_head classes; 567 struct list_head classes;
611 568
612 struct nouveau_bo *vga_ram; 569 struct nouveau_bo *vga_ram;
@@ -648,8 +605,6 @@ struct drm_nouveau_private {
648 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 605 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
649 struct nouveau_ramht *ramht; 606 struct nouveau_ramht *ramht;
650 607
651 uint32_t ramin_rsvd_vram;
652
653 struct { 608 struct {
654 enum { 609 enum {
655 NOUVEAU_GART_NONE = 0, 610 NOUVEAU_GART_NONE = 0,
@@ -663,11 +618,6 @@ struct drm_nouveau_private {
663 618
664 struct ttm_backend_func *func; 619 struct ttm_backend_func *func;
665 620
666 struct {
667 struct page *page;
668 dma_addr_t addr;
669 } dummy;
670
671 struct nouveau_gpuobj *sg_ctxdma; 621 struct nouveau_gpuobj *sg_ctxdma;
672 } gart_info; 622 } gart_info;
673 623
@@ -682,10 +632,6 @@ struct drm_nouveau_private {
682 uint64_t fb_aper_free; 632 uint64_t fb_aper_free;
683 int fb_mtrr; 633 int fb_mtrr;
684 634
685 /* BAR control (NV50-) */
686 struct nouveau_vm *bar1_vm;
687 struct nouveau_vm *bar3_vm;
688
689 /* G8x/G9x virtual address space */ 635 /* G8x/G9x virtual address space */
690 struct nouveau_vm *chan_vm; 636 struct nouveau_vm *chan_vm;
691 637
@@ -797,6 +743,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
797 struct nouveau_fence *fence); 743 struct nouveau_fence *fence);
798extern const struct ttm_mem_type_manager_func nouveau_vram_manager; 744extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
799extern const struct ttm_mem_type_manager_func nouveau_gart_manager; 745extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
746extern const struct ttm_mem_type_manager_func nv04_gart_manager;
800 747
801/* nouveau_notifier.c */ 748/* nouveau_notifier.c */
802extern int nouveau_notifier_init_channel(struct nouveau_channel *); 749extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -844,11 +791,6 @@ extern int nouveau_channel_idle(struct nouveau_channel *chan);
844 return ret; \ 791 return ret; \
845} while (0) 792} while (0)
846 793
847extern int nouveau_gpuobj_early_init(struct drm_device *);
848extern int nouveau_gpuobj_init(struct drm_device *);
849extern void nouveau_gpuobj_takedown(struct drm_device *);
850extern int nouveau_gpuobj_suspend(struct drm_device *dev);
851extern void nouveau_gpuobj_resume(struct drm_device *dev);
852extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); 794extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
853extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, 795extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
854 int (*exec)(struct nouveau_channel *, 796 int (*exec)(struct nouveau_channel *,
@@ -858,11 +800,6 @@ extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
858extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, 800extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
859 uint32_t vram_h, uint32_t tt_h); 801 uint32_t vram_h, uint32_t tt_h);
860extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); 802extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
861extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
862 uint32_t size, int align, uint32_t flags,
863 struct nouveau_gpuobj **);
864extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
865 struct nouveau_gpuobj **);
866extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, 803extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
867 uint64_t offset, uint64_t size, int access, 804 uint64_t offset, uint64_t size, int access,
868 int target, struct nouveau_gpuobj **); 805 int target, struct nouveau_gpuobj **);
@@ -874,11 +811,8 @@ extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
874 int class, u64 base, u64 size, int target, 811 int class, u64 base, u64 size, int target,
875 int access, u32 type, u32 comp); 812 int access, u32 type, u32 comp);
876 813
877 814int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
878int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, u32 flags, 815 u32 flags, struct nouveau_vma *vma);
879 struct nouveau_vm *vm, struct nouveau_vma *vma);
880int nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
881 struct nouveau_vma *vma);
882void nouveau_gpuobj_unmap(struct nouveau_vma *vma); 816void nouveau_gpuobj_unmap(struct nouveau_vma *vma);
883 817
884/* nouveau_irq.c */ 818/* nouveau_irq.c */
@@ -1060,49 +994,6 @@ extern int nv84_vp_create(struct drm_device *dev);
1060/* nv98_ppp.c */ 994/* nv98_ppp.c */
1061extern int nv98_ppp_create(struct drm_device *dev); 995extern int nv98_ppp_create(struct drm_device *dev);
1062 996
1063/* nv04_instmem.c */
1064extern int nv04_instmem_init(struct drm_device *);
1065extern void nv04_instmem_takedown(struct drm_device *);
1066extern int nv04_instmem_suspend(struct drm_device *);
1067extern void nv04_instmem_resume(struct drm_device *);
1068extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1069 u32 size, u32 align);
1070extern void nv04_instmem_put(struct nouveau_gpuobj *);
1071extern int nv04_instmem_map(struct nouveau_gpuobj *);
1072extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
1073extern void nv04_instmem_flush(struct drm_device *);
1074
1075/* nv40_instmem.c */
1076extern int nv40_instmem_init(struct drm_device *);
1077extern void nv40_instmem_takedown(struct drm_device *);
1078extern int nv40_instmem_suspend(struct drm_device *);
1079extern void nv40_instmem_resume(struct drm_device *);
1080extern int nv40_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1081 u32 size, u32 align);
1082extern void nv40_instmem_put(struct nouveau_gpuobj *);
1083extern int nv40_instmem_map(struct nouveau_gpuobj *);
1084extern void nv40_instmem_unmap(struct nouveau_gpuobj *);
1085extern void nv40_instmem_flush(struct drm_device *);
1086
1087/* nv50_instmem.c */
1088extern int nv50_instmem_init(struct drm_device *);
1089extern void nv50_instmem_takedown(struct drm_device *);
1090extern int nv50_instmem_suspend(struct drm_device *);
1091extern void nv50_instmem_resume(struct drm_device *);
1092extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1093 u32 size, u32 align);
1094extern void nv50_instmem_put(struct nouveau_gpuobj *);
1095extern int nv50_instmem_map(struct nouveau_gpuobj *);
1096extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
1097extern void nv50_instmem_flush(struct drm_device *);
1098extern void nv84_instmem_flush(struct drm_device *);
1099
1100/* nvc0_instmem.c */
1101extern int nvc0_instmem_init(struct drm_device *);
1102extern void nvc0_instmem_takedown(struct drm_device *);
1103extern int nvc0_instmem_suspend(struct drm_device *);
1104extern void nvc0_instmem_resume(struct drm_device *);
1105
1106extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, 997extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1107 unsigned long arg); 998 unsigned long arg);
1108 999
@@ -1260,23 +1151,6 @@ static inline void nvchan_wr32(struct nouveau_channel *chan,
1260#define nv_wait_cb(dev, func, data) \ 1151#define nv_wait_cb(dev, func, data) \
1261 nouveau_wait_cb(dev, 2000000000ULL, (func), (data)) 1152 nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
1262 1153
1263/* PRAMIN access */
1264static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
1265{
1266 struct drm_nouveau_private *dev_priv = dev->dev_private;
1267 return ioread32_native(dev_priv->ramin + offset);
1268}
1269
1270static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
1271{
1272 struct drm_nouveau_private *dev_priv = dev->dev_private;
1273 iowrite32_native(val, dev_priv->ramin + offset);
1274}
1275
1276/* object access */
1277extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
1278extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
1279
1280/* 1154/*
1281 * Logging 1155 * Logging
1282 * Argument d is (struct drm_device *). 1156 * Argument d is (struct drm_device *).
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
index 0d370e8c468f..a774b7ad0f21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
@@ -37,7 +37,6 @@
37#include <engine/fifo.h> 37#include <engine/fifo.h>
38#include <core/ramht.h> 38#include <core/ramht.h>
39#include "nouveau_software.h" 39#include "nouveau_software.h"
40#include <subdev/vm.h>
41 40
42struct nouveau_gpuobj_method { 41struct nouveau_gpuobj_method {
43 struct list_head head; 42 struct list_head head;
@@ -135,173 +134,12 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
135 return ret; 134 return ret;
136} 135}
137 136
138int
139nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
140 uint32_t size, int align, uint32_t flags,
141 struct nouveau_gpuobj **gpuobj_ret)
142{
143 struct drm_nouveau_private *dev_priv = dev->dev_private;
144 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
145 struct nouveau_gpuobj *gpuobj;
146 struct drm_mm_node *ramin = NULL;
147 int ret, i;
148
149 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
150 chan ? chan->id : -1, size, align, flags);
151
152 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
153 if (!gpuobj)
154 return -ENOMEM;
155 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
156 gpuobj->dev = dev;
157 gpuobj->flags = flags;
158 kref_init(&gpuobj->refcount);
159 gpuobj->size = size;
160
161 spin_lock(&dev_priv->ramin_lock);
162 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
163 spin_unlock(&dev_priv->ramin_lock);
164
165 if (chan) {
166 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
167 if (ramin)
168 ramin = drm_mm_get_block(ramin, size, align);
169 if (!ramin) {
170 nouveau_gpuobj_ref(NULL, &gpuobj);
171 return -ENOMEM;
172 }
173
174 gpuobj->pinst = chan->ramin->pinst;
175 if (gpuobj->pinst != ~0)
176 gpuobj->pinst += ramin->start;
177
178 gpuobj->cinst = ramin->start;
179 gpuobj->vinst = ramin->start + chan->ramin->vinst;
180 gpuobj->node = ramin;
181 } else {
182 ret = instmem->get(gpuobj, chan, size, align);
183 if (ret) {
184 nouveau_gpuobj_ref(NULL, &gpuobj);
185 return ret;
186 }
187
188 ret = -ENOSYS;
189 if (!(flags & NVOBJ_FLAG_DONT_MAP))
190 ret = instmem->map(gpuobj);
191 if (ret)
192 gpuobj->pinst = ~0;
193
194 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
195 }
196
197 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
198 for (i = 0; i < gpuobj->size; i += 4)
199 nv_wo32(gpuobj, i, 0);
200 instmem->flush(dev);
201 }
202
203
204 *gpuobj_ret = gpuobj;
205 return 0;
206}
207
208int
209nouveau_gpuobj_init(struct drm_device *dev)
210{
211 struct drm_nouveau_private *dev_priv = dev->dev_private;
212
213 NV_DEBUG(dev, "\n");
214
215 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
216 INIT_LIST_HEAD(&dev_priv->classes);
217 spin_lock_init(&dev_priv->ramin_lock);
218 dev_priv->ramin_base = ~0;
219
220 return 0;
221}
222
223void
224nouveau_gpuobj_takedown(struct drm_device *dev)
225{
226 struct drm_nouveau_private *dev_priv = dev->dev_private;
227 struct nouveau_gpuobj_method *om, *tm;
228 struct nouveau_gpuobj_class *oc, *tc;
229
230 NV_DEBUG(dev, "\n");
231
232 list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
233 list_for_each_entry_safe(om, tm, &oc->methods, head) {
234 list_del(&om->head);
235 kfree(om);
236 }
237 list_del(&oc->head);
238 kfree(oc);
239 }
240
241 WARN_ON(!list_empty(&dev_priv->gpuobj_list));
242}
243
244
245static void
246nouveau_gpuobj_del(struct kref *ref)
247{
248 struct nouveau_gpuobj *gpuobj =
249 container_of(ref, struct nouveau_gpuobj, refcount);
250 struct drm_device *dev = gpuobj->dev;
251 struct drm_nouveau_private *dev_priv = dev->dev_private;
252 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
253 int i;
254
255 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
256
257 if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
258 for (i = 0; i < gpuobj->size; i += 4)
259 nv_wo32(gpuobj, i, 0);
260 instmem->flush(dev);
261 }
262
263 if (gpuobj->dtor)
264 gpuobj->dtor(dev, gpuobj);
265
266 if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
267 if (gpuobj->node) {
268 instmem->unmap(gpuobj);
269 instmem->put(gpuobj);
270 }
271 } else {
272 if (gpuobj->node) {
273 spin_lock(&dev_priv->ramin_lock);
274 drm_mm_put_block(gpuobj->node);
275 spin_unlock(&dev_priv->ramin_lock);
276 }
277 }
278
279 spin_lock(&dev_priv->ramin_lock);
280 list_del(&gpuobj->list);
281 spin_unlock(&dev_priv->ramin_lock);
282
283 kfree(gpuobj);
284}
285
286void
287nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
288{
289 if (ref)
290 kref_get(&ref->refcount);
291
292 if (*ptr)
293 kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
294
295 *ptr = ref;
296}
297
298void 137void
299nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, 138nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
300 u64 base, u64 size, int target, int access, 139 u64 base, u64 size, int target, int access,
301 u32 type, u32 comp) 140 u32 type, u32 comp)
302{ 141{
303 struct drm_nouveau_private *dev_priv = obj->dev->dev_private; 142 struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
304 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
305 u32 flags0; 143 u32 flags0;
306 144
307 flags0 = (comp << 29) | (type << 22) | class; 145 flags0 = (comp << 29) | (type << 22) | class;
@@ -343,7 +181,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
343 nv_wo32(obj, offset + 0x10, 0x00000000); 181 nv_wo32(obj, offset + 0x10, 0x00000000);
344 nv_wo32(obj, offset + 0x14, 0x00000000); 182 nv_wo32(obj, offset + 0x14, 0x00000000);
345 183
346 pinstmem->flush(obj->dev); 184 nvimem_flush(obj->dev);
347} 185}
348 186
349int 187int
@@ -485,10 +323,6 @@ nv04_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
485 if (ret) 323 if (ret)
486 return ret; 324 return ret;
487 325
488 ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
489 if (ret)
490 return ret;
491
492 return 0; 326 return 0;
493} 327}
494 328
@@ -503,10 +337,6 @@ nv50_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
503 if (ret) 337 if (ret)
504 return ret; 338 return ret;
505 339
506 ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
507 if (ret)
508 return ret;
509
510 ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->ramfc); 340 ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->ramfc);
511 if (ret) 341 if (ret)
512 return ret; 342 return ret;
@@ -533,10 +363,6 @@ nv84_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
533 if (ret) 363 if (ret)
534 return ret; 364 return ret;
535 365
536 ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
537 if (ret)
538 return ret;
539
540 ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->engptr); 366 ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->engptr);
541 if (ret) 367 if (ret)
542 return ret; 368 return ret;
@@ -552,30 +378,20 @@ static int
552nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) 378nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
553{ 379{
554 struct drm_device *dev = chan->dev; 380 struct drm_device *dev = chan->dev;
555 struct nouveau_gpuobj *pgd = NULL;
556 struct nouveau_vm_pgd *vpgd;
557 int ret; 381 int ret;
558 382
559 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin); 383 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
560 if (ret) 384 if (ret)
561 return ret; 385 return ret;
562 386
563 /* create page directory for this vm if none currently exists, 387 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &chan->vm_pd);
564 * will be destroyed automagically when last reference to the 388 if (ret)
565 * vm is removed 389 return ret;
566 */
567 if (list_empty(&vm->pgd_list)) {
568 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
569 if (ret)
570 return ret;
571 }
572 nouveau_vm_ref(vm, &chan->vm, pgd);
573 nouveau_gpuobj_ref(NULL, &pgd);
574 390
575 /* point channel at vm's page directory */ 391 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
576 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); 392
577 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); 393 nv_wo32(chan->ramin, 0x0200, lower_32_bits(chan->vm_pd->addr));
578 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); 394 nv_wo32(chan->ramin, 0x0204, upper_32_bits(chan->vm_pd->addr));
579 nv_wo32(chan->ramin, 0x0208, 0xffffffff); 395 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
580 nv_wo32(chan->ramin, 0x020c, 0x000000ff); 396 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
581 397
@@ -698,132 +514,5 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
698 nouveau_gpuobj_ref(NULL, &chan->ramfc); 514 nouveau_gpuobj_ref(NULL, &chan->ramfc);
699 nouveau_gpuobj_ref(NULL, &chan->engptr); 515 nouveau_gpuobj_ref(NULL, &chan->engptr);
700 516
701 if (drm_mm_initialized(&chan->ramin_heap))
702 drm_mm_takedown(&chan->ramin_heap);
703 nouveau_gpuobj_ref(NULL, &chan->ramin); 517 nouveau_gpuobj_ref(NULL, &chan->ramin);
704} 518}
705
706int
707nouveau_gpuobj_suspend(struct drm_device *dev)
708{
709 struct drm_nouveau_private *dev_priv = dev->dev_private;
710 struct nouveau_gpuobj *gpuobj;
711 int i;
712
713 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
714 if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
715 continue;
716
717 gpuobj->suspend = vmalloc(gpuobj->size);
718 if (!gpuobj->suspend) {
719 nouveau_gpuobj_resume(dev);
720 return -ENOMEM;
721 }
722
723 for (i = 0; i < gpuobj->size; i += 4)
724 gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
725 }
726
727 return 0;
728}
729
730void
731nouveau_gpuobj_resume(struct drm_device *dev)
732{
733 struct drm_nouveau_private *dev_priv = dev->dev_private;
734 struct nouveau_gpuobj *gpuobj;
735 int i;
736
737 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
738 if (!gpuobj->suspend)
739 continue;
740
741 for (i = 0; i < gpuobj->size; i += 4)
742 nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
743
744 vfree(gpuobj->suspend);
745 gpuobj->suspend = NULL;
746 }
747
748 dev_priv->engine.instmem.flush(dev);
749}
750
751u32
752nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
753{
754 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
755 struct drm_device *dev = gpuobj->dev;
756 unsigned long flags;
757
758 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
759 u64 ptr = gpuobj->vinst + offset;
760 u32 base = ptr >> 16;
761 u32 val;
762
763 spin_lock_irqsave(&dev_priv->vm_lock, flags);
764 if (dev_priv->ramin_base != base) {
765 dev_priv->ramin_base = base;
766 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
767 }
768 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
769 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
770 return val;
771 }
772
773 return nv_ri32(dev, gpuobj->pinst + offset);
774}
775
776void
777nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
778{
779 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
780 struct drm_device *dev = gpuobj->dev;
781 unsigned long flags;
782
783 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
784 u64 ptr = gpuobj->vinst + offset;
785 u32 base = ptr >> 16;
786
787 spin_lock_irqsave(&dev_priv->vm_lock, flags);
788 if (dev_priv->ramin_base != base) {
789 dev_priv->ramin_base = base;
790 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
791 }
792 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
793 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
794 return;
795 }
796
797 nv_wi32(dev, gpuobj->pinst + offset, val);
798}
799
800int
801nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, u32 flags,
802 struct nouveau_vm *vm, struct nouveau_vma *vma)
803{
804 struct nouveau_mem **mem = gpuobj->node;
805 struct nouveau_mem *node = *mem;
806 int ret;
807
808 ret = nouveau_vm_get(vm, node->size << 12, 12, flags, vma);
809 if (ret)
810 return ret;
811
812 nouveau_vm_map(vma, node);
813 return 0;
814}
815
816int
817nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
818 struct nouveau_vma *vma)
819{
820 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
821 return nouveau_gpuobj_map_vm(gpuobj, flags, dev_priv->bar1_vm, vma);
822}
823
824void
825nouveau_gpuobj_unmap(struct nouveau_vma *vma)
826{
827 nouveau_vm_unmap(vma);
828 nouveau_vm_put(vma);
829}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 4aea1c4c46ef..48131ceeeb80 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -38,7 +38,6 @@
38#include "nouveau_drv.h" 38#include "nouveau_drv.h"
39#include "nouveau_pm.h" 39#include "nouveau_pm.h"
40#include <core/mm.h> 40#include <core/mm.h>
41#include <subdev/vm.h>
42#include <engine/fifo.h> 41#include <engine/fifo.h>
43#include "nouveau_fence.h" 42#include "nouveau_fence.h"
44 43
@@ -220,7 +219,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
220 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1); 219 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
221 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 220 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
222 221
223 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 222 dev_priv->fb_available_size -= nvimem_reserved(dev);
224 dev_priv->fb_aper_free = dev_priv->fb_available_size; 223 dev_priv->fb_aper_free = dev_priv->fb_available_size;
225 224
226 /* mappable vram */ 225 /* mappable vram */
@@ -1058,3 +1057,71 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
1058 nouveau_gart_manager_del, 1057 nouveau_gart_manager_del,
1059 nouveau_gart_manager_debug 1058 nouveau_gart_manager_debug
1060}; 1059};
1060
1061static int
1062nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
1063{
1064 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1065 struct drm_device *dev = dev_priv->dev;
1066 man->priv = nv04vm_ref(dev);
1067 return (man->priv != NULL) ? 0 : -ENODEV;
1068}
1069
1070static int
1071nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
1072{
1073 struct nouveau_vm *vm = man->priv;
1074 nouveau_vm_ref(NULL, &vm, NULL);
1075 man->priv = NULL;
1076 return 0;
1077}
1078
1079static void
1080nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
1081{
1082 struct nouveau_mem *node = mem->mm_node;
1083 if (node->vma[0].node)
1084 nouveau_vm_put(&node->vma[0]);
1085 kfree(mem->mm_node);
1086 mem->mm_node = NULL;
1087}
1088
1089static int
1090nv04_gart_manager_new(struct ttm_mem_type_manager *man,
1091 struct ttm_buffer_object *bo,
1092 struct ttm_placement *placement,
1093 struct ttm_mem_reg *mem)
1094{
1095 struct nouveau_mem *node;
1096 int ret;
1097
1098 node = kzalloc(sizeof(*node), GFP_KERNEL);
1099 if (!node)
1100 return -ENOMEM;
1101
1102 node->page_shift = 12;
1103
1104 ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
1105 NV_MEM_ACCESS_RW, &node->vma[0]);
1106 if (ret) {
1107 kfree(node);
1108 return ret;
1109 }
1110
1111 mem->mm_node = node;
1112 mem->start = node->vma[0].offset >> PAGE_SHIFT;
1113 return 0;
1114}
1115
1116void
1117nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
1118{
1119}
1120
1121const struct ttm_mem_type_manager_func nv04_gart_manager = {
1122 nv04_gart_manager_init,
1123 nv04_gart_manager_fini,
1124 nv04_gart_manager_new,
1125 nv04_gart_manager_del,
1126 nv04_gart_manager_debug
1127};
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 151ffbb411c3..2cc4779b4299 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,16 +96,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
96 drm_mm_takedown(&chan->notifier_heap); 96 drm_mm_takedown(&chan->notifier_heap);
97} 97}
98 98
99static void
100nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
101 struct nouveau_gpuobj *gpuobj)
102{
103 NV_DEBUG(dev, "\n");
104
105 if (gpuobj->priv)
106 drm_mm_put_block(gpuobj->priv);
107}
108
109int 99int
110nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 100nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
111 int size, uint32_t start, uint32_t end, 101 int size, uint32_t start, uint32_t end,
@@ -147,8 +137,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
147 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); 137 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
148 return ret; 138 return ret;
149 } 139 }
150 nobj->dtor = nouveau_notifier_gpuobj_dtor;
151 nobj->priv = mem;
152 140
153 ret = nouveau_ramht_insert(chan, handle, nobj); 141 ret = nouveau_ramht_insert(chan, handle, nobj);
154 nouveau_gpuobj_ref(NULL, &nobj); 142 nouveau_gpuobj_ref(NULL, &nobj);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 38483a042bc2..464beda94c58 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,7 +13,7 @@ struct nouveau_sgdma_be {
13 */ 13 */
14 struct ttm_dma_tt ttm; 14 struct ttm_dma_tt ttm;
15 struct drm_device *dev; 15 struct drm_device *dev;
16 u64 offset; 16 struct nouveau_mem *node;
17}; 17};
18 18
19static void 19static void
@@ -32,25 +32,18 @@ static int
32nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 32nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
33{ 33{
34 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 34 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
35 struct drm_device *dev = nvbe->dev; 35 struct nouveau_mem *node = mem->mm_node;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 36 u64 size = mem->num_pages << 12;
37 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
38 unsigned i, j, pte;
39
40 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
41
42 nvbe->offset = mem->start << PAGE_SHIFT;
43 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
44 for (i = 0; i < ttm->num_pages; i++) {
45 dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
46 uint32_t offset_l = lower_32_bits(dma_offset);
47 37
48 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 38 if (ttm->sg) {
49 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 39 node->sg = ttm->sg;
50 offset_l += NV_CTXDMA_PAGE_SIZE; 40 nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
51 } 41 } else {
42 node->pages = nvbe->ttm.dma_address;
43 nouveau_vm_map_sg(&node->vma[0], 0, size, node);
52 } 44 }
53 45
46 nvbe->node = node;
54 return 0; 47 return 0;
55} 48}
56 49
@@ -58,22 +51,7 @@ static int
58nv04_sgdma_unbind(struct ttm_tt *ttm) 51nv04_sgdma_unbind(struct ttm_tt *ttm)
59{ 52{
60 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 53 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
61 struct drm_device *dev = nvbe->dev; 54 nouveau_vm_unmap(&nvbe->node->vma[0]);
62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
64 unsigned i, j, pte;
65
66 NV_DEBUG(dev, "\n");
67
68 if (ttm->state != tt_bound)
69 return 0;
70
71 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
72 for (i = 0; i < ttm->num_pages; i++) {
73 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
74 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
75 }
76
77 return 0; 55 return 0;
78} 56}
79 57
@@ -83,206 +61,6 @@ static struct ttm_backend_func nv04_sgdma_backend = {
83 .destroy = nouveau_sgdma_destroy 61 .destroy = nouveau_sgdma_destroy
84}; 62};
85 63
86static void
87nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
88{
89 struct drm_device *dev = nvbe->dev;
90
91 nv_wr32(dev, 0x100810, 0x00000022);
92 if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
93 NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
94 nv_rd32(dev, 0x100810));
95 nv_wr32(dev, 0x100810, 0x00000000);
96}
97
98static int
99nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
100{
101 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
102 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
103 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
104 dma_addr_t *list = nvbe->ttm.dma_address;
105 u32 pte = mem->start << 2;
106 u32 cnt = ttm->num_pages;
107
108 nvbe->offset = mem->start << PAGE_SHIFT;
109
110 while (cnt--) {
111 nv_wo32(pgt, pte, (*list++ >> 7) | 1);
112 pte += 4;
113 }
114
115 nv41_sgdma_flush(nvbe);
116 return 0;
117}
118
119static int
120nv41_sgdma_unbind(struct ttm_tt *ttm)
121{
122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
123 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
124 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
125 u32 pte = (nvbe->offset >> 12) << 2;
126 u32 cnt = ttm->num_pages;
127
128 while (cnt--) {
129 nv_wo32(pgt, pte, 0x00000000);
130 pte += 4;
131 }
132
133 nv41_sgdma_flush(nvbe);
134 return 0;
135}
136
137static struct ttm_backend_func nv41_sgdma_backend = {
138 .bind = nv41_sgdma_bind,
139 .unbind = nv41_sgdma_unbind,
140 .destroy = nouveau_sgdma_destroy
141};
142
143static void
144nv44_sgdma_flush(struct ttm_tt *ttm)
145{
146 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
147 struct drm_device *dev = nvbe->dev;
148
149 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
150 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
151 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
152 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
153 nv_rd32(dev, 0x100808));
154 nv_wr32(dev, 0x100808, 0x00000000);
155}
156
157static void
158nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
159{
160 struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
161 dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
162 u32 pte, tmp[4];
163
164 pte = base >> 2;
165 base &= ~0x0000000f;
166
167 tmp[0] = nv_ro32(pgt, base + 0x0);
168 tmp[1] = nv_ro32(pgt, base + 0x4);
169 tmp[2] = nv_ro32(pgt, base + 0x8);
170 tmp[3] = nv_ro32(pgt, base + 0xc);
171 while (cnt--) {
172 u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
173 switch (pte++ & 0x3) {
174 case 0:
175 tmp[0] &= ~0x07ffffff;
176 tmp[0] |= addr;
177 break;
178 case 1:
179 tmp[0] &= ~0xf8000000;
180 tmp[0] |= addr << 27;
181 tmp[1] &= ~0x003fffff;
182 tmp[1] |= addr >> 5;
183 break;
184 case 2:
185 tmp[1] &= ~0xffc00000;
186 tmp[1] |= addr << 22;
187 tmp[2] &= ~0x0001ffff;
188 tmp[2] |= addr >> 10;
189 break;
190 case 3:
191 tmp[2] &= ~0xfffe0000;
192 tmp[2] |= addr << 17;
193 tmp[3] &= ~0x00000fff;
194 tmp[3] |= addr >> 15;
195 break;
196 }
197 }
198
199 tmp[3] |= 0x40000000;
200
201 nv_wo32(pgt, base + 0x0, tmp[0]);
202 nv_wo32(pgt, base + 0x4, tmp[1]);
203 nv_wo32(pgt, base + 0x8, tmp[2]);
204 nv_wo32(pgt, base + 0xc, tmp[3]);
205}
206
207static int
208nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
209{
210 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
211 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
212 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
213 dma_addr_t *list = nvbe->ttm.dma_address;
214 u32 pte = mem->start << 2, tmp[4];
215 u32 cnt = ttm->num_pages;
216 int i;
217
218 nvbe->offset = mem->start << PAGE_SHIFT;
219
220 if (pte & 0x0000000c) {
221 u32 max = 4 - ((pte >> 2) & 0x3);
222 u32 part = (cnt > max) ? max : cnt;
223 nv44_sgdma_fill(pgt, list, pte, part);
224 pte += (part << 2);
225 list += part;
226 cnt -= part;
227 }
228
229 while (cnt >= 4) {
230 for (i = 0; i < 4; i++)
231 tmp[i] = *list++ >> 12;
232 nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
233 nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
234 nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
235 nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
236 pte += 0x10;
237 cnt -= 4;
238 }
239
240 if (cnt)
241 nv44_sgdma_fill(pgt, list, pte, cnt);
242
243 nv44_sgdma_flush(ttm);
244 return 0;
245}
246
247static int
248nv44_sgdma_unbind(struct ttm_tt *ttm)
249{
250 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
251 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
252 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
253 u32 pte = (nvbe->offset >> 12) << 2;
254 u32 cnt = ttm->num_pages;
255
256 if (pte & 0x0000000c) {
257 u32 max = 4 - ((pte >> 2) & 0x3);
258 u32 part = (cnt > max) ? max : cnt;
259 nv44_sgdma_fill(pgt, NULL, pte, part);
260 pte += (part << 2);
261 cnt -= part;
262 }
263
264 while (cnt >= 4) {
265 nv_wo32(pgt, pte + 0x0, 0x00000000);
266 nv_wo32(pgt, pte + 0x4, 0x00000000);
267 nv_wo32(pgt, pte + 0x8, 0x00000000);
268 nv_wo32(pgt, pte + 0xc, 0x00000000);
269 pte += 0x10;
270 cnt -= 4;
271 }
272
273 if (cnt)
274 nv44_sgdma_fill(pgt, NULL, pte, cnt);
275
276 nv44_sgdma_flush(ttm);
277 return 0;
278}
279
280static struct ttm_backend_func nv44_sgdma_backend = {
281 .bind = nv44_sgdma_bind,
282 .unbind = nv44_sgdma_unbind,
283 .destroy = nouveau_sgdma_destroy
284};
285
286static int 64static int
287nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 65nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
288{ 66{
@@ -337,82 +115,24 @@ int
337nouveau_sgdma_init(struct drm_device *dev) 115nouveau_sgdma_init(struct drm_device *dev)
338{ 116{
339 struct drm_nouveau_private *dev_priv = dev->dev_private; 117 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct nouveau_gpuobj *gpuobj = NULL; 118 u32 aper_size;
341 u32 aper_size, align;
342 int ret;
343 119
344 if (dev_priv->card_type >= NV_40) 120 if (dev_priv->card_type >= NV_50)
345 aper_size = 512 * 1024 * 1024; 121 aper_size = 512 * 1024 * 1024;
346 else 122 else
347 aper_size = 128 * 1024 * 1024; 123 aper_size = 128 * 1024 * 1024;
348 124
349 /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
350 * christmas. The cards before it have them, the cards after
351 * it have them, why is NV44 so unloved?
352 */
353 dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
354 if (!dev_priv->gart_info.dummy.page)
355 return -ENOMEM;
356
357 dev_priv->gart_info.dummy.addr =
358 pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
359 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
360 if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
361 NV_ERROR(dev, "error mapping dummy page\n");
362 __free_page(dev_priv->gart_info.dummy.page);
363 dev_priv->gart_info.dummy.page = NULL;
364 return -ENOMEM;
365 }
366
367 if (dev_priv->card_type >= NV_50) { 125 if (dev_priv->card_type >= NV_50) {
368 dev_priv->gart_info.aper_base = 0; 126 dev_priv->gart_info.aper_base = 0;
369 dev_priv->gart_info.aper_size = aper_size; 127 dev_priv->gart_info.aper_size = aper_size;
370 dev_priv->gart_info.type = NOUVEAU_GART_HW; 128 dev_priv->gart_info.type = NOUVEAU_GART_HW;
371 dev_priv->gart_info.func = &nv50_sgdma_backend; 129 dev_priv->gart_info.func = &nv50_sgdma_backend;
372 } else
373 if (0 && pci_is_pcie(dev->pdev) &&
374 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
375 if (nv44_graph_class(dev)) {
376 dev_priv->gart_info.func = &nv44_sgdma_backend;
377 align = 512 * 1024;
378 } else {
379 dev_priv->gart_info.func = &nv41_sgdma_backend;
380 align = 16;
381 }
382
383 ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
384 NVOBJ_FLAG_ZERO_ALLOC |
385 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
386 if (ret) {
387 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
388 return ret;
389 }
390
391 dev_priv->gart_info.sg_ctxdma = gpuobj;
392 dev_priv->gart_info.aper_base = 0;
393 dev_priv->gart_info.aper_size = aper_size;
394 dev_priv->gart_info.type = NOUVEAU_GART_HW;
395 } else { 130 } else {
396 ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
397 NVOBJ_FLAG_ZERO_ALLOC |
398 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
399 if (ret) {
400 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
401 return ret;
402 }
403
404 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
405 (1 << 12) /* PT present */ |
406 (0 << 13) /* PT *not* linear */ |
407 (0 << 14) /* RW */ |
408 (2 << 16) /* PCI */);
409 nv_wo32(gpuobj, 4, aper_size - 1);
410
411 dev_priv->gart_info.sg_ctxdma = gpuobj;
412 dev_priv->gart_info.aper_base = 0; 131 dev_priv->gart_info.aper_base = 0;
413 dev_priv->gart_info.aper_size = aper_size; 132 dev_priv->gart_info.aper_size = aper_size;
414 dev_priv->gart_info.type = NOUVEAU_GART_PDMA; 133 dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
415 dev_priv->gart_info.func = &nv04_sgdma_backend; 134 dev_priv->gart_info.func = &nv04_sgdma_backend;
135 dev_priv->gart_info.sg_ctxdma = nv04vm_refdma(dev);
416 } 136 }
417 137
418 return 0; 138 return 0;
@@ -424,13 +144,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
424 struct drm_nouveau_private *dev_priv = dev->dev_private; 144 struct drm_nouveau_private *dev_priv = dev->dev_private;
425 145
426 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); 146 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
427
428 if (dev_priv->gart_info.dummy.page) {
429 pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
430 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
431 __free_page(dev_priv->gart_info.dummy.page);
432 dev_priv->gart_info.dummy.page = NULL;
433 }
434} 147}
435 148
436uint32_t 149uint32_t
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 14998ee89c94..f53c6a748200 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -52,15 +52,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
52 52
53 switch (dev_priv->chipset & 0xf0) { 53 switch (dev_priv->chipset & 0xf0) {
54 case 0x00: 54 case 0x00:
55 engine->instmem.init = nv04_instmem_init;
56 engine->instmem.takedown = nv04_instmem_takedown;
57 engine->instmem.suspend = nv04_instmem_suspend;
58 engine->instmem.resume = nv04_instmem_resume;
59 engine->instmem.get = nv04_instmem_get;
60 engine->instmem.put = nv04_instmem_put;
61 engine->instmem.map = nv04_instmem_map;
62 engine->instmem.unmap = nv04_instmem_unmap;
63 engine->instmem.flush = nv04_instmem_flush;
64 engine->display.early_init = nv04_display_early_init; 55 engine->display.early_init = nv04_display_early_init;
65 engine->display.late_takedown = nv04_display_late_takedown; 56 engine->display.late_takedown = nv04_display_late_takedown;
66 engine->display.create = nv04_display_create; 57 engine->display.create = nv04_display_create;
@@ -72,15 +63,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
72 engine->pm.clocks_set = nv04_pm_clocks_set; 63 engine->pm.clocks_set = nv04_pm_clocks_set;
73 break; 64 break;
74 case 0x10: 65 case 0x10:
75 engine->instmem.init = nv04_instmem_init;
76 engine->instmem.takedown = nv04_instmem_takedown;
77 engine->instmem.suspend = nv04_instmem_suspend;
78 engine->instmem.resume = nv04_instmem_resume;
79 engine->instmem.get = nv04_instmem_get;
80 engine->instmem.put = nv04_instmem_put;
81 engine->instmem.map = nv04_instmem_map;
82 engine->instmem.unmap = nv04_instmem_unmap;
83 engine->instmem.flush = nv04_instmem_flush;
84 engine->display.early_init = nv04_display_early_init; 66 engine->display.early_init = nv04_display_early_init;
85 engine->display.late_takedown = nv04_display_late_takedown; 67 engine->display.late_takedown = nv04_display_late_takedown;
86 engine->display.create = nv04_display_create; 68 engine->display.create = nv04_display_create;
@@ -92,15 +74,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
92 engine->pm.clocks_set = nv04_pm_clocks_set; 74 engine->pm.clocks_set = nv04_pm_clocks_set;
93 break; 75 break;
94 case 0x20: 76 case 0x20:
95 engine->instmem.init = nv04_instmem_init;
96 engine->instmem.takedown = nv04_instmem_takedown;
97 engine->instmem.suspend = nv04_instmem_suspend;
98 engine->instmem.resume = nv04_instmem_resume;
99 engine->instmem.get = nv04_instmem_get;
100 engine->instmem.put = nv04_instmem_put;
101 engine->instmem.map = nv04_instmem_map;
102 engine->instmem.unmap = nv04_instmem_unmap;
103 engine->instmem.flush = nv04_instmem_flush;
104 engine->display.early_init = nv04_display_early_init; 77 engine->display.early_init = nv04_display_early_init;
105 engine->display.late_takedown = nv04_display_late_takedown; 78 engine->display.late_takedown = nv04_display_late_takedown;
106 engine->display.create = nv04_display_create; 79 engine->display.create = nv04_display_create;
@@ -112,15 +85,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
112 engine->pm.clocks_set = nv04_pm_clocks_set; 85 engine->pm.clocks_set = nv04_pm_clocks_set;
113 break; 86 break;
114 case 0x30: 87 case 0x30:
115 engine->instmem.init = nv04_instmem_init;
116 engine->instmem.takedown = nv04_instmem_takedown;
117 engine->instmem.suspend = nv04_instmem_suspend;
118 engine->instmem.resume = nv04_instmem_resume;
119 engine->instmem.get = nv04_instmem_get;
120 engine->instmem.put = nv04_instmem_put;
121 engine->instmem.map = nv04_instmem_map;
122 engine->instmem.unmap = nv04_instmem_unmap;
123 engine->instmem.flush = nv04_instmem_flush;
124 engine->display.early_init = nv04_display_early_init; 88 engine->display.early_init = nv04_display_early_init;
125 engine->display.late_takedown = nv04_display_late_takedown; 89 engine->display.late_takedown = nv04_display_late_takedown;
126 engine->display.create = nv04_display_create; 90 engine->display.create = nv04_display_create;
@@ -135,15 +99,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
135 break; 99 break;
136 case 0x40: 100 case 0x40:
137 case 0x60: 101 case 0x60:
138 engine->instmem.init = nv40_instmem_init;
139 engine->instmem.takedown = nv40_instmem_takedown;
140 engine->instmem.suspend = nv40_instmem_suspend;
141 engine->instmem.resume = nv40_instmem_resume;
142 engine->instmem.get = nv40_instmem_get;
143 engine->instmem.put = nv40_instmem_put;
144 engine->instmem.map = nv40_instmem_map;
145 engine->instmem.unmap = nv40_instmem_unmap;
146 engine->instmem.flush = nv40_instmem_flush;
147 engine->display.early_init = nv04_display_early_init; 102 engine->display.early_init = nv04_display_early_init;
148 engine->display.late_takedown = nv04_display_late_takedown; 103 engine->display.late_takedown = nv04_display_late_takedown;
149 engine->display.create = nv04_display_create; 104 engine->display.create = nv04_display_create;
@@ -163,18 +118,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
163 case 0x80: /* gotta love NVIDIA's consistency.. */ 118 case 0x80: /* gotta love NVIDIA's consistency.. */
164 case 0x90: 119 case 0x90:
165 case 0xa0: 120 case 0xa0:
166 engine->instmem.init = nv50_instmem_init;
167 engine->instmem.takedown = nv50_instmem_takedown;
168 engine->instmem.suspend = nv50_instmem_suspend;
169 engine->instmem.resume = nv50_instmem_resume;
170 engine->instmem.get = nv50_instmem_get;
171 engine->instmem.put = nv50_instmem_put;
172 engine->instmem.map = nv50_instmem_map;
173 engine->instmem.unmap = nv50_instmem_unmap;
174 if (dev_priv->chipset == 0x50)
175 engine->instmem.flush = nv50_instmem_flush;
176 else
177 engine->instmem.flush = nv84_instmem_flush;
178 engine->display.early_init = nv50_display_early_init; 121 engine->display.early_init = nv50_display_early_init;
179 engine->display.late_takedown = nv50_display_late_takedown; 122 engine->display.late_takedown = nv50_display_late_takedown;
180 engine->display.create = nv50_display_create; 123 engine->display.create = nv50_display_create;
@@ -212,15 +155,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
212 engine->pm.pwm_set = nv50_pm_pwm_set; 155 engine->pm.pwm_set = nv50_pm_pwm_set;
213 break; 156 break;
214 case 0xc0: 157 case 0xc0:
215 engine->instmem.init = nvc0_instmem_init;
216 engine->instmem.takedown = nvc0_instmem_takedown;
217 engine->instmem.suspend = nvc0_instmem_suspend;
218 engine->instmem.resume = nvc0_instmem_resume;
219 engine->instmem.get = nv50_instmem_get;
220 engine->instmem.put = nv50_instmem_put;
221 engine->instmem.map = nv50_instmem_map;
222 engine->instmem.unmap = nv50_instmem_unmap;
223 engine->instmem.flush = nv84_instmem_flush;
224 engine->display.early_init = nv50_display_early_init; 158 engine->display.early_init = nv50_display_early_init;
225 engine->display.late_takedown = nv50_display_late_takedown; 159 engine->display.late_takedown = nv50_display_late_takedown;
226 engine->display.create = nv50_display_create; 160 engine->display.create = nv50_display_create;
@@ -237,15 +171,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
237 engine->pm.pwm_set = nv50_pm_pwm_set; 171 engine->pm.pwm_set = nv50_pm_pwm_set;
238 break; 172 break;
239 case 0xd0: 173 case 0xd0:
240 engine->instmem.init = nvc0_instmem_init;
241 engine->instmem.takedown = nvc0_instmem_takedown;
242 engine->instmem.suspend = nvc0_instmem_suspend;
243 engine->instmem.resume = nvc0_instmem_resume;
244 engine->instmem.get = nv50_instmem_get;
245 engine->instmem.put = nv50_instmem_put;
246 engine->instmem.map = nv50_instmem_map;
247 engine->instmem.unmap = nv50_instmem_unmap;
248 engine->instmem.flush = nv84_instmem_flush;
249 engine->display.early_init = nouveau_stub_init; 174 engine->display.early_init = nouveau_stub_init;
250 engine->display.late_takedown = nouveau_stub_takedown; 175 engine->display.late_takedown = nouveau_stub_takedown;
251 engine->display.create = nvd0_display_create; 176 engine->display.create = nvd0_display_create;
@@ -260,15 +185,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
260 engine->pm.voltage_set = nouveau_voltage_gpio_set; 185 engine->pm.voltage_set = nouveau_voltage_gpio_set;
261 break; 186 break;
262 case 0xe0: 187 case 0xe0:
263 engine->instmem.init = nvc0_instmem_init;
264 engine->instmem.takedown = nvc0_instmem_takedown;
265 engine->instmem.suspend = nvc0_instmem_suspend;
266 engine->instmem.resume = nvc0_instmem_resume;
267 engine->instmem.get = nv50_instmem_get;
268 engine->instmem.put = nv50_instmem_put;
269 engine->instmem.map = nv50_instmem_map;
270 engine->instmem.unmap = nv50_instmem_unmap;
271 engine->instmem.flush = nv84_instmem_flush;
272 engine->display.early_init = nouveau_stub_init; 188 engine->display.early_init = nouveau_stub_init;
273 engine->display.late_takedown = nouveau_stub_takedown; 189 engine->display.late_takedown = nouveau_stub_takedown;
274 engine->display.create = nvd0_display_create; 190 engine->display.create = nvd0_display_create;
@@ -354,8 +270,10 @@ nouveau_card_channel_fini(struct drm_device *dev)
354{ 270{
355 struct drm_nouveau_private *dev_priv = dev->dev_private; 271 struct drm_nouveau_private *dev_priv = dev->dev_private;
356 272
357 if (dev_priv->channel) 273 if (dev_priv->channel) {
358 nouveau_channel_put_unlocked(&dev_priv->channel); 274 nouveau_channel_put_unlocked(&dev_priv->channel);
275 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
276 }
359} 277}
360 278
361static int 279static int
@@ -365,6 +283,10 @@ nouveau_card_channel_init(struct drm_device *dev)
365 struct nouveau_channel *chan; 283 struct nouveau_channel *chan;
366 int ret; 284 int ret;
367 285
286 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x1000, &dev_priv->chan_vm);
287 if (ret)
288 return ret;
289
368 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT); 290 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
369 dev_priv->channel = chan; 291 dev_priv->channel = chan;
370 if (ret) 292 if (ret)
@@ -400,6 +322,7 @@ nouveau_card_init(struct drm_device *dev)
400 spin_lock_init(&dev_priv->tile.lock); 322 spin_lock_init(&dev_priv->tile.lock);
401 spin_lock_init(&dev_priv->context_switch_lock); 323 spin_lock_init(&dev_priv->context_switch_lock);
402 spin_lock_init(&dev_priv->vm_lock); 324 spin_lock_init(&dev_priv->vm_lock);
325 INIT_LIST_HEAD(&dev_priv->classes);
403 326
404 /* Make the CRTCs and I2C buses accessible */ 327 /* Make the CRTCs and I2C buses accessible */
405 ret = engine->display.early_init(dev); 328 ret = engine->display.early_init(dev);
@@ -419,17 +342,9 @@ nouveau_card_init(struct drm_device *dev)
419 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); 342 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
420 } 343 }
421 344
422 ret = nouveau_gpuobj_init(dev);
423 if (ret)
424 goto out_bios;
425
426 ret = engine->instmem.init(dev);
427 if (ret)
428 goto out_gpuobj;
429
430 ret = nouveau_mem_vram_init(dev); 345 ret = nouveau_mem_vram_init(dev);
431 if (ret) 346 if (ret)
432 goto out_instmem; 347 goto out_bios;
433 348
434 ret = nouveau_mem_gart_init(dev); 349 ret = nouveau_mem_gart_init(dev);
435 if (ret) 350 if (ret)
@@ -652,10 +567,6 @@ out_engine:
652 nouveau_mem_gart_fini(dev); 567 nouveau_mem_gart_fini(dev);
653out_ttmvram: 568out_ttmvram:
654 nouveau_mem_vram_fini(dev); 569 nouveau_mem_vram_fini(dev);
655out_instmem:
656 engine->instmem.takedown(dev);
657out_gpuobj:
658 nouveau_gpuobj_takedown(dev);
659out_bios: 570out_bios:
660 nouveau_bios_takedown(dev); 571 nouveau_bios_takedown(dev);
661out_display_early: 572out_display_early:
@@ -703,9 +614,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
703 nouveau_mem_gart_fini(dev); 614 nouveau_mem_gart_fini(dev);
704 nouveau_mem_vram_fini(dev); 615 nouveau_mem_vram_fini(dev);
705 616
706 engine->instmem.takedown(dev);
707 nouveau_gpuobj_takedown(dev);
708
709 nouveau_bios_takedown(dev); 617 nouveau_bios_takedown(dev);
710 engine->display.late_takedown(dev); 618 engine->display.late_takedown(dev);
711 619
@@ -955,32 +863,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
955 if (ret) 863 if (ret)
956 goto err_priv; 864 goto err_priv;
957 865
958 /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
959 if (dev_priv->card_type >= NV_40) {
960 int ramin_bar = 2;
961 if (pci_resource_len(dev->pdev, ramin_bar) == 0)
962 ramin_bar = 3;
963
964 dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
965 dev_priv->ramin =
966 ioremap(pci_resource_start(dev->pdev, ramin_bar),
967 dev_priv->ramin_size);
968 if (!dev_priv->ramin) {
969 NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
970 ret = -ENOMEM;
971 goto err_priv;
972 }
973 } else {
974 dev_priv->ramin_size = 1 * 1024 * 1024;
975 dev_priv->ramin = ioremap(pci_resource_start(dev->pdev, 0),
976 dev_priv->ramin_size);
977 if (!dev_priv->ramin) {
978 NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
979 ret = -ENOMEM;
980 goto err_priv;
981 }
982 }
983
984 nouveau_OF_copy_vbios_to_ramin(dev); 866 nouveau_OF_copy_vbios_to_ramin(dev);
985 867
986 /* Special flags */ 868 /* Special flags */
@@ -992,12 +874,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
992 /* For kernel modesetting, init card now and bring up fbcon */ 874 /* For kernel modesetting, init card now and bring up fbcon */
993 ret = nouveau_card_init(dev); 875 ret = nouveau_card_init(dev);
994 if (ret) 876 if (ret)
995 goto err_ramin; 877 goto err_priv;
996 878
997 return 0; 879 return 0;
998 880
999err_ramin:
1000 iounmap(dev_priv->ramin);
1001err_priv: 881err_priv:
1002 dev->dev_private = dev_priv->newpriv; 882 dev->dev_private = dev_priv->newpriv;
1003 kfree(dev_priv); 883 kfree(dev_priv);
@@ -1016,8 +896,6 @@ int nouveau_unload(struct drm_device *dev)
1016 896
1017 nouveau_card_takedown(dev); 897 nouveau_card_takedown(dev);
1018 898
1019 iounmap(dev_priv->ramin);
1020
1021 dev->dev_private = dev_priv->newpriv; 899 dev->dev_private = dev_priv->newpriv;
1022 kfree(dev_priv); 900 kfree(dev_priv);
1023 return 0; 901 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ccbb03ead8eb..9c0bb20b0dc3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -217,7 +217,7 @@ nv50_display_init(struct drm_device *dev)
217 return ret; 217 return ret;
218 evo = nv50_display(dev)->master; 218 evo = nv50_display(dev)->master;
219 219
220 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); 220 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->addr >> 8) | 9);
221 221
222 ret = RING_SPACE(evo, 3); 222 ret = RING_SPACE(evo, 3);
223 if (ret) 223 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 7a1424243665..7e9a6d6d673b 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -70,7 +70,7 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
70 nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM, 70 nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
71 NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0); 71 NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
72 nv_wo32(obj, 0x14, flags5); 72 nv_wo32(obj, 0x14, flags5);
73 dev_priv->engine.instmem.flush(obj->dev); 73 nvimem_flush(obj->dev);
74} 74}
75 75
76int 76int
@@ -263,12 +263,6 @@ nv50_evo_create(struct drm_device *dev)
263 goto err; 263 goto err;
264 } 264 }
265 265
266 ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
267 if (ret) {
268 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
269 goto err;
270 }
271
272 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); 266 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
273 if (ret) { 267 if (ret) {
274 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); 268 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
@@ -294,7 +288,7 @@ nv50_evo_create(struct drm_device *dev)
294 goto err; 288 goto err;
295 289
296 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, 290 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
297 disp->ntfy->vinst, disp->ntfy->size, NULL); 291 disp->ntfy->addr, disp->ntfy->size, NULL);
298 if (ret) 292 if (ret)
299 goto err; 293 goto err;
300 294
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
index 1440a948d0aa..a1c06d44eeb5 100644
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -48,7 +48,7 @@ mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
48 if (!gpuobj) 48 if (!gpuobj)
49 return -ENOENT; 49 return -ENOENT;
50 50
51 pch->base.vblank.ctxdma = gpuobj->cinst >> 4; 51 pch->base.vblank.ctxdma = gpuobj->node->offset >> 4;
52 return 0; 52 return 0;
53} 53}
54 54
@@ -105,7 +105,7 @@ nv50_software_context_new(struct nouveau_channel *chan, int engine)
105 return -ENOMEM; 105 return -ENOMEM;
106 106
107 nouveau_software_context_new(&pch->base); 107 nouveau_software_context_new(&pch->base);
108 pch->base.vblank.channel = chan->ramin->vinst >> 12; 108 pch->base.vblank.channel = chan->ramin->addr >> 12;
109 chan->engctx[engine] = pch; 109 chan->engctx[engine] = pch;
110 110
111 /* dma objects for display sync channel semaphore blocks */ 111 /* dma objects for display sync channel semaphore blocks */
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 199c57c669b5..721716aacbe0 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -106,7 +106,7 @@ nv84_fence_context_new(struct nouveau_channel *chan, int engine)
106 nouveau_fence_context_new(&fctx->base); 106 nouveau_fence_context_new(&fctx->base);
107 107
108 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, 108 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
109 priv->mem->vinst, priv->mem->size, 109 priv->mem->addr, priv->mem->size,
110 NV_MEM_ACCESS_RW, 110 NV_MEM_ACCESS_RW,
111 NV_MEM_TARGET_VRAM, &obj); 111 NV_MEM_TARGET_VRAM, &obj);
112 if (ret == 0) { 112 if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 8d9fc00718f0..715359ef9211 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -1903,7 +1903,7 @@ nvd0_display_init(struct drm_device *dev)
1903 } 1903 }
1904 1904
1905 /* point at our hash table / objects, enable interrupts */ 1905 /* point at our hash table / objects, enable interrupts */
1906 nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9); 1906 nv_wr32(dev, 0x610010, (disp->mem->addr >> 8) | 9);
1907 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307); 1907 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
1908 1908
1909 /* init master */ 1909 /* init master */
@@ -1967,7 +1967,6 @@ int
1967nvd0_display_create(struct drm_device *dev) 1967nvd0_display_create(struct drm_device *dev)
1968{ 1968{
1969 struct drm_nouveau_private *dev_priv = dev->dev_private; 1969 struct drm_nouveau_private *dev_priv = dev->dev_private;
1970 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
1971 struct dcb_table *dcb = &dev_priv->vbios.dcb; 1970 struct dcb_table *dcb = &dev_priv->vbios.dcb;
1972 struct drm_connector *connector, *tmp; 1971 struct drm_connector *connector, *tmp;
1973 struct pci_dev *pdev = dev->pdev; 1972 struct pci_dev *pdev = dev->pdev;
@@ -2106,7 +2105,7 @@ nvd0_display_create(struct drm_device *dev)
2106 ((dmao + 0x60) << 9)); 2105 ((dmao + 0x60) << 9));
2107 } 2106 }
2108 2107
2109 pinstmem->flush(dev); 2108 nvimem_flush(dev);
2110 2109
2111out: 2110out:
2112 if (ret) 2111 if (ret)