-rw-r--r--  drivers/gpu/drm/nouveau/Makefile              |    9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c        |  102
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c          |  256
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c     |  344
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c   |   54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c     |  207
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c         |   10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c          |    6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c         |   52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h         |  418
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c       |  160
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h       |   12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c       |   89
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c         |  167
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c          |   11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c         | 1210
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c         |  419
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c          |  271
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h          |   62
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c    |   39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c      |  726
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c          |   30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.c       |   11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.h       |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h         |   75
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c       |  212
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c       |  279
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.c        |   69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.h        |   45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c          |  421
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h          |  107
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c           |    8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c            |   12
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c        |   21
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c          |  102
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c           |  240
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c          |  645
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c        |   50
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c             |  124
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c           |   19
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c          |  203
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c          |  244
-rw-r--r--  drivers/gpu/drm/nouveau/nv30_fb.c             |   23
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c             |   22
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c           |   19
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c          |  193
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c           |    9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c        |  422
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h        |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c            |  318
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h            |   10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c             |   71
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c          |   90
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c           |   42
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c           |  198
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c          |  642
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c        |  377
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c             |  178
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c           |  190
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c          |  140
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c        |  121
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h               |    3
-rw-r--r--  include/drm/nouveau_drm.h                     |    5
63 files changed, 6600 insertions(+), 4018 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d667d6..b1d8941e04d8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,12 +5,13 @@
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ 6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \ 7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \
8 nouveau_sgdma.o nouveau_dma.o \ 8 nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ 9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ 10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ 11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
12 nouveau_dp.o nouveau_ramht.o \ 12 nouveau_dp.o nouveau_ramht.o \
13 nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ 13 nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
14 nouveau_mm.o nouveau_vm.o \
14 nv04_timer.o \ 15 nv04_timer.o \
15 nv04_mc.o nv40_mc.o nv50_mc.o \ 16 nv04_mc.o nv40_mc.o nv50_mc.o \
16 nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ 17 nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -18,14 +19,16 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
18 nv04_graph.o nv10_graph.o nv20_graph.o \ 19 nv04_graph.o nv10_graph.o nv20_graph.o \
19 nv40_graph.o nv50_graph.o nvc0_graph.o \ 20 nv40_graph.o nv50_graph.o nvc0_graph.o \
20 nv40_grctx.o nv50_grctx.o \ 21 nv40_grctx.o nv50_grctx.o \
22 nv84_crypt.o \
21 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 23 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
22 nv50_crtc.o nv50_dac.o nv50_sor.o \ 24 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
23 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 25 nv50_cursor.o nv50_display.o nv50_fbcon.o \
24 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 26 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
25 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 27 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
26 nv10_gpio.o nv50_gpio.o \ 28 nv10_gpio.o nv50_gpio.o \
27 nv50_calc.o \ 29 nv50_calc.o \
28 nv04_pm.o nv50_pm.o nva3_pm.o 30 nv04_pm.o nv50_pm.o nva3_pm.o \
31 nv50_vram.o nv50_vm.o
29 32
30nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 33nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
31nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 34nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b2293576f278..d3046559bf05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
6053 return entry; 6053 return entry;
6054} 6054}
6055 6055
6056static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads) 6056static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
6057 int heads, int or)
6057{ 6058{
6058 struct dcb_entry *entry = new_dcb_entry(dcb); 6059 struct dcb_entry *entry = new_dcb_entry(dcb);
6059 6060
6060 entry->type = 0; 6061 entry->type = type;
6061 entry->i2c_index = i2c; 6062 entry->i2c_index = i2c;
6062 entry->heads = heads; 6063 entry->heads = heads;
6063 entry->location = DCB_LOC_ON_CHIP; 6064 if (type != OUTPUT_ANALOG)
6064 entry->or = 1; 6065 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
6065} 6066 entry->or = or;
6066
6067static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
6068{
6069 struct dcb_entry *entry = new_dcb_entry(dcb);
6070
6071 entry->type = 2;
6072 entry->i2c_index = LEGACY_I2C_PANEL;
6073 entry->heads = twoHeads ? 3 : 1;
6074 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
6075 entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
6076 entry->duallink_possible = false; /* SiI164 and co. are single link */
6077
6078#if 0
6079 /*
6080 * For dvi-a either crtc probably works, but my card appears to only
6081 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
6082 * doing the full fp output setup (program 0x6808.. fp dimension regs,
6083 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
6084 * the monitor picks up the mode res ok and lights up, but no pixel
6085 * data appears, so the board manufacturer probably connected up the
6086 * sync lines, but missed the video traces / components
6087 *
6088 * with this introduction, dvi-a left as an exercise for the reader.
6089 */
6090 fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
6091#endif
6092}
6093
6094static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
6095{
6096 struct dcb_entry *entry = new_dcb_entry(dcb);
6097
6098 entry->type = 1;
6099 entry->i2c_index = LEGACY_I2C_TV;
6100 entry->heads = twoHeads ? 3 : 1;
6101 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
6102} 6067}
6103 6068
6104static bool 6069static bool
@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6365 return true; 6330 return true;
6366} 6331}
6367 6332
6333static void
6334fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
6335{
6336 struct dcb_table *dcb = &bios->dcb;
6337 int all_heads = (nv_two_heads(dev) ? 3 : 1);
6338
6339#ifdef __powerpc__
6340 /* Apple iMac G4 NV17 */
6341 if (of_machine_is_compatible("PowerMac4,5")) {
6342 fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
6343 fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
6344 return;
6345 }
6346#endif
6347
6348 /* Make up some sane defaults */
6349 fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
6350
6351 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
6352 fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
6353 all_heads, 0);
6354
6355 else if (bios->tmds.output0_script_ptr ||
6356 bios->tmds.output1_script_ptr)
6357 fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
6358 all_heads, 1);
6359}
6360
6368static int 6361static int
6369parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 6362parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
6370{ 6363{
6371 struct drm_nouveau_private *dev_priv = dev->dev_private; 6364 struct drm_nouveau_private *dev_priv = dev->dev_private;
6372 struct dcb_table *dcb = &bios->dcb; 6365 struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
6386 6379
6387 /* this situation likely means a really old card, pre DCB */ 6380 /* this situation likely means a really old card, pre DCB */
6388 if (dcbptr == 0x0) { 6381 if (dcbptr == 0x0) {
6389 NV_INFO(dev, "Assuming a CRT output exists\n"); 6382 fabricate_dcb_encoder_table(dev, bios);
6390 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
6391
6392 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
6393 fabricate_tv_output(dcb, twoHeads);
6394
6395 return 0; 6383 return 0;
6396 } 6384 }
6397 6385
@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
6451 */ 6439 */
6452 NV_TRACEWARN(dev, "No useful information in BIOS output table; " 6440 NV_TRACEWARN(dev, "No useful information in BIOS output table; "
6453 "adding all possible outputs\n"); 6441 "adding all possible outputs\n");
6454 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); 6442 fabricate_dcb_encoder_table(dev, bios);
6455
6456 /*
6457 * Attempt to detect TV before DVI because the test
6458 * for the former is more accurate and it rules the
6459 * latter out.
6460 */
6461 if (nv04_tv_identify(dev,
6462 bios->legacy.i2c_indices.tv) >= 0)
6463 fabricate_tv_output(dcb, twoHeads);
6464
6465 else if (bios->tmds.output0_script_ptr ||
6466 bios->tmds.output1_script_ptr)
6467 fabricate_dvi_i_output(dcb, twoHeads);
6468
6469 return 0; 6443 return 0;
6470 } 6444 }
6471 6445
@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
6859 if (ret) 6833 if (ret)
6860 return ret; 6834 return ret;
6861 6835
6862 ret = parse_dcb_table(dev, bios, nv_two_heads(dev)); 6836 ret = parse_dcb_table(dev, bios);
6863 if (ret) 6837 if (ret)
6864 return ret; 6838 return ret;
6865 6839
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c200ef5..42d1ad62b381 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
32#include "nouveau_drm.h" 32#include "nouveau_drm.h"
33#include "nouveau_drv.h" 33#include "nouveau_drv.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35#include "nouveau_mm.h"
36#include "nouveau_vm.h"
35 37
36#include <linux/log2.h> 38#include <linux/log2.h>
37#include <linux/slab.h> 39#include <linux/slab.h>
@@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
46 if (unlikely(nvbo->gem)) 48 if (unlikely(nvbo->gem))
47 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
48 50
49 if (nvbo->tile) 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
50 nv10_mem_expire_tiling(dev, nvbo->tile, NULL); 52 nouveau_vm_put(&nvbo->vma);
51
52 kfree(nvbo); 53 kfree(nvbo);
53} 54}
54 55
55static void 56static void
56nouveau_bo_fixup_align(struct drm_device *dev, 57nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
57 uint32_t tile_mode, uint32_t tile_flags, 58 int *page_shift)
58 int *align, int *size)
59{ 59{
60 struct drm_nouveau_private *dev_priv = dev->dev_private; 60 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
61
62 /*
63 * Some of the tile_flags have a periodic structure of N*4096 bytes,
64 * align to to that as well as the page size. Align the size to the
65 * appropriate boundaries. This does imply that sizes are rounded up
66 * 3-7 pages, so be aware of this and do not waste memory by allocating
67 * many small buffers.
68 */
69 if (dev_priv->card_type == NV_50) {
70 uint32_t block_size = dev_priv->vram_size >> 15;
71 int i;
72
73 switch (tile_flags) {
74 case 0x1800:
75 case 0x2800:
76 case 0x4800:
77 case 0x7a00:
78 if (is_power_of_2(block_size)) {
79 for (i = 1; i < 10; i++) {
80 *align = 12 * i * block_size;
81 if (!(*align % 65536))
82 break;
83 }
84 } else {
85 for (i = 1; i < 10; i++) {
86 *align = 8 * i * block_size;
87 if (!(*align % 65536))
88 break;
89 }
90 }
91 *size = roundup(*size, *align);
92 break;
93 default:
94 break;
95 }
96 61
97 } else { 62 if (dev_priv->card_type < NV_50) {
98 if (tile_mode) { 63 if (nvbo->tile_mode) {
99 if (dev_priv->chipset >= 0x40) { 64 if (dev_priv->chipset >= 0x40) {
100 *align = 65536; 65 *align = 65536;
101 *size = roundup(*size, 64 * tile_mode); 66 *size = roundup(*size, 64 * nvbo->tile_mode);
102 67
103 } else if (dev_priv->chipset >= 0x30) { 68 } else if (dev_priv->chipset >= 0x30) {
104 *align = 32768; 69 *align = 32768;
105 *size = roundup(*size, 64 * tile_mode); 70 *size = roundup(*size, 64 * nvbo->tile_mode);
106 71
107 } else if (dev_priv->chipset >= 0x20) { 72 } else if (dev_priv->chipset >= 0x20) {
108 *align = 16384; 73 *align = 16384;
109 *size = roundup(*size, 64 * tile_mode); 74 *size = roundup(*size, 64 * nvbo->tile_mode);
110 75
111 } else if (dev_priv->chipset >= 0x10) { 76 } else if (dev_priv->chipset >= 0x10) {
112 *align = 16384; 77 *align = 16384;
113 *size = roundup(*size, 32 * tile_mode); 78 *size = roundup(*size, 32 * nvbo->tile_mode);
114 } 79 }
115 } 80 }
81 } else {
82 if (likely(dev_priv->chan_vm)) {
83 if (*size > 256 * 1024)
84 *page_shift = dev_priv->chan_vm->lpg_shift;
85 else
86 *page_shift = dev_priv->chan_vm->spg_shift;
87 } else {
88 *page_shift = 12;
89 }
90
91 *size = roundup(*size, (1 << *page_shift));
92 *align = max((1 << *page_shift), *align);
116 } 93 }
117 94
118 /* ALIGN works only on powers of two. */
119 *size = roundup(*size, PAGE_SIZE); 95 *size = roundup(*size, PAGE_SIZE);
120
121 if (dev_priv->card_type == NV_50) {
122 *size = roundup(*size, 65536);
123 *align = max(65536, *align);
124 }
125} 96}
126 97
127int 98int
@@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
132{ 103{
133 struct drm_nouveau_private *dev_priv = dev->dev_private; 104 struct drm_nouveau_private *dev_priv = dev->dev_private;
134 struct nouveau_bo *nvbo; 105 struct nouveau_bo *nvbo;
135 int ret = 0; 106 int ret = 0, page_shift = 0;
136 107
137 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 108 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
138 if (!nvbo) 109 if (!nvbo)
@@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
145 nvbo->tile_flags = tile_flags; 116 nvbo->tile_flags = tile_flags;
146 nvbo->bo.bdev = &dev_priv->ttm.bdev; 117 nvbo->bo.bdev = &dev_priv->ttm.bdev;
147 118
148 nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), 119 nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
149 &align, &size);
150 align >>= PAGE_SHIFT; 120 align >>= PAGE_SHIFT;
151 121
122 if (!nvbo->no_vm && dev_priv->chan_vm) {
123 ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
124 NV_MEM_ACCESS_RW, &nvbo->vma);
125 if (ret) {
126 kfree(nvbo);
127 return ret;
128 }
129 }
130
152 nouveau_bo_placement_set(nvbo, flags, 0); 131 nouveau_bo_placement_set(nvbo, flags, 0);
153 132
154 nvbo->channel = chan; 133 nvbo->channel = chan;
@@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
161 } 140 }
162 nvbo->channel = NULL; 141 nvbo->channel = NULL;
163 142
143 if (nvbo->vma.node) {
144 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
145 nvbo->bo.offset = nvbo->vma.offset;
146 }
147
164 *pnvbo = nvbo; 148 *pnvbo = nvbo;
165 return 0; 149 return 0;
166} 150}
@@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
244 228
245 nouveau_bo_placement_set(nvbo, memtype, 0); 229 nouveau_bo_placement_set(nvbo, memtype, 0);
246 230
247 ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); 231 ret = nouveau_bo_validate(nvbo, false, false, false);
248 if (ret == 0) { 232 if (ret == 0) {
249 switch (bo->mem.mem_type) { 233 switch (bo->mem.mem_type) {
250 case TTM_PL_VRAM: 234 case TTM_PL_VRAM:
@@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
280 264
281 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 265 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
282 266
283 ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); 267 ret = nouveau_bo_validate(nvbo, false, false, false);
284 if (ret == 0) { 268 if (ret == 0) {
285 switch (bo->mem.mem_type) { 269 switch (bo->mem.mem_type) {
286 case TTM_PL_VRAM: 270 case TTM_PL_VRAM:
@@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
319 ttm_bo_kunmap(&nvbo->kmap); 303 ttm_bo_kunmap(&nvbo->kmap);
320} 304}
321 305
306int
307nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
308 bool no_wait_reserve, bool no_wait_gpu)
309{
310 int ret;
311
312 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
313 no_wait_reserve, no_wait_gpu);
314 if (ret)
315 return ret;
316
317 if (nvbo->vma.node) {
318 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
319 nvbo->bo.offset = nvbo->vma.offset;
320 }
321
322 return 0;
323}
324
322u16 325u16
323nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) 326nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
324{ 327{
@@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
410 man->default_caching = TTM_PL_FLAG_CACHED; 413 man->default_caching = TTM_PL_FLAG_CACHED;
411 break; 414 break;
412 case TTM_PL_VRAM: 415 case TTM_PL_VRAM:
413 man->func = &ttm_bo_manager_func; 416 if (dev_priv->card_type == NV_50) {
417 man->func = &nouveau_vram_manager;
418 man->io_reserve_fastpath = false;
419 man->use_io_reserve_lru = true;
420 } else {
421 man->func = &ttm_bo_manager_func;
422 }
414 man->flags = TTM_MEMTYPE_FLAG_FIXED | 423 man->flags = TTM_MEMTYPE_FLAG_FIXED |
415 TTM_MEMTYPE_FLAG_MAPPABLE; 424 TTM_MEMTYPE_FLAG_MAPPABLE;
416 man->available_caching = TTM_PL_FLAG_UNCACHED | 425 man->available_caching = TTM_PL_FLAG_UNCACHED |
417 TTM_PL_FLAG_WC; 426 TTM_PL_FLAG_WC;
418 man->default_caching = TTM_PL_FLAG_WC; 427 man->default_caching = TTM_PL_FLAG_WC;
419 if (dev_priv->card_type == NV_50)
420 man->gpu_offset = 0x40000000;
421 else
422 man->gpu_offset = 0;
423 break; 428 break;
424 case TTM_PL_TT: 429 case TTM_PL_TT:
425 man->func = &ttm_bo_manager_func; 430 man->func = &ttm_bo_manager_func;
426 switch (dev_priv->gart_info.type) { 431 switch (dev_priv->gart_info.type) {
427 case NOUVEAU_GART_AGP: 432 case NOUVEAU_GART_AGP:
428 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 433 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
429 man->available_caching = TTM_PL_FLAG_UNCACHED; 434 man->available_caching = TTM_PL_FLAG_UNCACHED |
430 man->default_caching = TTM_PL_FLAG_UNCACHED; 435 TTM_PL_FLAG_WC;
436 man->default_caching = TTM_PL_FLAG_WC;
431 break; 437 break;
432 case NOUVEAU_GART_SGDMA: 438 case NOUVEAU_GART_SGDMA:
433 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 439 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
434 TTM_MEMTYPE_FLAG_CMA; 440 TTM_MEMTYPE_FLAG_CMA;
435 man->available_caching = TTM_PL_MASK_CACHING; 441 man->available_caching = TTM_PL_MASK_CACHING;
436 man->default_caching = TTM_PL_FLAG_CACHED; 442 man->default_caching = TTM_PL_FLAG_CACHED;
443 man->gpu_offset = dev_priv->gart_info.aper_base;
437 break; 444 break;
438 default: 445 default:
439 NV_ERROR(dev, "Unknown GART type: %d\n", 446 NV_ERROR(dev, "Unknown GART type: %d\n",
440 dev_priv->gart_info.type); 447 dev_priv->gart_info.type);
441 return -EINVAL; 448 return -EINVAL;
442 } 449 }
443 man->gpu_offset = dev_priv->vm_gart_base;
444 break; 450 break;
445 default: 451 default:
446 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); 452 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
485 if (ret) 491 if (ret)
486 return ret; 492 return ret;
487 493
488 if (nvbo->channel) {
489 ret = nouveau_fence_sync(fence, nvbo->channel);
490 if (ret)
491 goto out;
492 }
493
494 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, 494 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
495 no_wait_reserve, no_wait_gpu, new_mem); 495 no_wait_reserve, no_wait_gpu, new_mem);
496out: 496 nouveau_fence_unref(&fence);
497 nouveau_fence_unref((void *)&fence);
498 return ret; 497 return ret;
499} 498}
500 499
@@ -529,14 +528,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
529 dst_offset = new_mem->start << PAGE_SHIFT; 528 dst_offset = new_mem->start << PAGE_SHIFT;
530 if (!nvbo->no_vm) { 529 if (!nvbo->no_vm) {
531 if (old_mem->mem_type == TTM_PL_VRAM) 530 if (old_mem->mem_type == TTM_PL_VRAM)
532 src_offset += dev_priv->vm_vram_base; 531 src_offset = nvbo->vma.offset;
533 else 532 else
534 src_offset += dev_priv->vm_gart_base; 533 src_offset += dev_priv->gart_info.aper_base;
535 534
536 if (new_mem->mem_type == TTM_PL_VRAM) 535 if (new_mem->mem_type == TTM_PL_VRAM)
537 dst_offset += dev_priv->vm_vram_base; 536 dst_offset = nvbo->vma.offset;
538 else 537 else
539 dst_offset += dev_priv->vm_gart_base; 538 dst_offset += dev_priv->gart_info.aper_base;
540 } 539 }
541 540
542 ret = RING_SPACE(chan, 3); 541 ret = RING_SPACE(chan, 3);
@@ -683,17 +682,24 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
683 int ret; 682 int ret;
684 683
685 chan = nvbo->channel; 684 chan = nvbo->channel;
686 if (!chan || nvbo->no_vm) 685 if (!chan || nvbo->no_vm) {
687 chan = dev_priv->channel; 686 chan = dev_priv->channel;
687 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
688 }
688 689
689 if (dev_priv->card_type < NV_50) 690 if (dev_priv->card_type < NV_50)
690 ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); 691 ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
691 else 692 else
692 ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); 693 ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
693 if (ret) 694 if (ret == 0) {
694 return ret; 695 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
696 no_wait_reserve,
697 no_wait_gpu, new_mem);
698 }
695 699
696 return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); 700 if (chan == dev_priv->channel)
701 mutex_unlock(&chan->mutex);
702 return ret;
697} 703}
698 704
699static int 705static int
@@ -771,7 +777,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
771 struct drm_device *dev = dev_priv->dev; 777 struct drm_device *dev = dev_priv->dev;
772 struct nouveau_bo *nvbo = nouveau_bo(bo); 778 struct nouveau_bo *nvbo = nouveau_bo(bo);
773 uint64_t offset; 779 uint64_t offset;
774 int ret;
775 780
776 if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { 781 if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
777 /* Nothing to do. */ 782 /* Nothing to do. */
@@ -781,18 +786,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
781 786
782 offset = new_mem->start << PAGE_SHIFT; 787 offset = new_mem->start << PAGE_SHIFT;
783 788
784 if (dev_priv->card_type == NV_50) { 789 if (dev_priv->chan_vm) {
785 ret = nv50_mem_vm_bind_linear(dev, 790 nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
786 offset + dev_priv->vm_vram_base,
787 new_mem->size,
788 nouveau_bo_tile_layout(nvbo),
789 offset);
790 if (ret)
791 return ret;
792
793 } else if (dev_priv->card_type >= NV_10) { 791 } else if (dev_priv->card_type >= NV_10) {
794 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, 792 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
795 nvbo->tile_mode); 793 nvbo->tile_mode,
794 nvbo->tile_flags);
796 } 795 }
797 796
798 return 0; 797 return 0;
@@ -808,9 +807,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
808 807
809 if (dev_priv->card_type >= NV_10 && 808 if (dev_priv->card_type >= NV_10 &&
810 dev_priv->card_type < NV_50) { 809 dev_priv->card_type < NV_50) {
811 if (*old_tile) 810 nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
812 nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
813
814 *old_tile = new_tile; 811 *old_tile = new_tile;
815 } 812 }
816} 813}
@@ -879,6 +876,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
879 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 876 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
880 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 877 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
881 struct drm_device *dev = dev_priv->dev; 878 struct drm_device *dev = dev_priv->dev;
879 int ret;
882 880
883 mem->bus.addr = NULL; 881 mem->bus.addr = NULL;
884 mem->bus.offset = 0; 882 mem->bus.offset = 0;
@@ -901,9 +899,32 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
901#endif 899#endif
902 break; 900 break;
903 case TTM_PL_VRAM: 901 case TTM_PL_VRAM:
904 mem->bus.offset = mem->start << PAGE_SHIFT; 902 {
903 struct nouveau_vram *vram = mem->mm_node;
904
905 if (!dev_priv->bar1_vm) {
906 mem->bus.offset = mem->start << PAGE_SHIFT;
907 mem->bus.base = pci_resource_start(dev->pdev, 1);
908 mem->bus.is_iomem = true;
909 break;
910 }
911
912 ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12,
913 NV_MEM_ACCESS_RW, &vram->bar_vma);
914 if (ret)
915 return ret;
916
917 nouveau_vm_map(&vram->bar_vma, vram);
918 if (ret) {
919 nouveau_vm_put(&vram->bar_vma);
920 return ret;
921 }
922
923 mem->bus.offset = vram->bar_vma.offset;
924 mem->bus.offset -= 0x0020000000ULL;
905 mem->bus.base = pci_resource_start(dev->pdev, 1); 925 mem->bus.base = pci_resource_start(dev->pdev, 1);
906 mem->bus.is_iomem = true; 926 mem->bus.is_iomem = true;
927 }
907 break; 928 break;
908 default: 929 default:
909 return -EINVAL; 930 return -EINVAL;
@@ -914,6 +935,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
914static void 935static void
915nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 936nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
916{ 937{
938 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
939 struct nouveau_vram *vram = mem->mm_node;
940
941 if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
942 return;
943
944 if (!vram->bar_vma.node)
945 return;
946
947 nouveau_vm_unmap(&vram->bar_vma);
948 nouveau_vm_put(&vram->bar_vma);
917} 949}
918 950
919static int 951static int
@@ -939,7 +971,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
939 nvbo->placement.fpfn = 0; 971 nvbo->placement.fpfn = 0;
940 nvbo->placement.lpfn = dev_priv->fb_mappable_pages; 972 nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
941 nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); 973 nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
942 return ttm_bo_validate(bo, &nvbo->placement, false, true, false); 974 return nouveau_bo_validate(nvbo, false, true, false);
975}
976
977void
978nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
979{
980 struct nouveau_fence *old_fence;
981
982 if (likely(fence))
983 nouveau_fence_ref(fence);
984
985 spin_lock(&nvbo->bo.bdev->fence_lock);
986 old_fence = nvbo->bo.sync_obj;
987 nvbo->bo.sync_obj = fence;
988 spin_unlock(&nvbo->bo.bdev->fence_lock);
989
990 nouveau_fence_unref(&old_fence);
943} 991}
944 992
945struct ttm_bo_driver nouveau_bo_driver = { 993struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +997,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
949 .evict_flags = nouveau_bo_evict_flags, 997 .evict_flags = nouveau_bo_evict_flags,
950 .move = nouveau_bo_move, 998 .move = nouveau_bo_move,
951 .verify_access = nouveau_bo_verify_access, 999 .verify_access = nouveau_bo_verify_access,
952 .sync_obj_signaled = nouveau_fence_signalled, 1000 .sync_obj_signaled = __nouveau_fence_signalled,
953 .sync_obj_wait = nouveau_fence_wait, 1001 .sync_obj_wait = __nouveau_fence_wait,
954 .sync_obj_flush = nouveau_fence_flush, 1002 .sync_obj_flush = __nouveau_fence_flush,
955 .sync_obj_unref = nouveau_fence_unref, 1003 .sync_obj_unref = __nouveau_fence_unref,
956 .sync_obj_ref = nouveau_fence_ref, 1004 .sync_obj_ref = __nouveau_fence_ref,
957 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, 1005 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
958 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1006 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
959 .io_mem_free = &nouveau_ttm_io_mem_free, 1007 .io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e34814..6f37995aee2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
39 39
40 if (dev_priv->card_type >= NV_50) { 40 if (dev_priv->card_type >= NV_50) {
41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
42 dev_priv->vm_end, NV_DMA_ACCESS_RO, 42 (1ULL << 40), NV_MEM_ACCESS_RO,
43 NV_DMA_TARGET_AGP, &pushbuf); 43 NV_MEM_TARGET_VM, &pushbuf);
44 chan->pushbuf_base = pb->bo.offset; 44 chan->pushbuf_base = pb->bo.offset;
45 } else 45 } else
46 if (pb->bo.mem.mem_type == TTM_PL_TT) { 46 if (pb->bo.mem.mem_type == TTM_PL_TT) {
47 ret = nouveau_gpuobj_gart_dma_new(chan, 0, 47 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
48 dev_priv->gart_info.aper_size, 48 dev_priv->gart_info.aper_size,
49 NV_DMA_ACCESS_RO, &pushbuf, 49 NV_MEM_ACCESS_RO,
50 NULL); 50 NV_MEM_TARGET_GART, &pushbuf);
51 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 51 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
52 } else 52 } else
53 if (dev_priv->card_type != NV_04) { 53 if (dev_priv->card_type != NV_04) {
54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
55 dev_priv->fb_available_size, 55 dev_priv->fb_available_size,
56 NV_DMA_ACCESS_RO, 56 NV_MEM_ACCESS_RO,
57 NV_DMA_TARGET_VIDMEM, &pushbuf); 57 NV_MEM_TARGET_VRAM, &pushbuf);
58 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 58 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
59 } else { 59 } else {
60 /* NV04 cmdbuf hack, from original ddx.. not sure of it's 60 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
62 * VRAM. 62 * VRAM.
63 */ 63 */
64 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 64 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
65 pci_resource_start(dev->pdev, 65 pci_resource_start(dev->pdev, 1),
66 1),
67 dev_priv->fb_available_size, 66 dev_priv->fb_available_size,
68 NV_DMA_ACCESS_RO, 67 NV_MEM_ACCESS_RO,
69 NV_DMA_TARGET_PCI, &pushbuf); 68 NV_MEM_TARGET_PCI, &pushbuf);
70 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 69 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
71 } 70 }
72 71
@@ -107,74 +106,60 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
107int 106int
108nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, 107nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
109 struct drm_file *file_priv, 108 struct drm_file *file_priv,
110 uint32_t vram_handle, uint32_t tt_handle) 109 uint32_t vram_handle, uint32_t gart_handle)
111{ 110{
112 struct drm_nouveau_private *dev_priv = dev->dev_private; 111 struct drm_nouveau_private *dev_priv = dev->dev_private;
113 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 112 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
114 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 113 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
115 struct nouveau_channel *chan; 114 struct nouveau_channel *chan;
116 int channel, user; 115 unsigned long flags;
117 int ret; 116 int ret;
118 117
119 /* 118 /* allocate and lock channel structure */
120 * Alright, here is the full story 119 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
121 * Nvidia cards have multiple hw fifo contexts (praise them for that, 120 if (!chan)
122 * no complicated crash-prone context switches) 121 return -ENOMEM;
123 * We allocate a new context for each app and let it write to it 122 chan->dev = dev;
124 * directly (woo, full userspace command submission !) 123 chan->file_priv = file_priv;
125 * When there are no more contexts, you lost 124 chan->vram_handle = vram_handle;
126 */ 125 chan->gart_handle = gart_handle;
127 for (channel = 0; channel < pfifo->channels; channel++) { 126
128 if (dev_priv->fifos[channel] == NULL) 127 kref_init(&chan->ref);
128 atomic_set(&chan->users, 1);
129 mutex_init(&chan->mutex);
130 mutex_lock(&chan->mutex);
131
132 /* allocate hw channel id */
133 spin_lock_irqsave(&dev_priv->channels.lock, flags);
134 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
135 if (!dev_priv->channels.ptr[chan->id]) {
136 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
129 break; 137 break;
138 }
130 } 139 }
140 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
131 141
132 /* no more fifos. you lost. */ 142 if (chan->id == pfifo->channels) {
133 if (channel == pfifo->channels) 143 mutex_unlock(&chan->mutex);
134 return -EINVAL; 144 kfree(chan);
145 return -ENODEV;
146 }
135 147
136 dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), 148 NV_DEBUG(dev, "initialising channel %d\n", chan->id);
137 GFP_KERNEL);
138 if (!dev_priv->fifos[channel])
139 return -ENOMEM;
140 chan = dev_priv->fifos[channel];
141 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 149 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
150 INIT_LIST_HEAD(&chan->nvsw.flip);
142 INIT_LIST_HEAD(&chan->fence.pending); 151 INIT_LIST_HEAD(&chan->fence.pending);
143 chan->dev = dev;
144 chan->id = channel;
145 chan->file_priv = file_priv;
146 chan->vram_handle = vram_handle;
147 chan->gart_handle = tt_handle;
148
149 NV_INFO(dev, "Allocating FIFO number %d\n", channel);
150 152
151 /* Allocate DMA push buffer */ 153 /* Allocate DMA push buffer */
152 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); 154 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
153 if (!chan->pushbuf_bo) { 155 if (!chan->pushbuf_bo) {
154 ret = -ENOMEM; 156 ret = -ENOMEM;
155 NV_ERROR(dev, "pushbuf %d\n", ret); 157 NV_ERROR(dev, "pushbuf %d\n", ret);
156 nouveau_channel_free(chan); 158 nouveau_channel_put(&chan);
157 return ret; 159 return ret;
158 } 160 }
159 161
160 nouveau_dma_pre_init(chan); 162 nouveau_dma_pre_init(chan);
161
162 /* Locate channel's user control regs */
163 if (dev_priv->card_type < NV_40)
164 user = NV03_USER(channel);
165 else
166 if (dev_priv->card_type < NV_50)
167 user = NV40_USER(channel);
168 else
169 user = NV50_USER(channel);
170
171 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
172 PAGE_SIZE);
173 if (!chan->user) {
174 NV_ERROR(dev, "ioremap of regs failed.\n");
175 nouveau_channel_free(chan);
176 return -ENOMEM;
177 }
178 chan->user_put = 0x40; 163 chan->user_put = 0x40;
179 chan->user_get = 0x44; 164 chan->user_get = 0x44;
180 165
@@ -182,15 +167,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
182 ret = nouveau_notifier_init_channel(chan); 167 ret = nouveau_notifier_init_channel(chan);
183 if (ret) { 168 if (ret) {
184 NV_ERROR(dev, "ntfy %d\n", ret); 169 NV_ERROR(dev, "ntfy %d\n", ret);
185 nouveau_channel_free(chan); 170 nouveau_channel_put(&chan);
186 return ret; 171 return ret;
187 } 172 }
188 173
189 /* Setup channel's default objects */ 174 /* Setup channel's default objects */
190 ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); 175 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
191 if (ret) { 176 if (ret) {
192 NV_ERROR(dev, "gpuobj %d\n", ret); 177 NV_ERROR(dev, "gpuobj %d\n", ret);
193 nouveau_channel_free(chan); 178 nouveau_channel_put(&chan);
194 return ret; 179 return ret;
195 } 180 }
196 181
@@ -198,7 +183,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
198 ret = nouveau_channel_pushbuf_ctxdma_init(chan); 183 ret = nouveau_channel_pushbuf_ctxdma_init(chan);
199 if (ret) { 184 if (ret) {
200 NV_ERROR(dev, "pbctxdma %d\n", ret); 185 NV_ERROR(dev, "pbctxdma %d\n", ret);
201 nouveau_channel_free(chan); 186 nouveau_channel_put(&chan);
202 return ret; 187 return ret;
203 } 188 }
204 189
@@ -206,16 +191,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
206 pfifo->reassign(dev, false); 191 pfifo->reassign(dev, false);
207 192
208 /* Create a graphics context for new channel */ 193 /* Create a graphics context for new channel */
209 ret = pgraph->create_context(chan); 194 if (dev_priv->card_type < NV_50) {
210 if (ret) { 195 ret = pgraph->create_context(chan);
211 nouveau_channel_free(chan); 196 if (ret) {
212 return ret; 197 nouveau_channel_put(&chan);
198 return ret;
199 }
213 } 200 }
214 201
215 /* Construct inital RAMFC for new channel */ 202 /* Construct inital RAMFC for new channel */
216 ret = pfifo->create_context(chan); 203 ret = pfifo->create_context(chan);
217 if (ret) { 204 if (ret) {
218 nouveau_channel_free(chan); 205 nouveau_channel_put(&chan);
219 return ret; 206 return ret;
220 } 207 }
221 208
@@ -225,83 +212,108 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
225 if (!ret) 212 if (!ret)
226 ret = nouveau_fence_channel_init(chan); 213 ret = nouveau_fence_channel_init(chan);
227 if (ret) { 214 if (ret) {
228 nouveau_channel_free(chan); 215 nouveau_channel_put(&chan);
229 return ret; 216 return ret;
230 } 217 }
231 218
232 nouveau_debugfs_channel_init(chan); 219 nouveau_debugfs_channel_init(chan);
233 220
234 NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel); 221 NV_DEBUG(dev, "channel %d initialised\n", chan->id);
235 *chan_ret = chan; 222 *chan_ret = chan;
236 return 0; 223 return 0;
237} 224}
238 225
239/* stops a fifo */ 226struct nouveau_channel *
227nouveau_channel_get_unlocked(struct nouveau_channel *ref)
228{
229 struct nouveau_channel *chan = NULL;
230
231 if (likely(ref && atomic_inc_not_zero(&ref->users)))
232 nouveau_channel_ref(ref, &chan);
233
234 return chan;
235}
236
237struct nouveau_channel *
238nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
239{
240 struct drm_nouveau_private *dev_priv = dev->dev_private;
241 struct nouveau_channel *chan;
242 unsigned long flags;
243
244 spin_lock_irqsave(&dev_priv->channels.lock, flags);
245 chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
246 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
247
248 if (unlikely(!chan))
249 return ERR_PTR(-EINVAL);
250
251 if (unlikely(file_priv && chan->file_priv != file_priv)) {
252 nouveau_channel_put_unlocked(&chan);
253 return ERR_PTR(-EINVAL);
254 }
255
256 mutex_lock(&chan->mutex);
257 return chan;
258}
259
240void 260void
241nouveau_channel_free(struct nouveau_channel *chan) 261nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
242{ 262{
263 struct nouveau_channel *chan = *pchan;
243 struct drm_device *dev = chan->dev; 264 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private; 265 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
246 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 266 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
267 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
268 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
247 unsigned long flags; 269 unsigned long flags;
248 int ret;
249 270
250 NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); 271 /* decrement the refcount, and we're done if there's still refs */
272 if (likely(!atomic_dec_and_test(&chan->users))) {
273 nouveau_channel_ref(NULL, pchan);
274 return;
275 }
251 276
277 /* noone wants the channel anymore */
278 NV_DEBUG(dev, "freeing channel %d\n", chan->id);
252 nouveau_debugfs_channel_fini(chan); 279 nouveau_debugfs_channel_fini(chan);
253 280
254 /* Give outstanding push buffers a chance to complete */ 281 /* give it chance to idle */
255 nouveau_fence_update(chan); 282 nouveau_channel_idle(chan);
256 if (chan->fence.sequence != chan->fence.sequence_ack) {
257 struct nouveau_fence *fence = NULL;
258
259 ret = nouveau_fence_new(chan, &fence, true);
260 if (ret == 0) {
261 ret = nouveau_fence_wait(fence, NULL, false, false);
262 nouveau_fence_unref((void *)&fence);
263 }
264
265 if (ret)
266 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
267 }
268 283
269 /* Ensure all outstanding fences are signaled. They should be if the 284 /* ensure all outstanding fences are signaled. they should be if the
270 * above attempts at idling were OK, but if we failed this'll tell TTM 285 * above attempts at idling were OK, but if we failed this'll tell TTM
271 * we're done with the buffers. 286 * we're done with the buffers.
272 */ 287 */
273 nouveau_fence_channel_fini(chan); 288 nouveau_fence_channel_fini(chan);
274 289
275 /* This will prevent pfifo from switching channels. */ 290 /* boot it off the hardware */
276 pfifo->reassign(dev, false); 291 pfifo->reassign(dev, false);
277 292
278 /* We want to give pgraph a chance to idle and get rid of all potential 293 /* We want to give pgraph a chance to idle and get rid of all
279 * errors. We need to do this before the lock, otherwise the irq handler 294 * potential errors. We need to do this without the context
280 * is unable to process them. 295 * switch lock held, otherwise the irq handler is unable to
296 * process them.
281 */ 297 */
282 if (pgraph->channel(dev) == chan) 298 if (pgraph->channel(dev) == chan)
283 nouveau_wait_for_idle(dev); 299 nouveau_wait_for_idle(dev);
284 300
285 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 301 /* destroy the engine specific contexts */
286
287 pgraph->fifo_access(dev, false);
288 if (pgraph->channel(dev) == chan)
289 pgraph->unload_context(dev);
290 pgraph->destroy_context(chan);
291 pgraph->fifo_access(dev, true);
292
293 if (pfifo->channel_id(dev) == chan->id) {
294 pfifo->disable(dev);
295 pfifo->unload_context(dev);
296 pfifo->enable(dev);
297 }
298 pfifo->destroy_context(chan); 302 pfifo->destroy_context(chan);
303 pgraph->destroy_context(chan);
304 if (pcrypt->destroy_context)
305 pcrypt->destroy_context(chan);
299 306
300 pfifo->reassign(dev, true); 307 pfifo->reassign(dev, true);
301 308
302 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 309 /* aside from its resources, the channel should now be dead,
310 * remove it from the channel list
311 */
312 spin_lock_irqsave(&dev_priv->channels.lock, flags);
313 nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
314 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
303 315
304 /* Release the channel's resources */ 316 /* destroy any resources the channel owned */
305 nouveau_gpuobj_ref(NULL, &chan->pushbuf); 317 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
306 if (chan->pushbuf_bo) { 318 if (chan->pushbuf_bo) {
307 nouveau_bo_unmap(chan->pushbuf_bo); 319 nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +322,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
310 } 322 }
311 nouveau_gpuobj_channel_takedown(chan); 323 nouveau_gpuobj_channel_takedown(chan);
312 nouveau_notifier_takedown_channel(chan); 324 nouveau_notifier_takedown_channel(chan);
313 if (chan->user)
314 iounmap(chan->user);
315 325
316 dev_priv->fifos[chan->id] = NULL; 326 nouveau_channel_ref(NULL, pchan);
327}
328
329void
330nouveau_channel_put(struct nouveau_channel **pchan)
331{
332 mutex_unlock(&(*pchan)->mutex);
333 nouveau_channel_put_unlocked(pchan);
334}
335
336static void
337nouveau_channel_del(struct kref *ref)
338{
339 struct nouveau_channel *chan =
340 container_of(ref, struct nouveau_channel, ref);
341
317 kfree(chan); 342 kfree(chan);
318} 343}
319 344
345void
346nouveau_channel_ref(struct nouveau_channel *chan,
347 struct nouveau_channel **pchan)
348{
349 if (chan)
350 kref_get(&chan->ref);
351
352 if (*pchan)
353 kref_put(&(*pchan)->ref, nouveau_channel_del);
354
355 *pchan = chan;
356}
357
358void
359nouveau_channel_idle(struct nouveau_channel *chan)
360{
361 struct drm_device *dev = chan->dev;
362 struct nouveau_fence *fence = NULL;
363 int ret;
364
365 nouveau_fence_update(chan);
366
367 if (chan->fence.sequence != chan->fence.sequence_ack) {
368 ret = nouveau_fence_new(chan, &fence, true);
369 if (!ret) {
370 ret = nouveau_fence_wait(fence, false, false);
371 nouveau_fence_unref(&fence);
372 }
373
374 if (ret)
375 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
376 }
377}
378
320/* cleans up all the fifos from file_priv */ 379/* cleans up all the fifos from file_priv */
321void 380void
322nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) 381nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
323{ 382{
324 struct drm_nouveau_private *dev_priv = dev->dev_private; 383 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_engine *engine = &dev_priv->engine; 384 struct nouveau_engine *engine = &dev_priv->engine;
385 struct nouveau_channel *chan;
326 int i; 386 int i;
327 387
328 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); 388 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
329 for (i = 0; i < engine->fifo.channels; i++) { 389 for (i = 0; i < engine->fifo.channels; i++) {
330 struct nouveau_channel *chan = dev_priv->fifos[i]; 390 chan = nouveau_channel_get(dev, file_priv, i);
391 if (IS_ERR(chan))
392 continue;
331 393
332 if (chan && chan->file_priv == file_priv) 394 atomic_dec(&chan->users);
333 nouveau_channel_free(chan); 395 nouveau_channel_put(&chan);
334 } 396 }
335} 397}
336 398
337int
338nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
339 int channel)
340{
341 struct drm_nouveau_private *dev_priv = dev->dev_private;
342 struct nouveau_engine *engine = &dev_priv->engine;
343
344 if (channel >= engine->fifo.channels)
345 return 0;
346 if (dev_priv->fifos[channel] == NULL)
347 return 0;
348
349 return (dev_priv->fifos[channel]->file_priv == file_priv);
350}
351 399
352/*********************************** 400/***********************************
353 * ioctls wrapping the functions 401 * ioctls wrapping the functions
@@ -395,24 +443,26 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
395 /* Named memory object area */ 443 /* Named memory object area */
396 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 444 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
397 &init->notifier_handle); 445 &init->notifier_handle);
398 if (ret) {
399 nouveau_channel_free(chan);
400 return ret;
401 }
402 446
403 return 0; 447 if (ret == 0)
448 atomic_inc(&chan->users); /* userspace reference */
449 nouveau_channel_put(&chan);
450 return ret;
404} 451}
405 452
406static int 453static int
407nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, 454nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
408 struct drm_file *file_priv) 455 struct drm_file *file_priv)
409{ 456{
410 struct drm_nouveau_channel_free *cfree = data; 457 struct drm_nouveau_channel_free *req = data;
411 struct nouveau_channel *chan; 458 struct nouveau_channel *chan;
412 459
413 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); 460 chan = nouveau_channel_get(dev, file_priv, req->channel);
461 if (IS_ERR(chan))
462 return PTR_ERR(chan);
414 463
415 nouveau_channel_free(chan); 464 atomic_dec(&chan->users);
465 nouveau_channel_put(&chan);
416 return 0; 466 return 0;
417} 467}
418 468
@@ -421,18 +471,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
421 ***********************************/ 471 ***********************************/
422 472
423struct drm_ioctl_desc nouveau_ioctls[] = { 473struct drm_ioctl_desc nouveau_ioctls[] = {
424 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), 474 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
425 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 475 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
426 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), 476 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
427 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), 477 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
428 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), 478 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
429 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), 479 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
430 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), 480 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
431 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), 481 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
432 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), 482 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
433 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), 483 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
434 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), 484 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
435 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), 485 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
436}; 486};
437 487
438int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); 488int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e9a3d1..a21e00076839 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
37#include "nouveau_connector.h" 37#include "nouveau_connector.h"
38#include "nouveau_hw.h" 38#include "nouveau_hw.h"
39 39
40static void nouveau_connector_hotplug(void *, int);
41
40static struct nouveau_encoder * 42static struct nouveau_encoder *
41find_encoder_by_type(struct drm_connector *connector, int type) 43find_encoder_by_type(struct drm_connector *connector, int type)
42{ 44{
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
94} 96}
95 97
96static void 98static void
97nouveau_connector_destroy(struct drm_connector *drm_connector) 99nouveau_connector_destroy(struct drm_connector *connector)
98{ 100{
99 struct nouveau_connector *nv_connector = 101 struct nouveau_connector *nv_connector = nouveau_connector(connector);
100 nouveau_connector(drm_connector); 102 struct drm_nouveau_private *dev_priv;
103 struct nouveau_gpio_engine *pgpio;
101 struct drm_device *dev; 104 struct drm_device *dev;
102 105
103 if (!nv_connector) 106 if (!nv_connector)
104 return; 107 return;
105 108
106 dev = nv_connector->base.dev; 109 dev = nv_connector->base.dev;
110 dev_priv = dev->dev_private;
107 NV_DEBUG_KMS(dev, "\n"); 111 NV_DEBUG_KMS(dev, "\n");
108 112
113 pgpio = &dev_priv->engine.gpio;
114 if (pgpio->irq_unregister) {
115 pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
116 nouveau_connector_hotplug, connector);
117 }
118
109 kfree(nv_connector->edid); 119 kfree(nv_connector->edid);
110 drm_sysfs_connector_remove(drm_connector); 120 drm_sysfs_connector_remove(connector);
111 drm_connector_cleanup(drm_connector); 121 drm_connector_cleanup(connector);
112 kfree(drm_connector); 122 kfree(connector);
113} 123}
114 124
115static struct nouveau_i2c_chan * 125static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
760{ 770{
761 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; 771 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
762 struct drm_nouveau_private *dev_priv = dev->dev_private; 772 struct drm_nouveau_private *dev_priv = dev->dev_private;
773 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
763 struct nouveau_connector *nv_connector = NULL; 774 struct nouveau_connector *nv_connector = NULL;
764 struct dcb_connector_table_entry *dcb = NULL; 775 struct dcb_connector_table_entry *dcb = NULL;
765 struct drm_connector *connector; 776 struct drm_connector *connector;
@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
876 break; 887 break;
877 } 888 }
878 889
890 if (pgpio->irq_register) {
891 pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
892 nouveau_connector_hotplug, connector);
893 }
894
879 drm_sysfs_connector_add(connector); 895 drm_sysfs_connector_add(connector);
880 dcb->drm = connector; 896 dcb->drm = connector;
881 return dcb->drm; 897 return dcb->drm;
@@ -886,3 +902,29 @@ fail:
886 return ERR_PTR(ret); 902 return ERR_PTR(ret);
887 903
888} 904}
905
906static void
907nouveau_connector_hotplug(void *data, int plugged)
908{
909 struct drm_connector *connector = data;
910 struct drm_device *dev = connector->dev;
911
912 NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
913 drm_get_connector_name(connector));
914
915 if (connector->encoder && connector->encoder->crtc &&
916 connector->encoder->crtc->enabled) {
917 struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
918 struct drm_encoder_helper_funcs *helper =
919 connector->encoder->helper_private;
920
921 if (nv_encoder->dcb->type == OUTPUT_DP) {
922 if (plugged)
923 helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
924 else
925 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
926 }
927 }
928
929 drm_helper_hpd_irq_event(dev);
930}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd65b4dd..505c6bfb4d75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_fb.h" 30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h" 31#include "nouveau_fbcon.h"
32#include "nouveau_hw.h"
33#include "nouveau_crtc.h"
34#include "nouveau_dma.h"
32 35
33static void 36static void
34nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 37nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
104 .output_poll_changed = nouveau_fbcon_output_poll_changed, 107 .output_poll_changed = nouveau_fbcon_output_poll_changed,
105}; 108};
106 109
110int
111nouveau_vblank_enable(struct drm_device *dev, int crtc)
112{
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114
115 if (dev_priv->card_type >= NV_50)
116 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
117 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
118 else
119 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
120 NV_PCRTC_INTR_0_VBLANK);
121
122 return 0;
123}
124
125void
126nouveau_vblank_disable(struct drm_device *dev, int crtc)
127{
128 struct drm_nouveau_private *dev_priv = dev->dev_private;
129
130 if (dev_priv->card_type >= NV_50)
131 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
132 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
133 else
134 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
135}
136
137static int
138nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
139 struct nouveau_bo *new_bo)
140{
141 int ret;
142
143 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
144 if (ret)
145 return ret;
146
147 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
148 if (ret)
149 goto fail;
150
151 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
152 if (ret)
153 goto fail_unreserve;
154
155 return 0;
156
157fail_unreserve:
158 ttm_bo_unreserve(&new_bo->bo);
159fail:
160 nouveau_bo_unpin(new_bo);
161 return ret;
162}
163
164static void
165nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
166 struct nouveau_bo *new_bo,
167 struct nouveau_fence *fence)
168{
169 nouveau_bo_fence(new_bo, fence);
170 ttm_bo_unreserve(&new_bo->bo);
171
172 nouveau_bo_fence(old_bo, fence);
173 ttm_bo_unreserve(&old_bo->bo);
174
175 nouveau_bo_unpin(old_bo);
176}
177
178static int
179nouveau_page_flip_emit(struct nouveau_channel *chan,
180 struct nouveau_bo *old_bo,
181 struct nouveau_bo *new_bo,
182 struct nouveau_page_flip_state *s,
183 struct nouveau_fence **pfence)
184{
185 struct drm_device *dev = chan->dev;
186 unsigned long flags;
187 int ret;
188
189 /* Queue it to the pending list */
190 spin_lock_irqsave(&dev->event_lock, flags);
191 list_add_tail(&s->head, &chan->nvsw.flip);
192 spin_unlock_irqrestore(&dev->event_lock, flags);
193
194 /* Synchronize with the old framebuffer */
195 ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
196 if (ret)
197 goto fail;
198
199 /* Emit the pageflip */
200 ret = RING_SPACE(chan, 2);
201 if (ret)
202 goto fail;
203
204 BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
205 OUT_RING(chan, 0);
206 FIRE_RING(chan);
207
208 ret = nouveau_fence_new(chan, pfence, true);
209 if (ret)
210 goto fail;
211
212 return 0;
213fail:
214 spin_lock_irqsave(&dev->event_lock, flags);
215 list_del(&s->head);
216 spin_unlock_irqrestore(&dev->event_lock, flags);
217 return ret;
218}
219
220int
221nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
222 struct drm_pending_vblank_event *event)
223{
224 struct drm_device *dev = crtc->dev;
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
227 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
228 struct nouveau_page_flip_state *s;
229 struct nouveau_channel *chan;
230 struct nouveau_fence *fence;
231 int ret;
232
233 if (dev_priv->engine.graph.accel_blocked)
234 return -ENODEV;
235
236 s = kzalloc(sizeof(*s), GFP_KERNEL);
237 if (!s)
238 return -ENOMEM;
239
240 /* Don't let the buffers go away while we flip */
241 ret = nouveau_page_flip_reserve(old_bo, new_bo);
242 if (ret)
243 goto fail_free;
244
245 /* Initialize a page flip struct */
246 *s = (struct nouveau_page_flip_state)
247 { { }, s->event, nouveau_crtc(crtc)->index,
248 fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
249 new_bo->bo.offset };
250
251 /* Choose the channel the flip will be handled in */
252 chan = nouveau_fence_channel(new_bo->bo.sync_obj);
253 if (!chan)
254 chan = nouveau_channel_get_unlocked(dev_priv->channel);
255 mutex_lock(&chan->mutex);
256
257 /* Emit a page flip */
258 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
259 nouveau_channel_put(&chan);
260 if (ret)
261 goto fail_unreserve;
262
263 /* Update the crtc struct and cleanup */
264 crtc->fb = fb;
265
266 nouveau_page_flip_unreserve(old_bo, new_bo, fence);
267 nouveau_fence_unref(&fence);
268 return 0;
269
270fail_unreserve:
271 nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
272fail_free:
273 kfree(s);
274 return ret;
275}
276
277int
278nouveau_finish_page_flip(struct nouveau_channel *chan,
279 struct nouveau_page_flip_state *ps)
280{
281 struct drm_device *dev = chan->dev;
282 struct nouveau_page_flip_state *s;
283 unsigned long flags;
284
285 spin_lock_irqsave(&dev->event_lock, flags);
286
287 if (list_empty(&chan->nvsw.flip)) {
288 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
289 spin_unlock_irqrestore(&dev->event_lock, flags);
290 return -EINVAL;
291 }
292
293 s = list_first_entry(&chan->nvsw.flip,
294 struct nouveau_page_flip_state, head);
295 if (s->event) {
296 struct drm_pending_vblank_event *e = s->event;
297 struct timeval now;
298
299 do_gettimeofday(&now);
300 e->event.sequence = 0;
301 e->event.tv_sec = now.tv_sec;
302 e->event.tv_usec = now.tv_usec;
303 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
304 wake_up_interruptible(&e->base.file_priv->event_wait);
305 }
306
307 list_del(&s->head);
308 *ps = *s;
309 kfree(s);
310
311 spin_unlock_irqrestore(&dev->event_lock, flags);
312 return 0;
313}
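
The flip queued by nouveau_page_flip_emit() is completed when the channel executes the NV_SW_PAGE_FLIP software method it just emitted. A hedged sketch of what such a method handler looks like; the real per-generation handler (e.g. the nv04_graph_mthd_page_flip() declared later in this patch) also points the CRTC at the buffer described by the returned state.

static int
example_mthd_page_flip(struct nouveau_channel *chan,
		       u32 class, u32 mthd, u32 data)
{
	struct nouveau_page_flip_state state;

	if (nouveau_finish_page_flip(chan, &state) == 0) {
		/* state.crtc, state.offset, state.pitch, state.bpp and
		 * state.x/y describe the buffer to scan out from now on;
		 * the pending vblank event, if any, has already been
		 * delivered by nouveau_finish_page_flip(). */
	}

	return 0;
}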
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e600dcd..6ff77cedc008 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -59,17 +59,11 @@ nouveau_dma_init(struct nouveau_channel *chan)
59{ 59{
60 struct drm_device *dev = chan->dev; 60 struct drm_device *dev = chan->dev;
61 struct drm_nouveau_private *dev_priv = dev->dev_private; 61 struct drm_nouveau_private *dev_priv = dev->dev_private;
62 struct nouveau_gpuobj *obj = NULL;
63 int ret, i; 62 int ret, i;
64 63
65 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ 64 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
66 ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? 65 ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
67 0x0039 : 0x5039, &obj); 66 0x0039 : 0x5039);
68 if (ret)
69 return ret;
70
71 ret = nouveau_ramht_insert(chan, NvM2MF, obj);
72 nouveau_gpuobj_ref(NULL, &obj);
73 if (ret) 67 if (ret)
74 return ret; 68 return ret;
75 69
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f309ae3d..38d599554bce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
279 struct bit_displayport_encoder_table *dpe; 279 struct bit_displayport_encoder_table *dpe;
280 int dpe_headerlen; 280 int dpe_headerlen;
281 uint8_t config[4], status[3]; 281 uint8_t config[4], status[3];
282 bool cr_done, cr_max_vs, eq_done; 282 bool cr_done, cr_max_vs, eq_done, hpd_state;
283 int ret = 0, i, tries, voltage; 283 int ret = 0, i, tries, voltage;
284 284
285 NV_DEBUG_KMS(dev, "link training!!\n"); 285 NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
297 /* disable hotplug detect, this flips around on some panels during 297 /* disable hotplug detect, this flips around on some panels during
298 * link training. 298 * link training.
299 */ 299 */
300 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); 300 hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
301 301
302 if (dpe->script0) { 302 if (dpe->script0) {
303 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); 303 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@ stop:
439 } 439 }
440 440
441 /* re-enable hotplug detect */ 441 /* re-enable hotplug detect */
442 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); 442 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
443 443
444 return eq_done; 444 return eq_done;
445} 445}
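
irq_enable() now returns the previous hotplug-IRQ state, presumably so the code restores whatever was configured before link training instead of unconditionally re-enabling HPD. The resulting idiom, as used above:

	bool hpd_state;

	/* some panels toggle the HPD line during link training, so mask it */
	hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
	/* ... clock recovery / channel equalisation ... */
	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);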
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 90875494a65a..bb170570938b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
115int nouveau_perflvl_wr; 115int nouveau_perflvl_wr;
116module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); 116module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
117 117
118MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
119int nouveau_msi;
120module_param_named(msi, nouveau_msi, int, 0400);
121
118int nouveau_fbpercrtc; 122int nouveau_fbpercrtc;
119#if 0 123#if 0
120module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); 124module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -193,23 +197,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
193 197
194 NV_INFO(dev, "Idling channels...\n"); 198 NV_INFO(dev, "Idling channels...\n");
195 for (i = 0; i < pfifo->channels; i++) { 199 for (i = 0; i < pfifo->channels; i++) {
196 struct nouveau_fence *fence = NULL; 200 chan = dev_priv->channels.ptr[i];
197
198 chan = dev_priv->fifos[i];
199 if (!chan || (dev_priv->card_type >= NV_50 &&
200 chan == dev_priv->fifos[0]))
201 continue;
202
203 ret = nouveau_fence_new(chan, &fence, true);
204 if (ret == 0) {
205 ret = nouveau_fence_wait(fence, NULL, false, false);
206 nouveau_fence_unref((void *)&fence);
207 }
208 201
209 if (ret) { 202 if (chan && chan->pushbuf_bo)
210 NV_ERROR(dev, "Failed to idle channel %d for suspend\n", 203 nouveau_channel_idle(chan);
211 chan->id);
212 }
213 } 204 }
214 205
215 pgraph->fifo_access(dev, false); 206 pgraph->fifo_access(dev, false);
@@ -219,17 +210,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
219 pfifo->unload_context(dev); 210 pfifo->unload_context(dev);
220 pgraph->unload_context(dev); 211 pgraph->unload_context(dev);
221 212
222 NV_INFO(dev, "Suspending GPU objects...\n"); 213 ret = pinstmem->suspend(dev);
223 ret = nouveau_gpuobj_suspend(dev);
224 if (ret) { 214 if (ret) {
225 NV_ERROR(dev, "... failed: %d\n", ret); 215 NV_ERROR(dev, "... failed: %d\n", ret);
226 goto out_abort; 216 goto out_abort;
227 } 217 }
228 218
229 ret = pinstmem->suspend(dev); 219 NV_INFO(dev, "Suspending GPU objects...\n");
220 ret = nouveau_gpuobj_suspend(dev);
230 if (ret) { 221 if (ret) {
231 NV_ERROR(dev, "... failed: %d\n", ret); 222 NV_ERROR(dev, "... failed: %d\n", ret);
232 nouveau_gpuobj_suspend_cleanup(dev); 223 pinstmem->resume(dev);
233 goto out_abort; 224 goto out_abort;
234 } 225 }
235 226
@@ -294,17 +285,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
294 } 285 }
295 } 286 }
296 287
288 NV_INFO(dev, "Restoring GPU objects...\n");
289 nouveau_gpuobj_resume(dev);
290
297 NV_INFO(dev, "Reinitialising engines...\n"); 291 NV_INFO(dev, "Reinitialising engines...\n");
298 engine->instmem.resume(dev); 292 engine->instmem.resume(dev);
299 engine->mc.init(dev); 293 engine->mc.init(dev);
300 engine->timer.init(dev); 294 engine->timer.init(dev);
301 engine->fb.init(dev); 295 engine->fb.init(dev);
302 engine->graph.init(dev); 296 engine->graph.init(dev);
297 engine->crypt.init(dev);
303 engine->fifo.init(dev); 298 engine->fifo.init(dev);
304 299
305 NV_INFO(dev, "Restoring GPU objects...\n");
306 nouveau_gpuobj_resume(dev);
307
308 nouveau_irq_postinstall(dev); 300 nouveau_irq_postinstall(dev);
309 301
310 /* Re-write SKIPS, they'll have been lost over the suspend */ 302 /* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +305,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
313 int j; 305 int j;
314 306
315 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 307 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
316 chan = dev_priv->fifos[i]; 308 chan = dev_priv->channels.ptr[i];
317 if (!chan || !chan->pushbuf_bo) 309 if (!chan || !chan->pushbuf_bo)
318 continue; 310 continue;
319 311
@@ -347,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
347 339
348 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 340 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
349 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 341 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
342 u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
350 343
351 nv_crtc->cursor.set_offset(nv_crtc, 344 nv_crtc->cursor.set_offset(nv_crtc, offset);
352 nv_crtc->cursor.nvbo->bo.offset -
353 dev_priv->vm_vram_base);
354
355 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, 345 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
356 nv_crtc->cursor_saved_y); 346 nv_crtc->cursor_saved_y);
357 } 347 }
358 348
359 /* Force CLUT to get re-loaded during modeset */ 349 /* Force CLUT to get re-loaded during modeset */
@@ -393,6 +383,9 @@ static struct drm_driver driver = {
393 .irq_postinstall = nouveau_irq_postinstall, 383 .irq_postinstall = nouveau_irq_postinstall,
394 .irq_uninstall = nouveau_irq_uninstall, 384 .irq_uninstall = nouveau_irq_uninstall,
395 .irq_handler = nouveau_irq_handler, 385 .irq_handler = nouveau_irq_handler,
386 .get_vblank_counter = drm_vblank_count,
387 .enable_vblank = nouveau_vblank_enable,
388 .disable_vblank = nouveau_vblank_disable,
396 .reclaim_buffers = drm_core_reclaim_buffers, 389 .reclaim_buffers = drm_core_reclaim_buffers,
397 .ioctls = nouveau_ioctls, 390 .ioctls = nouveau_ioctls,
398 .fops = { 391 .fops = {
@@ -403,6 +396,7 @@ static struct drm_driver driver = {
403 .mmap = nouveau_ttm_mmap, 396 .mmap = nouveau_ttm_mmap,
404 .poll = drm_poll, 397 .poll = drm_poll,
405 .fasync = drm_fasync, 398 .fasync = drm_fasync,
399 .read = drm_read,
406#if defined(CONFIG_COMPAT) 400#if defined(CONFIG_COMPAT)
407 .compat_ioctl = nouveau_compat_ioctl, 401 .compat_ioctl = nouveau_compat_ioctl,
408#endif 402#endif
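
A hedged sketch of how the new msi parameter is presumably consulted during IRQ setup; the msi_enabled flag is added to drm_nouveau_private later in this patch, but the call site below is illustrative rather than taken from it.

static void
example_enable_msi(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* pci_enable_msi() returns 0 on success */
	if (nouveau_msi && pci_enable_msi(dev->pdev) == 0)
		dev_priv->msi_enabled = true;
}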
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64c03bf..8f13906185b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,36 @@ struct nouveau_fpriv {
54#include "nouveau_drm.h" 54#include "nouveau_drm.h"
55#include "nouveau_reg.h" 55#include "nouveau_reg.h"
56#include "nouveau_bios.h" 56#include "nouveau_bios.h"
57#include "nouveau_util.h"
58
57struct nouveau_grctx; 59struct nouveau_grctx;
60struct nouveau_vram;
61#include "nouveau_vm.h"
58 62
59#define MAX_NUM_DCB_ENTRIES 16 63#define MAX_NUM_DCB_ENTRIES 16
60 64
61#define NOUVEAU_MAX_CHANNEL_NR 128 65#define NOUVEAU_MAX_CHANNEL_NR 128
62#define NOUVEAU_MAX_TILE_NR 15 66#define NOUVEAU_MAX_TILE_NR 15
63 67
64#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) 68struct nouveau_vram {
65#define NV50_VM_BLOCK (512*1024*1024ULL) 69 struct drm_device *dev;
66#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) 70
71 struct nouveau_vma bar_vma;
72
73 struct list_head regions;
74 u32 memtype;
75 u64 offset;
76 u64 size;
77};
67 78
68struct nouveau_tile_reg { 79struct nouveau_tile_reg {
69 struct nouveau_fence *fence;
70 uint32_t addr;
71 uint32_t size;
72 bool used; 80 bool used;
81 uint32_t addr;
82 uint32_t limit;
83 uint32_t pitch;
84 uint32_t zcomp;
85 struct drm_mm_node *tag_mem;
86 struct nouveau_fence *fence;
73}; 87};
74 88
75struct nouveau_bo { 89struct nouveau_bo {
@@ -88,6 +102,7 @@ struct nouveau_bo {
88 102
89 struct nouveau_channel *channel; 103 struct nouveau_channel *channel;
90 104
105 struct nouveau_vma vma;
91 bool mappable; 106 bool mappable;
92 bool no_vm; 107 bool no_vm;
93 108
@@ -96,7 +111,6 @@ struct nouveau_bo {
96 struct nouveau_tile_reg *tile; 111 struct nouveau_tile_reg *tile;
97 112
98 struct drm_gem_object *gem; 113 struct drm_gem_object *gem;
99 struct drm_file *cpu_filp;
100 int pin_refcnt; 114 int pin_refcnt;
101}; 115};
102 116
@@ -133,20 +147,28 @@ enum nouveau_flags {
133 147
134#define NVOBJ_ENGINE_SW 0 148#define NVOBJ_ENGINE_SW 0
135#define NVOBJ_ENGINE_GR 1 149#define NVOBJ_ENGINE_GR 1
136#define NVOBJ_ENGINE_DISPLAY 2 150#define NVOBJ_ENGINE_PPP 2
151#define NVOBJ_ENGINE_COPY 3
152#define NVOBJ_ENGINE_VP 4
153#define NVOBJ_ENGINE_CRYPT 5
154#define NVOBJ_ENGINE_BSP 6
155#define NVOBJ_ENGINE_DISPLAY 0xcafe0001
137#define NVOBJ_ENGINE_INT 0xdeadbeef 156#define NVOBJ_ENGINE_INT 0xdeadbeef
138 157
158#define NVOBJ_FLAG_DONT_MAP (1 << 0)
139#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 159#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
140#define NVOBJ_FLAG_ZERO_FREE (1 << 2) 160#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
161#define NVOBJ_FLAG_VM (1 << 3)
162
163#define NVOBJ_CINST_GLOBAL 0xdeadbeef
164
141struct nouveau_gpuobj { 165struct nouveau_gpuobj {
142 struct drm_device *dev; 166 struct drm_device *dev;
143 struct kref refcount; 167 struct kref refcount;
144 struct list_head list; 168 struct list_head list;
145 169
146 struct drm_mm_node *im_pramin; 170 void *node;
147 struct nouveau_bo *im_backing; 171 u32 *suspend;
148 uint32_t *im_backing_suspend;
149 int im_bound;
150 172
151 uint32_t flags; 173 uint32_t flags;
152 174
@@ -162,10 +184,29 @@ struct nouveau_gpuobj {
162 void *priv; 184 void *priv;
163}; 185};
164 186
187struct nouveau_page_flip_state {
188 struct list_head head;
189 struct drm_pending_vblank_event *event;
190 int crtc, bpp, pitch, x, y;
191 uint64_t offset;
192};
193
194enum nouveau_channel_mutex_class {
195 NOUVEAU_UCHANNEL_MUTEX,
196 NOUVEAU_KCHANNEL_MUTEX
197};
198
165struct nouveau_channel { 199struct nouveau_channel {
166 struct drm_device *dev; 200 struct drm_device *dev;
167 int id; 201 int id;
168 202
203 /* references to the channel data structure */
204 struct kref ref;
205 /* users of the hardware channel resources, the hardware
206 * context will be kicked off when it reaches zero. */
207 atomic_t users;
208 struct mutex mutex;
209
169 /* owner of this fifo */ 210 /* owner of this fifo */
170 struct drm_file *file_priv; 211 struct drm_file *file_priv;
171 /* mapping of the fifo itself */ 212 /* mapping of the fifo itself */
@@ -202,12 +243,12 @@ struct nouveau_channel {
202 /* PGRAPH context */ 243 /* PGRAPH context */
203 /* XXX may be merge 2 pointers as private data ??? */ 244 /* XXX may be merge 2 pointers as private data ??? */
204 struct nouveau_gpuobj *ramin_grctx; 245 struct nouveau_gpuobj *ramin_grctx;
246 struct nouveau_gpuobj *crypt_ctx;
205 void *pgraph_ctx; 247 void *pgraph_ctx;
206 248
207 /* NV50 VM */ 249 /* NV50 VM */
250 struct nouveau_vm *vm;
208 struct nouveau_gpuobj *vm_pd; 251 struct nouveau_gpuobj *vm_pd;
209 struct nouveau_gpuobj *vm_gart_pt;
210 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
211 252
212 /* Objects */ 253 /* Objects */
213 struct nouveau_gpuobj *ramin; /* Private instmem */ 254 struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +279,11 @@ struct nouveau_channel {
238 279
239 struct { 280 struct {
240 struct nouveau_gpuobj *vblsem; 281 struct nouveau_gpuobj *vblsem;
282 uint32_t vblsem_head;
241 uint32_t vblsem_offset; 283 uint32_t vblsem_offset;
242 uint32_t vblsem_rval; 284 uint32_t vblsem_rval;
243 struct list_head vbl_wait; 285 struct list_head vbl_wait;
286 struct list_head flip;
244 } nvsw; 287 } nvsw;
245 288
246 struct { 289 struct {
@@ -258,11 +301,11 @@ struct nouveau_instmem_engine {
258 int (*suspend)(struct drm_device *dev); 301 int (*suspend)(struct drm_device *dev);
259 void (*resume)(struct drm_device *dev); 302 void (*resume)(struct drm_device *dev);
260 303
261 int (*populate)(struct drm_device *, struct nouveau_gpuobj *, 304 int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
262 uint32_t *size); 305 void (*put)(struct nouveau_gpuobj *);
263 void (*clear)(struct drm_device *, struct nouveau_gpuobj *); 306 int (*map)(struct nouveau_gpuobj *);
264 int (*bind)(struct drm_device *, struct nouveau_gpuobj *); 307 void (*unmap)(struct nouveau_gpuobj *);
265 int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); 308
266 void (*flush)(struct drm_device *); 309 void (*flush)(struct drm_device *);
267}; 310};
268 311
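
The populate/clear/bind/unbind hooks become a get/put plus map/unmap pair operating directly on the gpuobj. A loose sketch of how they pair up, assuming the real callers (nouveau_object.c and the per-chipset instmem code) keep the object alive between get() and put():

static int
example_instmem_cycle(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
		      u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	int ret;

	ret = pinstmem->get(gpuobj, size, align);	/* allocate backing */
	if (ret)
		return ret;

	ret = pinstmem->map(gpuobj);			/* make it CPU-visible */
	if (ret == 0) {
		/* ... program the object's contents ... */
		pinstmem->unmap(gpuobj);
	}

	pinstmem->put(gpuobj);				/* release backing */
	return ret;
}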
@@ -279,12 +322,17 @@ struct nouveau_timer_engine {
279 322
280struct nouveau_fb_engine { 323struct nouveau_fb_engine {
281 int num_tiles; 324 int num_tiles;
325 struct drm_mm tag_heap;
326 void *priv;
282 327
283 int (*init)(struct drm_device *dev); 328 int (*init)(struct drm_device *dev);
284 void (*takedown)(struct drm_device *dev); 329 void (*takedown)(struct drm_device *dev);
285 330
286 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 331 void (*init_tile_region)(struct drm_device *dev, int i,
287 uint32_t size, uint32_t pitch); 332 uint32_t addr, uint32_t size,
333 uint32_t pitch, uint32_t flags);
334 void (*set_tile_region)(struct drm_device *dev, int i);
335 void (*free_tile_region)(struct drm_device *dev, int i);
288}; 336};
289 337
290struct nouveau_fifo_engine { 338struct nouveau_fifo_engine {
@@ -310,21 +358,9 @@ struct nouveau_fifo_engine {
310 void (*tlb_flush)(struct drm_device *dev); 358 void (*tlb_flush)(struct drm_device *dev);
311}; 359};
312 360
313struct nouveau_pgraph_object_method {
314 int id;
315 int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
316 uint32_t data);
317};
318
319struct nouveau_pgraph_object_class {
320 int id;
321 bool software;
322 struct nouveau_pgraph_object_method *methods;
323};
324
325struct nouveau_pgraph_engine { 361struct nouveau_pgraph_engine {
326 struct nouveau_pgraph_object_class *grclass;
327 bool accel_blocked; 362 bool accel_blocked;
363 bool registered;
328 int grctx_size; 364 int grctx_size;
329 365
330 /* NV2x/NV3x context table (0x400780) */ 366 /* NV2x/NV3x context table (0x400780) */
@@ -342,8 +378,7 @@ struct nouveau_pgraph_engine {
342 int (*unload_context)(struct drm_device *); 378 int (*unload_context)(struct drm_device *);
343 void (*tlb_flush)(struct drm_device *dev); 379 void (*tlb_flush)(struct drm_device *dev);
344 380
345 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 381 void (*set_tile_region)(struct drm_device *dev, int i);
346 uint32_t size, uint32_t pitch);
347}; 382};
348 383
349struct nouveau_display_engine { 384struct nouveau_display_engine {
@@ -355,13 +390,19 @@ struct nouveau_display_engine {
355}; 390};
356 391
357struct nouveau_gpio_engine { 392struct nouveau_gpio_engine {
393 void *priv;
394
358 int (*init)(struct drm_device *); 395 int (*init)(struct drm_device *);
359 void (*takedown)(struct drm_device *); 396 void (*takedown)(struct drm_device *);
360 397
361 int (*get)(struct drm_device *, enum dcb_gpio_tag); 398 int (*get)(struct drm_device *, enum dcb_gpio_tag);
362 int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); 399 int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
363 400
364 void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); 401 int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
402 void (*)(void *, int), void *);
403 void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
404 void (*)(void *, int), void *);
405 bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
365}; 406};
366 407
367struct nouveau_pm_voltage_level { 408struct nouveau_pm_voltage_level {
@@ -437,6 +478,7 @@ struct nouveau_pm_engine {
437 struct nouveau_pm_level *cur; 478 struct nouveau_pm_level *cur;
438 479
439 struct device *hwmon; 480 struct device *hwmon;
481 struct notifier_block acpi_nb;
440 482
441 int (*clock_get)(struct drm_device *, u32 id); 483 int (*clock_get)(struct drm_device *, u32 id);
442 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, 484 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +491,25 @@ struct nouveau_pm_engine {
449 int (*temp_get)(struct drm_device *); 491 int (*temp_get)(struct drm_device *);
450}; 492};
451 493
494struct nouveau_crypt_engine {
495 bool registered;
496
497 int (*init)(struct drm_device *);
498 void (*takedown)(struct drm_device *);
499 int (*create_context)(struct nouveau_channel *);
500 void (*destroy_context)(struct nouveau_channel *);
501 void (*tlb_flush)(struct drm_device *dev);
502};
503
504struct nouveau_vram_engine {
505 int (*init)(struct drm_device *);
506 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
507 u32 type, struct nouveau_vram **);
508 void (*put)(struct drm_device *, struct nouveau_vram **);
509
510 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
511};
512
452struct nouveau_engine { 513struct nouveau_engine {
453 struct nouveau_instmem_engine instmem; 514 struct nouveau_instmem_engine instmem;
454 struct nouveau_mc_engine mc; 515 struct nouveau_mc_engine mc;
@@ -459,6 +520,8 @@ struct nouveau_engine {
459 struct nouveau_display_engine display; 520 struct nouveau_display_engine display;
460 struct nouveau_gpio_engine gpio; 521 struct nouveau_gpio_engine gpio;
461 struct nouveau_pm_engine pm; 522 struct nouveau_pm_engine pm;
523 struct nouveau_crypt_engine crypt;
524 struct nouveau_vram_engine vram;
462}; 525};
463 526
464struct nouveau_pll_vals { 527struct nouveau_pll_vals {
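
The new vram engine centralises VRAM allocation behind get/put hooks. Illustrative only; the meaning of the size_nc argument is inferred from context, not spelled out in this hunk.

static int
example_vram_cycle(struct drm_device *dev, u64 size, u32 align, u32 type)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *pvram = &dev_priv->engine.vram;
	struct nouveau_vram *node = NULL;
	int ret;

	/* size_nc = 0: presumably "no non-contiguous allocation allowed" */
	ret = pvram->get(dev, size, align, 0, type, &node);
	if (ret)
		return ret;

	pvram->put(dev, &node);
	return 0;
}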
@@ -577,18 +640,15 @@ struct drm_nouveau_private {
577 bool ramin_available; 640 bool ramin_available;
578 struct drm_mm ramin_heap; 641 struct drm_mm ramin_heap;
579 struct list_head gpuobj_list; 642 struct list_head gpuobj_list;
643 struct list_head classes;
580 644
581 struct nouveau_bo *vga_ram; 645 struct nouveau_bo *vga_ram;
582 646
647 /* interrupt handling */
648 void (*irq_handler[32])(struct drm_device *);
649 bool msi_enabled;
583 struct workqueue_struct *wq; 650 struct workqueue_struct *wq;
584 struct work_struct irq_work; 651 struct work_struct irq_work;
585 struct work_struct hpd_work;
586
587 struct {
588 spinlock_t lock;
589 uint32_t hpd0_bits;
590 uint32_t hpd1_bits;
591 } hpd_state;
592 652
593 struct list_head vbl_waiting; 653 struct list_head vbl_waiting;
594 654
@@ -605,8 +665,10 @@ struct drm_nouveau_private {
605 struct nouveau_bo *bo; 665 struct nouveau_bo *bo;
606 } fence; 666 } fence;
607 667
608 int fifo_alloc_count; 668 struct {
609 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 669 spinlock_t lock;
670 struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
671 } channels;
610 672
611 struct nouveau_engine engine; 673 struct nouveau_engine engine;
612 struct nouveau_channel *channel; 674 struct nouveau_channel *channel;
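
The flat fifos[] array becomes a locked, reference-counted channels table, and the NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro removed further down is replaced by nouveau_channel_get()/nouveau_channel_put(). A sketch of the new lookup pattern, assuming (as the page-flip code above suggests) that failures are reported via ERR_PTR and that the non-_unlocked getter also takes chan->mutex:

static int
example_channel_op(struct drm_device *dev, struct drm_file *file_priv, int id)
{
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... chan is referenced (and presumably locked) here ... */

	nouveau_channel_put(&chan);
	return 0;
}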
@@ -632,12 +694,14 @@ struct drm_nouveau_private {
632 uint64_t aper_free; 694 uint64_t aper_free;
633 695
634 struct nouveau_gpuobj *sg_ctxdma; 696 struct nouveau_gpuobj *sg_ctxdma;
635 struct page *sg_dummy_page; 697 struct nouveau_vma vma;
636 dma_addr_t sg_dummy_bus;
637 } gart_info; 698 } gart_info;
638 699
639 /* nv10-nv40 tiling regions */ 700 /* nv10-nv40 tiling regions */
640 struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; 701 struct {
702 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
703 spinlock_t lock;
704 } tile;
641 705
642 /* VRAM/fb configuration */ 706 /* VRAM/fb configuration */
643 uint64_t vram_size; 707 uint64_t vram_size;
@@ -650,14 +714,12 @@ struct drm_nouveau_private {
650 uint64_t fb_aper_free; 714 uint64_t fb_aper_free;
651 int fb_mtrr; 715 int fb_mtrr;
652 716
717 /* BAR control (NV50-) */
718 struct nouveau_vm *bar1_vm;
719 struct nouveau_vm *bar3_vm;
720
653 /* G8x/G9x virtual address space */ 721 /* G8x/G9x virtual address space */
654 uint64_t vm_gart_base; 722 struct nouveau_vm *chan_vm;
655 uint64_t vm_gart_size;
656 uint64_t vm_vram_base;
657 uint64_t vm_vram_size;
658 uint64_t vm_end;
659 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
660 int vm_vram_pt_nr;
661 723
662 struct nvbios vbios; 724 struct nvbios vbios;
663 725
@@ -674,6 +736,7 @@ struct drm_nouveau_private {
674 struct backlight_device *backlight; 736 struct backlight_device *backlight;
675 737
676 struct nouveau_channel *evo; 738 struct nouveau_channel *evo;
739 u32 evo_alloc;
677 struct { 740 struct {
678 struct dcb_entry *dcb; 741 struct dcb_entry *dcb;
679 u16 script; 742 u16 script;
@@ -719,16 +782,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
719 return 0; 782 return 0;
720} 783}
721 784
722#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
723 struct drm_nouveau_private *nv = dev->dev_private; \
724 if (!nouveau_channel_owner(dev, (cl), (id))) { \
725 NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
726 DRM_CURRENTPID, (id)); \
727 return -EPERM; \
728 } \
729 (ch) = nv->fifos[(id)]; \
730} while (0)
731
732/* nouveau_drv.c */ 785/* nouveau_drv.c */
733extern int nouveau_agpmode; 786extern int nouveau_agpmode;
734extern int nouveau_duallink; 787extern int nouveau_duallink;
@@ -748,6 +801,7 @@ extern int nouveau_force_post;
748extern int nouveau_override_conntype; 801extern int nouveau_override_conntype;
749extern char *nouveau_perflvl; 802extern char *nouveau_perflvl;
750extern int nouveau_perflvl_wr; 803extern int nouveau_perflvl_wr;
804extern int nouveau_msi;
751 805
752extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); 806extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
753extern int nouveau_pci_resume(struct pci_dev *pdev); 807extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +816,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
762 struct drm_file *); 816 struct drm_file *);
763extern int nouveau_ioctl_setparam(struct drm_device *, void *data, 817extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
764 struct drm_file *); 818 struct drm_file *);
765extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, 819extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
766 uint32_t reg, uint32_t mask, uint32_t val); 820 uint32_t reg, uint32_t mask, uint32_t val);
821extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
822 uint32_t reg, uint32_t mask, uint32_t val);
767extern bool nouveau_wait_for_idle(struct drm_device *); 823extern bool nouveau_wait_for_idle(struct drm_device *);
768extern int nouveau_card_init(struct drm_device *); 824extern int nouveau_card_init(struct drm_device *);
769 825
@@ -775,18 +831,15 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
775extern int nouveau_mem_init_agp(struct drm_device *); 831extern int nouveau_mem_init_agp(struct drm_device *);
776extern int nouveau_mem_reset_agp(struct drm_device *); 832extern int nouveau_mem_reset_agp(struct drm_device *);
777extern void nouveau_mem_close(struct drm_device *); 833extern void nouveau_mem_close(struct drm_device *);
778extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, 834extern int nouveau_mem_detect(struct drm_device *);
779 uint32_t addr, 835extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
780 uint32_t size, 836extern struct nouveau_tile_reg *nv10_mem_set_tiling(
781 uint32_t pitch); 837 struct drm_device *dev, uint32_t addr, uint32_t size,
782extern void nv10_mem_expire_tiling(struct drm_device *dev, 838 uint32_t pitch, uint32_t flags);
783 struct nouveau_tile_reg *tile, 839extern void nv10_mem_put_tile_region(struct drm_device *dev,
784 struct nouveau_fence *fence); 840 struct nouveau_tile_reg *tile,
785extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, 841 struct nouveau_fence *fence);
786 uint32_t size, uint32_t flags, 842extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
787 uint64_t phys);
788extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
789 uint32_t size);
790 843
791/* nouveau_notifier.c */ 844/* nouveau_notifier.c */
792extern int nouveau_notifier_init_channel(struct nouveau_channel *); 845extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +856,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
803extern struct drm_ioctl_desc nouveau_ioctls[]; 856extern struct drm_ioctl_desc nouveau_ioctls[];
804extern int nouveau_max_ioctl; 857extern int nouveau_max_ioctl;
805extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); 858extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
806extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
807 int channel);
808extern int nouveau_channel_alloc(struct drm_device *dev, 859extern int nouveau_channel_alloc(struct drm_device *dev,
809 struct nouveau_channel **chan, 860 struct nouveau_channel **chan,
810 struct drm_file *file_priv, 861 struct drm_file *file_priv,
811 uint32_t fb_ctxdma, uint32_t tt_ctxdma); 862 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
812extern void nouveau_channel_free(struct nouveau_channel *); 863extern struct nouveau_channel *
864nouveau_channel_get_unlocked(struct nouveau_channel *);
865extern struct nouveau_channel *
866nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
867extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
868extern void nouveau_channel_put(struct nouveau_channel **);
869extern void nouveau_channel_ref(struct nouveau_channel *chan,
870 struct nouveau_channel **pchan);
871extern void nouveau_channel_idle(struct nouveau_channel *chan);
813 872
814/* nouveau_object.c */ 873/* nouveau_object.c */
874#define NVOBJ_CLASS(d,c,e) do { \
875 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
876 if (ret) \
877 return ret; \
878} while(0)
879
880#define NVOBJ_MTHD(d,c,m,e) do { \
881 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
882 if (ret) \
883 return ret; \
884} while(0)
885
815extern int nouveau_gpuobj_early_init(struct drm_device *); 886extern int nouveau_gpuobj_early_init(struct drm_device *);
816extern int nouveau_gpuobj_init(struct drm_device *); 887extern int nouveau_gpuobj_init(struct drm_device *);
817extern void nouveau_gpuobj_takedown(struct drm_device *); 888extern void nouveau_gpuobj_takedown(struct drm_device *);
818extern int nouveau_gpuobj_suspend(struct drm_device *dev); 889extern int nouveau_gpuobj_suspend(struct drm_device *dev);
819extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
820extern void nouveau_gpuobj_resume(struct drm_device *dev); 890extern void nouveau_gpuobj_resume(struct drm_device *dev);
891extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
892extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
893 int (*exec)(struct nouveau_channel *,
894 u32 class, u32 mthd, u32 data));
895extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
896extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
821extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, 897extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
822 uint32_t vram_h, uint32_t tt_h); 898 uint32_t vram_h, uint32_t tt_h);
823extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); 899extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
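
The static grclass tables give way to runtime registration through these macros. A sketch of their intended use; whether the nv04 code registers exactly this class/method pair is not shown in this excerpt, but nv04_graph_mthd_page_flip() is declared with the matching exec signature further down.

static int
example_register_sw_class(struct drm_device *dev)
{
	/* both macros bail out with the error code on failure, so they
	 * must be used from a function returning int */
	NVOBJ_CLASS(dev, NV_SW, SW);
	NVOBJ_MTHD(dev, NV_SW, NV_SW_PAGE_FLIP, nv04_graph_mthd_page_flip);
	return 0;
}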
@@ -832,21 +908,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
832extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, 908extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
833 uint64_t offset, uint64_t size, int access, 909 uint64_t offset, uint64_t size, int access,
834 int target, struct nouveau_gpuobj **); 910 int target, struct nouveau_gpuobj **);
835extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, 911extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
836 uint64_t offset, uint64_t size, 912extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
837 int access, struct nouveau_gpuobj **, 913 u64 size, int target, int access, u32 type,
838 uint32_t *o_ret); 914 u32 comp, struct nouveau_gpuobj **pobj);
839extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, 915extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
840 struct nouveau_gpuobj **); 916 int class, u64 base, u64 size, int target,
841extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, 917 int access, u32 type, u32 comp);
842 struct nouveau_gpuobj **);
843extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, 918extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
844 struct drm_file *); 919 struct drm_file *);
845extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, 920extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
846 struct drm_file *); 921 struct drm_file *);
847 922
848/* nouveau_irq.c */ 923/* nouveau_irq.c */
924extern int nouveau_irq_init(struct drm_device *);
925extern void nouveau_irq_fini(struct drm_device *);
849extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); 926extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
927extern void nouveau_irq_register(struct drm_device *, int status_bit,
928 void (*)(struct drm_device *));
929extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
850extern void nouveau_irq_preinstall(struct drm_device *); 930extern void nouveau_irq_preinstall(struct drm_device *);
851extern int nouveau_irq_postinstall(struct drm_device *); 931extern int nouveau_irq_postinstall(struct drm_device *);
852extern void nouveau_irq_uninstall(struct drm_device *); 932extern void nouveau_irq_uninstall(struct drm_device *);
@@ -854,8 +934,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
854/* nouveau_sgdma.c */ 934/* nouveau_sgdma.c */
855extern int nouveau_sgdma_init(struct drm_device *); 935extern int nouveau_sgdma_init(struct drm_device *);
856extern void nouveau_sgdma_takedown(struct drm_device *); 936extern void nouveau_sgdma_takedown(struct drm_device *);
857extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, 937extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
858 uint32_t *page); 938 uint32_t offset);
859extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); 939extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
860 940
861/* nouveau_debugfs.c */ 941/* nouveau_debugfs.c */
@@ -966,18 +1046,25 @@ extern void nv04_fb_takedown(struct drm_device *);
966/* nv10_fb.c */ 1046/* nv10_fb.c */
967extern int nv10_fb_init(struct drm_device *); 1047extern int nv10_fb_init(struct drm_device *);
968extern void nv10_fb_takedown(struct drm_device *); 1048extern void nv10_fb_takedown(struct drm_device *);
969extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, 1049extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
970 uint32_t, uint32_t); 1050 uint32_t addr, uint32_t size,
1051 uint32_t pitch, uint32_t flags);
1052extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
1053extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
971 1054
972/* nv30_fb.c */ 1055/* nv30_fb.c */
973extern int nv30_fb_init(struct drm_device *); 1056extern int nv30_fb_init(struct drm_device *);
974extern void nv30_fb_takedown(struct drm_device *); 1057extern void nv30_fb_takedown(struct drm_device *);
1058extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
1059 uint32_t addr, uint32_t size,
1060 uint32_t pitch, uint32_t flags);
1061extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
975 1062
976/* nv40_fb.c */ 1063/* nv40_fb.c */
977extern int nv40_fb_init(struct drm_device *); 1064extern int nv40_fb_init(struct drm_device *);
978extern void nv40_fb_takedown(struct drm_device *); 1065extern void nv40_fb_takedown(struct drm_device *);
979extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, 1066extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
980 uint32_t, uint32_t); 1067
981/* nv50_fb.c */ 1068/* nv50_fb.c */
982extern int nv50_fb_init(struct drm_device *); 1069extern int nv50_fb_init(struct drm_device *);
983extern void nv50_fb_takedown(struct drm_device *); 1070extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1076,7 @@ extern void nvc0_fb_takedown(struct drm_device *);
989 1076
990/* nv04_fifo.c */ 1077/* nv04_fifo.c */
991extern int nv04_fifo_init(struct drm_device *); 1078extern int nv04_fifo_init(struct drm_device *);
1079extern void nv04_fifo_fini(struct drm_device *);
992extern void nv04_fifo_disable(struct drm_device *); 1080extern void nv04_fifo_disable(struct drm_device *);
993extern void nv04_fifo_enable(struct drm_device *); 1081extern void nv04_fifo_enable(struct drm_device *);
994extern bool nv04_fifo_reassign(struct drm_device *, bool); 1082extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1086,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *);
998extern void nv04_fifo_destroy_context(struct nouveau_channel *); 1086extern void nv04_fifo_destroy_context(struct nouveau_channel *);
999extern int nv04_fifo_load_context(struct nouveau_channel *); 1087extern int nv04_fifo_load_context(struct nouveau_channel *);
1000extern int nv04_fifo_unload_context(struct drm_device *); 1088extern int nv04_fifo_unload_context(struct drm_device *);
1089extern void nv04_fifo_isr(struct drm_device *);
1001 1090
1002/* nv10_fifo.c */ 1091/* nv10_fifo.c */
1003extern int nv10_fifo_init(struct drm_device *); 1092extern int nv10_fifo_init(struct drm_device *);
1004extern int nv10_fifo_channel_id(struct drm_device *); 1093extern int nv10_fifo_channel_id(struct drm_device *);
1005extern int nv10_fifo_create_context(struct nouveau_channel *); 1094extern int nv10_fifo_create_context(struct nouveau_channel *);
1006extern void nv10_fifo_destroy_context(struct nouveau_channel *);
1007extern int nv10_fifo_load_context(struct nouveau_channel *); 1095extern int nv10_fifo_load_context(struct nouveau_channel *);
1008extern int nv10_fifo_unload_context(struct drm_device *); 1096extern int nv10_fifo_unload_context(struct drm_device *);
1009 1097
1010/* nv40_fifo.c */ 1098/* nv40_fifo.c */
1011extern int nv40_fifo_init(struct drm_device *); 1099extern int nv40_fifo_init(struct drm_device *);
1012extern int nv40_fifo_create_context(struct nouveau_channel *); 1100extern int nv40_fifo_create_context(struct nouveau_channel *);
1013extern void nv40_fifo_destroy_context(struct nouveau_channel *);
1014extern int nv40_fifo_load_context(struct nouveau_channel *); 1101extern int nv40_fifo_load_context(struct nouveau_channel *);
1015extern int nv40_fifo_unload_context(struct drm_device *); 1102extern int nv40_fifo_unload_context(struct drm_device *);
1016 1103
@@ -1038,7 +1125,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
1038extern int nvc0_fifo_unload_context(struct drm_device *); 1125extern int nvc0_fifo_unload_context(struct drm_device *);
1039 1126
1040/* nv04_graph.c */ 1127/* nv04_graph.c */
1041extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
1042extern int nv04_graph_init(struct drm_device *); 1128extern int nv04_graph_init(struct drm_device *);
1043extern void nv04_graph_takedown(struct drm_device *); 1129extern void nv04_graph_takedown(struct drm_device *);
1044extern void nv04_graph_fifo_access(struct drm_device *, bool); 1130extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1133,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
1047extern void nv04_graph_destroy_context(struct nouveau_channel *); 1133extern void nv04_graph_destroy_context(struct nouveau_channel *);
1048extern int nv04_graph_load_context(struct nouveau_channel *); 1134extern int nv04_graph_load_context(struct nouveau_channel *);
1049extern int nv04_graph_unload_context(struct drm_device *); 1135extern int nv04_graph_unload_context(struct drm_device *);
1050extern void nv04_graph_context_switch(struct drm_device *); 1136extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
1137 u32 class, u32 mthd, u32 data);
1138extern struct nouveau_bitfield nv04_graph_nsource[];
1051 1139
1052/* nv10_graph.c */ 1140/* nv10_graph.c */
1053extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
1054extern int nv10_graph_init(struct drm_device *); 1141extern int nv10_graph_init(struct drm_device *);
1055extern void nv10_graph_takedown(struct drm_device *); 1142extern void nv10_graph_takedown(struct drm_device *);
1056extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); 1143extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1145,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *);
1058extern void nv10_graph_destroy_context(struct nouveau_channel *); 1145extern void nv10_graph_destroy_context(struct nouveau_channel *);
1059extern int nv10_graph_load_context(struct nouveau_channel *); 1146extern int nv10_graph_load_context(struct nouveau_channel *);
1060extern int nv10_graph_unload_context(struct drm_device *); 1147extern int nv10_graph_unload_context(struct drm_device *);
1061extern void nv10_graph_context_switch(struct drm_device *); 1148extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
1062extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, 1149extern struct nouveau_bitfield nv10_graph_intr[];
1063 uint32_t, uint32_t); 1150extern struct nouveau_bitfield nv10_graph_nstatus[];
1064 1151
1065/* nv20_graph.c */ 1152/* nv20_graph.c */
1066extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
1067extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
1068extern int nv20_graph_create_context(struct nouveau_channel *); 1153extern int nv20_graph_create_context(struct nouveau_channel *);
1069extern void nv20_graph_destroy_context(struct nouveau_channel *); 1154extern void nv20_graph_destroy_context(struct nouveau_channel *);
1070extern int nv20_graph_load_context(struct nouveau_channel *); 1155extern int nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1157,9 @@ extern int nv20_graph_unload_context(struct drm_device *);
1072extern int nv20_graph_init(struct drm_device *); 1157extern int nv20_graph_init(struct drm_device *);
1073extern void nv20_graph_takedown(struct drm_device *); 1158extern void nv20_graph_takedown(struct drm_device *);
1074extern int nv30_graph_init(struct drm_device *); 1159extern int nv30_graph_init(struct drm_device *);
1075extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, 1160extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
1076 uint32_t, uint32_t);
1077 1161
1078/* nv40_graph.c */ 1162/* nv40_graph.c */
1079extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
1080extern int nv40_graph_init(struct drm_device *); 1163extern int nv40_graph_init(struct drm_device *);
1081extern void nv40_graph_takedown(struct drm_device *); 1164extern void nv40_graph_takedown(struct drm_device *);
1082extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); 1165extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1168,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
1085extern int nv40_graph_load_context(struct nouveau_channel *); 1168extern int nv40_graph_load_context(struct nouveau_channel *);
1086extern int nv40_graph_unload_context(struct drm_device *); 1169extern int nv40_graph_unload_context(struct drm_device *);
1087extern void nv40_grctx_init(struct nouveau_grctx *); 1170extern void nv40_grctx_init(struct nouveau_grctx *);
1088extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, 1171extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
1089 uint32_t, uint32_t);
1090 1172
1091/* nv50_graph.c */ 1173/* nv50_graph.c */
1092extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
1093extern int nv50_graph_init(struct drm_device *); 1174extern int nv50_graph_init(struct drm_device *);
1094extern void nv50_graph_takedown(struct drm_device *); 1175extern void nv50_graph_takedown(struct drm_device *);
1095extern void nv50_graph_fifo_access(struct drm_device *, bool); 1176extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,7 +1179,6 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
1098extern void nv50_graph_destroy_context(struct nouveau_channel *); 1179extern void nv50_graph_destroy_context(struct nouveau_channel *);
1099extern int nv50_graph_load_context(struct nouveau_channel *); 1180extern int nv50_graph_load_context(struct nouveau_channel *);
1100extern int nv50_graph_unload_context(struct drm_device *); 1181extern int nv50_graph_unload_context(struct drm_device *);
1101extern void nv50_graph_context_switch(struct drm_device *);
1102extern int nv50_grctx_init(struct nouveau_grctx *); 1182extern int nv50_grctx_init(struct nouveau_grctx *);
1103extern void nv50_graph_tlb_flush(struct drm_device *dev); 1183extern void nv50_graph_tlb_flush(struct drm_device *dev);
1104extern void nv86_graph_tlb_flush(struct drm_device *dev); 1184extern void nv86_graph_tlb_flush(struct drm_device *dev);
@@ -1113,16 +1193,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *);
1113extern int nvc0_graph_load_context(struct nouveau_channel *); 1193extern int nvc0_graph_load_context(struct nouveau_channel *);
1114extern int nvc0_graph_unload_context(struct drm_device *); 1194extern int nvc0_graph_unload_context(struct drm_device *);
1115 1195
1196/* nv84_crypt.c */
1197extern int nv84_crypt_init(struct drm_device *dev);
1198extern void nv84_crypt_fini(struct drm_device *dev);
1199extern int nv84_crypt_create_context(struct nouveau_channel *);
1200extern void nv84_crypt_destroy_context(struct nouveau_channel *);
1201extern void nv84_crypt_tlb_flush(struct drm_device *dev);
1202
1116/* nv04_instmem.c */ 1203/* nv04_instmem.c */
1117extern int nv04_instmem_init(struct drm_device *); 1204extern int nv04_instmem_init(struct drm_device *);
1118extern void nv04_instmem_takedown(struct drm_device *); 1205extern void nv04_instmem_takedown(struct drm_device *);
1119extern int nv04_instmem_suspend(struct drm_device *); 1206extern int nv04_instmem_suspend(struct drm_device *);
1120extern void nv04_instmem_resume(struct drm_device *); 1207extern void nv04_instmem_resume(struct drm_device *);
1121extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, 1208extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
1122 uint32_t *size); 1209extern void nv04_instmem_put(struct nouveau_gpuobj *);
1123extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); 1210extern int nv04_instmem_map(struct nouveau_gpuobj *);
1124extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); 1211extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
1125extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1126extern void nv04_instmem_flush(struct drm_device *); 1212extern void nv04_instmem_flush(struct drm_device *);
1127 1213
1128/* nv50_instmem.c */ 1214/* nv50_instmem.c */
@@ -1130,25 +1216,22 @@ extern int nv50_instmem_init(struct drm_device *);
1130extern void nv50_instmem_takedown(struct drm_device *); 1216extern void nv50_instmem_takedown(struct drm_device *);
1131extern int nv50_instmem_suspend(struct drm_device *); 1217extern int nv50_instmem_suspend(struct drm_device *);
1132extern void nv50_instmem_resume(struct drm_device *); 1218extern void nv50_instmem_resume(struct drm_device *);
1133extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, 1219extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
1134 uint32_t *size); 1220extern void nv50_instmem_put(struct nouveau_gpuobj *);
1135extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); 1221extern int nv50_instmem_map(struct nouveau_gpuobj *);
1136extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); 1222extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
1137extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1138extern void nv50_instmem_flush(struct drm_device *); 1223extern void nv50_instmem_flush(struct drm_device *);
1139extern void nv84_instmem_flush(struct drm_device *); 1224extern void nv84_instmem_flush(struct drm_device *);
1140extern void nv50_vm_flush(struct drm_device *, int engine);
1141 1225
1142/* nvc0_instmem.c */ 1226/* nvc0_instmem.c */
1143extern int nvc0_instmem_init(struct drm_device *); 1227extern int nvc0_instmem_init(struct drm_device *);
1144extern void nvc0_instmem_takedown(struct drm_device *); 1228extern void nvc0_instmem_takedown(struct drm_device *);
1145extern int nvc0_instmem_suspend(struct drm_device *); 1229extern int nvc0_instmem_suspend(struct drm_device *);
1146extern void nvc0_instmem_resume(struct drm_device *); 1230extern void nvc0_instmem_resume(struct drm_device *);
1147extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, 1231extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
1148 uint32_t *size); 1232extern void nvc0_instmem_put(struct nouveau_gpuobj *);
1149extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); 1233extern int nvc0_instmem_map(struct nouveau_gpuobj *);
1150extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); 1234extern void nvc0_instmem_unmap(struct nouveau_gpuobj *);
1151extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1152extern void nvc0_instmem_flush(struct drm_device *); 1235extern void nvc0_instmem_flush(struct drm_device *);
1153 1236
1154/* nv04_mc.c */ 1237/* nv04_mc.c */
@@ -1219,6 +1302,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1219extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1302extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1220extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1303extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1221extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); 1304extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1305extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1306extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1307 bool no_wait_reserve, bool no_wait_gpu);
1222 1308
1223/* nouveau_fence.c */ 1309/* nouveau_fence.c */
1224struct nouveau_fence; 1310struct nouveau_fence;
@@ -1234,12 +1320,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence,
1234 void (*work)(void *priv, bool signalled), 1320 void (*work)(void *priv, bool signalled),
1235 void *priv); 1321 void *priv);
1236struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); 1322struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1237extern bool nouveau_fence_signalled(void *obj, void *arg); 1323
1238extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); 1324extern bool __nouveau_fence_signalled(void *obj, void *arg);
1325extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1326extern int __nouveau_fence_flush(void *obj, void *arg);
1327extern void __nouveau_fence_unref(void **obj);
1328extern void *__nouveau_fence_ref(void *obj);
1329
1330static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
1331{
1332 return __nouveau_fence_signalled(obj, NULL);
1333}
1334static inline int
1335nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
1336{
1337 return __nouveau_fence_wait(obj, NULL, lazy, intr);
1338}
1239extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); 1339extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
1240extern int nouveau_fence_flush(void *obj, void *arg); 1340static inline int nouveau_fence_flush(struct nouveau_fence *obj)
1241extern void nouveau_fence_unref(void **obj); 1341{
1242extern void *nouveau_fence_ref(void *obj); 1342 return __nouveau_fence_flush(obj, NULL);
1343}
1344static inline void nouveau_fence_unref(struct nouveau_fence **obj)
1345{
1346 __nouveau_fence_unref((void **)obj);
1347}
1348static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
1349{
1350 return __nouveau_fence_ref(obj);
1351}
1243 1352
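
The old void-pointer fence helpers become double-underscore internals wrapped by typed inlines. A minimal sketch of a sync point using the new signatures, roughly what the removed suspend-path code (and presumably nouveau_channel_idle()) boils down to; the "emit immediately" reading of the third nouveau_fence_new() argument is inferred from that removed code.

static int
example_fence_sync_point(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);	/* emit immediately */
	if (ret)
		return ret;

	ret = nouveau_fence_wait(fence, false, false);	/* lazy=false, intr=false */
	nouveau_fence_unref(&fence);
	return ret;
}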
1244/* nouveau_gem.c */ 1353/* nouveau_gem.c */
1245extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, 1354extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1368,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1259extern int nouveau_gem_ioctl_info(struct drm_device *, void *, 1368extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1260 struct drm_file *); 1369 struct drm_file *);
1261 1370
1371/* nouveau_display.c */
1372int nouveau_vblank_enable(struct drm_device *dev, int crtc);
1373void nouveau_vblank_disable(struct drm_device *dev, int crtc);
1374int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1375 struct drm_pending_vblank_event *event);
1376int nouveau_finish_page_flip(struct nouveau_channel *,
1377 struct nouveau_page_flip_state *);
1378
1262/* nv10_gpio.c */ 1379/* nv10_gpio.c */
1263int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1380int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1264int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1381int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1265 1382
1266/* nv50_gpio.c */ 1383/* nv50_gpio.c */
1267int nv50_gpio_init(struct drm_device *dev); 1384int nv50_gpio_init(struct drm_device *dev);
1385void nv50_gpio_fini(struct drm_device *dev);
1268int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1386int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1269int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1387int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1270void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); 1388int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
1389 void (*)(void *, int), void *);
1390void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
1391 void (*)(void *, int), void *);
1392bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
1271 1393
1272/* nv50_calc.c */ 1394/* nv50_calc.c */
1273int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, 1395int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1456,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1334} 1456}
1335 1457
1336#define nv_wait(dev, reg, mask, val) \ 1458#define nv_wait(dev, reg, mask, val) \
1337 nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) 1459 nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
1460#define nv_wait_ne(dev, reg, mask, val) \
1461 nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
1338 1462
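Both macros wrap the renamed polling helpers from nouveau_util.c with a fixed two-second timeout. A hedged usage sketch (the register offsets and masks are placeholders, and it assumes the helpers return true once the condition holds and false on timeout, like the old nouveau_wait_until()):

static int example_wait_idle(struct drm_device *dev)
{
	/* placeholder register: wait for every status bit to clear */
	if (!nv_wait(dev, 0x00400700, 0xffffffff, 0x00000000))
		return -EBUSY;

	/* placeholder register: wait for the value to move off zero */
	if (!nv_wait_ne(dev, 0x00400704, 0xffffffff, 0x00000000))
		return -EBUSY;

	return 0;
}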
1339/* PRAMIN access */ 1463/* PRAMIN access */
1340static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) 1464static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1571,23 @@ nv_match_device(struct drm_device *dev, unsigned device,
1447 dev->pdev->subsystem_device == sub_device; 1571 dev->pdev->subsystem_device == sub_device;
1448} 1572}
1449 1573
1574/* memory type/access flags; these do not match hardware values */
1575#define NV_MEM_ACCESS_RO 1
1576#define NV_MEM_ACCESS_WO 2
1577#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
1578#define NV_MEM_ACCESS_SYS 4
1579#define NV_MEM_ACCESS_VM 8
1580
1581#define NV_MEM_TARGET_VRAM 0
1582#define NV_MEM_TARGET_PCI 1
1583#define NV_MEM_TARGET_PCI_NOSNOOP 2
1584#define NV_MEM_TARGET_VM 3
1585#define NV_MEM_TARGET_GART 4
1586
1587#define NV_MEM_TYPE_VM 0x7f
1588#define NV_MEM_COMP_VM 0x03
1589
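These flags feed the reworked nouveau_gpuobj_dma_new(); a minimal sketch mirroring the call in the nouveau_fence.c hunk later in this patch (base and size are placeholders, not values taken from the patch):

static int example_vram_dma_object(struct nouveau_channel *chan,
				   struct nouveau_gpuobj **pobj)
{
	u64 base = 0;		/* placeholder VRAM offset */
	u64 size = PAGE_SIZE;	/* placeholder length */

	/* read/write DMA object targeting VRAM, as nouveau_fence.c does */
	return nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
				      base, size, NV_MEM_ACCESS_RW,
				      NV_MEM_TARGET_VRAM, pobj);
}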
1590/* NV_SW object class */
1450#define NV_SW 0x0000506e 1591#define NV_SW 0x0000506e
1451#define NV_SW_DMA_SEMAPHORE 0x00000060 1592#define NV_SW_DMA_SEMAPHORE 0x00000060
1452#define NV_SW_SEMAPHORE_OFFSET 0x00000064 1593#define NV_SW_SEMAPHORE_OFFSET 0x00000064
@@ -1457,5 +1598,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
1457#define NV_SW_VBLSEM_OFFSET 0x00000400 1598#define NV_SW_VBLSEM_OFFSET 0x00000400
1458#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 1599#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1459#define NV_SW_VBLSEM_RELEASE 0x00000408 1600#define NV_SW_VBLSEM_RELEASE 0x00000408
1601#define NV_SW_PAGE_FLIP 0x00000500
1460 1602
1461#endif /* __NOUVEAU_DRV_H__ */ 1603#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1fd4845..ea861c915149 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,96 @@
49#include "nouveau_fbcon.h" 49#include "nouveau_fbcon.h"
50#include "nouveau_dma.h" 50#include "nouveau_dma.h"
51 51
52static void
53nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
54{
55 struct nouveau_fbdev *nfbdev = info->par;
56 struct drm_device *dev = nfbdev->dev;
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 int ret;
59
60 if (info->state != FBINFO_STATE_RUNNING)
61 return;
62
63 ret = -ENODEV;
64 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
65 mutex_trylock(&dev_priv->channel->mutex)) {
66 if (dev_priv->card_type < NV_50)
67 ret = nv04_fbcon_fillrect(info, rect);
68 else
69 if (dev_priv->card_type < NV_C0)
70 ret = nv50_fbcon_fillrect(info, rect);
71 mutex_unlock(&dev_priv->channel->mutex);
72 }
73
74 if (ret == 0)
75 return;
76
77 if (ret != -ENODEV)
78 nouveau_fbcon_gpu_lockup(info);
79 cfb_fillrect(info, rect);
80}
81
82static void
83nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
84{
85 struct nouveau_fbdev *nfbdev = info->par;
86 struct drm_device *dev = nfbdev->dev;
87 struct drm_nouveau_private *dev_priv = dev->dev_private;
88 int ret;
89
90 if (info->state != FBINFO_STATE_RUNNING)
91 return;
92
93 ret = -ENODEV;
94 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
95 mutex_trylock(&dev_priv->channel->mutex)) {
96 if (dev_priv->card_type < NV_50)
97 ret = nv04_fbcon_copyarea(info, image);
98 else
99 if (dev_priv->card_type < NV_C0)
100 ret = nv50_fbcon_copyarea(info, image);
101 mutex_unlock(&dev_priv->channel->mutex);
102 }
103
104 if (ret == 0)
105 return;
106
107 if (ret != -ENODEV)
108 nouveau_fbcon_gpu_lockup(info);
109 cfb_copyarea(info, image);
110}
111
112static void
113nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
114{
115 struct nouveau_fbdev *nfbdev = info->par;
116 struct drm_device *dev = nfbdev->dev;
117 struct drm_nouveau_private *dev_priv = dev->dev_private;
118 int ret;
119
120 if (info->state != FBINFO_STATE_RUNNING)
121 return;
122
123 ret = -ENODEV;
124 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
125 mutex_trylock(&dev_priv->channel->mutex)) {
126 if (dev_priv->card_type < NV_50)
127 ret = nv04_fbcon_imageblit(info, image);
128 else
129 if (dev_priv->card_type < NV_C0)
130 ret = nv50_fbcon_imageblit(info, image);
131 mutex_unlock(&dev_priv->channel->mutex);
132 }
133
134 if (ret == 0)
135 return;
136
137 if (ret != -ENODEV)
138 nouveau_fbcon_gpu_lockup(info);
139 cfb_imageblit(info, image);
140}
141
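All three wrappers above share one shape: attempt the accelerated path only when the console is running, we are not in interrupt context, acceleration has not been disabled, and the channel mutex can be taken without blocking; any failure other than -ENODEV marks the GPU as locked up, and the cfb_* software path always completes the drawing. A small illustrative predicate for the common precondition (not part of the patch; the wrappers additionally mutex_trylock() the channel before calling the nv04/nv50 hooks):

static inline bool example_fbcon_accel_usable(struct fb_info *info)
{
	return info->state == FBINFO_STATE_RUNNING &&
	       !in_interrupt() &&
	       !(info->flags & FBINFO_HWACCEL_DISABLED);
}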
52static int 142static int
53nouveau_fbcon_sync(struct fb_info *info) 143nouveau_fbcon_sync(struct fb_info *info)
54{ 144{
@@ -58,12 +148,17 @@ nouveau_fbcon_sync(struct fb_info *info)
58 struct nouveau_channel *chan = dev_priv->channel; 148 struct nouveau_channel *chan = dev_priv->channel;
59 int ret, i; 149 int ret, i;
60 150
61 if (!chan || !chan->accel_done || 151 if (!chan || !chan->accel_done || in_interrupt() ||
62 info->state != FBINFO_STATE_RUNNING || 152 info->state != FBINFO_STATE_RUNNING ||
63 info->flags & FBINFO_HWACCEL_DISABLED) 153 info->flags & FBINFO_HWACCEL_DISABLED)
64 return 0; 154 return 0;
65 155
66 if (RING_SPACE(chan, 4)) { 156 if (!mutex_trylock(&chan->mutex))
157 return 0;
158
159 ret = RING_SPACE(chan, 4);
160 if (ret) {
161 mutex_unlock(&chan->mutex);
67 nouveau_fbcon_gpu_lockup(info); 162 nouveau_fbcon_gpu_lockup(info);
68 return 0; 163 return 0;
69 } 164 }
@@ -74,6 +169,7 @@ nouveau_fbcon_sync(struct fb_info *info)
74 OUT_RING(chan, 0); 169 OUT_RING(chan, 0);
75 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); 170 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
76 FIRE_RING(chan); 171 FIRE_RING(chan);
172 mutex_unlock(&chan->mutex);
77 173
78 ret = -EBUSY; 174 ret = -EBUSY;
79 for (i = 0; i < 100000; i++) { 175 for (i = 0; i < 100000; i++) {
@@ -97,9 +193,9 @@ static struct fb_ops nouveau_fbcon_ops = {
97 .owner = THIS_MODULE, 193 .owner = THIS_MODULE,
98 .fb_check_var = drm_fb_helper_check_var, 194 .fb_check_var = drm_fb_helper_check_var,
99 .fb_set_par = drm_fb_helper_set_par, 195 .fb_set_par = drm_fb_helper_set_par,
100 .fb_fillrect = cfb_fillrect, 196 .fb_fillrect = nouveau_fbcon_fillrect,
101 .fb_copyarea = cfb_copyarea, 197 .fb_copyarea = nouveau_fbcon_copyarea,
102 .fb_imageblit = cfb_imageblit, 198 .fb_imageblit = nouveau_fbcon_imageblit,
103 .fb_sync = nouveau_fbcon_sync, 199 .fb_sync = nouveau_fbcon_sync,
104 .fb_pan_display = drm_fb_helper_pan_display, 200 .fb_pan_display = drm_fb_helper_pan_display,
105 .fb_blank = drm_fb_helper_blank, 201 .fb_blank = drm_fb_helper_blank,
@@ -108,29 +204,13 @@ static struct fb_ops nouveau_fbcon_ops = {
108 .fb_debug_leave = drm_fb_helper_debug_leave, 204 .fb_debug_leave = drm_fb_helper_debug_leave,
109}; 205};
110 206
111static struct fb_ops nv04_fbcon_ops = { 207static struct fb_ops nouveau_fbcon_sw_ops = {
112 .owner = THIS_MODULE, 208 .owner = THIS_MODULE,
113 .fb_check_var = drm_fb_helper_check_var, 209 .fb_check_var = drm_fb_helper_check_var,
114 .fb_set_par = drm_fb_helper_set_par, 210 .fb_set_par = drm_fb_helper_set_par,
115 .fb_fillrect = nv04_fbcon_fillrect, 211 .fb_fillrect = cfb_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea, 212 .fb_copyarea = cfb_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit, 213 .fb_imageblit = cfb_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122 .fb_debug_enter = drm_fb_helper_debug_enter,
123 .fb_debug_leave = drm_fb_helper_debug_leave,
124};
125
126static struct fb_ops nv50_fbcon_ops = {
127 .owner = THIS_MODULE,
128 .fb_check_var = drm_fb_helper_check_var,
129 .fb_set_par = drm_fb_helper_set_par,
130 .fb_fillrect = nv50_fbcon_fillrect,
131 .fb_copyarea = nv50_fbcon_copyarea,
132 .fb_imageblit = nv50_fbcon_imageblit,
133 .fb_sync = nouveau_fbcon_sync,
134 .fb_pan_display = drm_fb_helper_pan_display, 214 .fb_pan_display = drm_fb_helper_pan_display,
135 .fb_blank = drm_fb_helper_blank, 215 .fb_blank = drm_fb_helper_blank,
136 .fb_setcmap = drm_fb_helper_setcmap, 216 .fb_setcmap = drm_fb_helper_setcmap,
@@ -257,9 +337,9 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
257 FBINFO_HWACCEL_FILLRECT | 337 FBINFO_HWACCEL_FILLRECT |
258 FBINFO_HWACCEL_IMAGEBLIT; 338 FBINFO_HWACCEL_IMAGEBLIT;
259 info->flags |= FBINFO_CAN_FORCE_OUTPUT; 339 info->flags |= FBINFO_CAN_FORCE_OUTPUT;
260 info->fbops = &nouveau_fbcon_ops; 340 info->fbops = &nouveau_fbcon_sw_ops;
261 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 341 info->fix.smem_start = dev->mode_config.fb_base +
262 dev_priv->vm_vram_base; 342 (nvbo->bo.mem.start << PAGE_SHIFT);
263 info->fix.smem_len = size; 343 info->fix.smem_len = size;
264 344
265 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); 345 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
@@ -285,19 +365,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
285 info->pixmap.flags = FB_PIXMAP_SYSTEM; 365 info->pixmap.flags = FB_PIXMAP_SYSTEM;
286 info->pixmap.scan_align = 1; 366 info->pixmap.scan_align = 1;
287 367
368 mutex_unlock(&dev->struct_mutex);
369
288 if (dev_priv->channel && !nouveau_nofbaccel) { 370 if (dev_priv->channel && !nouveau_nofbaccel) {
289 switch (dev_priv->card_type) { 371 ret = -ENODEV;
290 case NV_C0: 372 if (dev_priv->card_type < NV_50)
291 break; 373 ret = nv04_fbcon_accel_init(info);
292 case NV_50: 374 else
293 nv50_fbcon_accel_init(info); 375 if (dev_priv->card_type < NV_C0)
294 info->fbops = &nv50_fbcon_ops; 376 ret = nv50_fbcon_accel_init(info);
295 break; 377
296 default: 378 if (ret == 0)
297 nv04_fbcon_accel_init(info); 379 info->fbops = &nouveau_fbcon_ops;
298 info->fbops = &nv04_fbcon_ops;
299 break;
300 };
301 } 380 }
302 381
303 nouveau_fbcon_zfill(dev, nfbdev); 382 nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +387,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
308 nouveau_fb->base.height, 387 nouveau_fb->base.height,
309 nvbo->bo.offset, nvbo); 388 nvbo->bo.offset, nvbo);
310 389
311 mutex_unlock(&dev->struct_mutex);
312 vga_switcheroo_client_fb_set(dev->pdev, info); 390 vga_switcheroo_client_fb_set(dev->pdev, info);
313 return 0; 391 return 0;
314 392
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e12684c37e..6b933f2c3a5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,13 +40,13 @@ struct nouveau_fbdev {
40 40
41void nouveau_fbcon_restore(void); 41void nouveau_fbcon_restore(void);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); 43int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 44int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); 45int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
46int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 47int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); 48int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); 49int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
50int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
51 51
52void nouveau_fbcon_gpu_lockup(struct fb_info *info); 52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266e..374a9793b85f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,6 +64,7 @@ nouveau_fence_del(struct kref *ref)
64 struct nouveau_fence *fence = 64 struct nouveau_fence *fence =
65 container_of(ref, struct nouveau_fence, refcount); 65 container_of(ref, struct nouveau_fence, refcount);
66 66
67 nouveau_channel_ref(NULL, &fence->channel);
67 kfree(fence); 68 kfree(fence);
68} 69}
69 70
@@ -76,14 +77,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
76 77
77 spin_lock(&chan->fence.lock); 78 spin_lock(&chan->fence.lock);
78 79
79 if (USE_REFCNT(dev)) 80 /* Fetch the last sequence if the channel is still up and running */
80 sequence = nvchan_rd32(chan, 0x48); 81 if (likely(!list_empty(&chan->fence.pending))) {
81 else 82 if (USE_REFCNT(dev))
82 sequence = atomic_read(&chan->fence.last_sequence_irq); 83 sequence = nvchan_rd32(chan, 0x48);
84 else
85 sequence = atomic_read(&chan->fence.last_sequence_irq);
83 86
84 if (chan->fence.sequence_ack == sequence) 87 if (chan->fence.sequence_ack == sequence)
85 goto out; 88 goto out;
86 chan->fence.sequence_ack = sequence; 89 chan->fence.sequence_ack = sequence;
90 }
87 91
88 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { 92 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
89 sequence = fence->sequence; 93 sequence = fence->sequence;
@@ -113,13 +117,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
113 if (!fence) 117 if (!fence)
114 return -ENOMEM; 118 return -ENOMEM;
115 kref_init(&fence->refcount); 119 kref_init(&fence->refcount);
116 fence->channel = chan; 120 nouveau_channel_ref(chan, &fence->channel);
117 121
118 if (emit) 122 if (emit)
119 ret = nouveau_fence_emit(fence); 123 ret = nouveau_fence_emit(fence);
120 124
121 if (ret) 125 if (ret)
122 nouveau_fence_unref((void *)&fence); 126 nouveau_fence_unref(&fence);
123 *pfence = fence; 127 *pfence = fence;
124 return ret; 128 return ret;
125} 129}
@@ -127,7 +131,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
127struct nouveau_channel * 131struct nouveau_channel *
128nouveau_fence_channel(struct nouveau_fence *fence) 132nouveau_fence_channel(struct nouveau_fence *fence)
129{ 133{
130 return fence ? fence->channel : NULL; 134 return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
131} 135}
132 136
133int 137int
@@ -182,7 +186,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
182} 186}
183 187
184void 188void
185nouveau_fence_unref(void **sync_obj) 189__nouveau_fence_unref(void **sync_obj)
186{ 190{
187 struct nouveau_fence *fence = nouveau_fence(*sync_obj); 191 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
188 192
@@ -192,7 +196,7 @@ nouveau_fence_unref(void **sync_obj)
192} 196}
193 197
194void * 198void *
195nouveau_fence_ref(void *sync_obj) 199__nouveau_fence_ref(void *sync_obj)
196{ 200{
197 struct nouveau_fence *fence = nouveau_fence(sync_obj); 201 struct nouveau_fence *fence = nouveau_fence(sync_obj);
198 202
@@ -201,7 +205,7 @@ nouveau_fence_ref(void *sync_obj)
201} 205}
202 206
203bool 207bool
204nouveau_fence_signalled(void *sync_obj, void *sync_arg) 208__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
205{ 209{
206 struct nouveau_fence *fence = nouveau_fence(sync_obj); 210 struct nouveau_fence *fence = nouveau_fence(sync_obj);
207 struct nouveau_channel *chan = fence->channel; 211 struct nouveau_channel *chan = fence->channel;
@@ -214,13 +218,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
214} 218}
215 219
216int 220int
217nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) 221__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
218{ 222{
219 unsigned long timeout = jiffies + (3 * DRM_HZ); 223 unsigned long timeout = jiffies + (3 * DRM_HZ);
224 unsigned long sleep_time = jiffies + 1;
220 int ret = 0; 225 int ret = 0;
221 226
222 while (1) { 227 while (1) {
223 if (nouveau_fence_signalled(sync_obj, sync_arg)) 228 if (__nouveau_fence_signalled(sync_obj, sync_arg))
224 break; 229 break;
225 230
226 if (time_after_eq(jiffies, timeout)) { 231 if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +235,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
230 235
231 __set_current_state(intr ? TASK_INTERRUPTIBLE 236 __set_current_state(intr ? TASK_INTERRUPTIBLE
232 : TASK_UNINTERRUPTIBLE); 237 : TASK_UNINTERRUPTIBLE);
233 if (lazy) 238 if (lazy && time_after_eq(jiffies, sleep_time))
234 schedule_timeout(1); 239 schedule_timeout(1);
235 240
236 if (intr && signal_pending(current)) { 241 if (intr && signal_pending(current)) {
@@ -368,7 +373,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,
368 373
369 kref_get(&sema->ref); 374 kref_get(&sema->ref);
370 nouveau_fence_work(fence, semaphore_work, sema); 375 nouveau_fence_work(fence, semaphore_work, sema);
371 nouveau_fence_unref((void *)&fence); 376 nouveau_fence_unref(&fence);
372 377
373 return 0; 378 return 0;
374} 379}
@@ -380,33 +385,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
380 struct nouveau_channel *chan = nouveau_fence_channel(fence); 385 struct nouveau_channel *chan = nouveau_fence_channel(fence);
381 struct drm_device *dev = wchan->dev; 386 struct drm_device *dev = wchan->dev;
382 struct nouveau_semaphore *sema; 387 struct nouveau_semaphore *sema;
383 int ret; 388 int ret = 0;
384 389
385 if (likely(!fence || chan == wchan || 390 if (likely(!chan || chan == wchan ||
386 nouveau_fence_signalled(fence, NULL))) 391 nouveau_fence_signalled(fence)))
387 return 0; 392 goto out;
388 393
389 sema = alloc_semaphore(dev); 394 sema = alloc_semaphore(dev);
390 if (!sema) { 395 if (!sema) {
391 /* Early card or broken userspace, fall back to 396 /* Early card or broken userspace, fall back to
392 * software sync. */ 397 * software sync. */
393 return nouveau_fence_wait(fence, NULL, false, false); 398 ret = nouveau_fence_wait(fence, true, false);
399 goto out;
400 }
401
402 /* try to take chan's mutex; if we can't take it right away
403 * we have to fall back to software sync to prevent locking
404 * order issues
405 */
406 if (!mutex_trylock(&chan->mutex)) {
407 ret = nouveau_fence_wait(fence, true, false);
408 goto out_unref;
394 } 409 }
395 410
396 /* Make wchan wait until it gets signalled */ 411 /* Make wchan wait until it gets signalled */
397 ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); 412 ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
398 if (ret) 413 if (ret)
399 goto out; 414 goto out_unlock;
400 415
401 /* Signal the semaphore from chan */ 416 /* Signal the semaphore from chan */
402 ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); 417 ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
403out: 418
419out_unlock:
420 mutex_unlock(&chan->mutex);
421out_unref:
404 kref_put(&sema->ref, free_semaphore); 422 kref_put(&sema->ref, free_semaphore);
423out:
424 if (chan)
425 nouveau_channel_put_unlocked(&chan);
405 return ret; 426 return ret;
406} 427}
407 428
408int 429int
409nouveau_fence_flush(void *sync_obj, void *sync_arg) 430__nouveau_fence_flush(void *sync_obj, void *sync_arg)
410{ 431{
411 return 0; 432 return 0;
412} 433}
@@ -420,12 +441,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
420 int ret; 441 int ret;
421 442
422 /* Create an NV_SW object for various sync purposes */ 443 /* Create an NV_SW object for various sync purposes */
423 ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); 444 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
424 if (ret)
425 return ret;
426
427 ret = nouveau_ramht_insert(chan, NvSw, obj);
428 nouveau_gpuobj_ref(NULL, &obj);
429 if (ret) 445 if (ret)
430 return ret; 446 return ret;
431 447
@@ -437,13 +453,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
437 453
438 /* Create a DMA object for the shared cross-channel sync area. */ 454 /* Create a DMA object for the shared cross-channel sync area. */
439 if (USE_SEMA(dev)) { 455 if (USE_SEMA(dev)) {
440 struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node; 456 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
441 457
442 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 458 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
443 mem->start << PAGE_SHIFT, 459 mem->start << PAGE_SHIFT,
444 mem->size << PAGE_SHIFT, 460 mem->size, NV_MEM_ACCESS_RW,
445 NV_DMA_ACCESS_RW, 461 NV_MEM_TARGET_VRAM, &obj);
446 NV_DMA_TARGET_VIDMEM, &obj);
447 if (ret) 462 if (ret)
448 return ret; 463 return ret;
449 464
@@ -473,6 +488,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
473{ 488{
474 struct nouveau_fence *tmp, *fence; 489 struct nouveau_fence *tmp, *fence;
475 490
491 spin_lock(&chan->fence.lock);
492
476 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { 493 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
477 fence->signalled = true; 494 fence->signalled = true;
478 list_del(&fence->entry); 495 list_del(&fence->entry);
@@ -482,6 +499,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
482 499
483 kref_put(&fence->refcount, nouveau_fence_del); 500 kref_put(&fence->refcount, nouveau_fence_del);
484 } 501 }
502
503 spin_unlock(&chan->fence.lock);
485} 504}
486 505
487int 506int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1f2301d26c0a..506c508b7eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
48 return; 48 return;
49 nvbo->gem = NULL; 49 nvbo->gem = NULL;
50 50
51 if (unlikely(nvbo->cpu_filp))
52 ttm_bo_synccpu_write_release(bo);
53
54 if (unlikely(nvbo->pin_refcnt)) { 51 if (unlikely(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1; 52 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo); 53 nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
106 return 0; 103 return 0;
107} 104}
108 105
109static bool
110nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
111{
112 struct drm_nouveau_private *dev_priv = dev->dev_private;
113
114 if (dev_priv->card_type >= NV_50) {
115 switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
116 case 0x0000:
117 case 0x1800:
118 case 0x2800:
119 case 0x4800:
120 case 0x7000:
121 case 0x7400:
122 case 0x7a00:
123 case 0xe000:
124 return true;
125 }
126 } else {
127 if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
128 return true;
129 }
130
131 NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
132 return false;
133}
134
135int 106int
136nouveau_gem_ioctl_new(struct drm_device *dev, void *data, 107nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
137 struct drm_file *file_priv) 108 struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
146 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) 117 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
147 dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; 118 dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
148 119
149 if (req->channel_hint) {
150 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
151 file_priv, chan);
152 }
153
154 if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) 120 if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
155 flags |= TTM_PL_FLAG_VRAM; 121 flags |= TTM_PL_FLAG_VRAM;
156 if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) 122 if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
158 if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) 124 if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
159 flags |= TTM_PL_FLAG_SYSTEM; 125 flags |= TTM_PL_FLAG_SYSTEM;
160 126
161 if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) 127 if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
128 NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
162 return -EINVAL; 129 return -EINVAL;
130 }
131
132 if (req->channel_hint) {
133 chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
134 if (IS_ERR(chan))
135 return PTR_ERR(chan);
136 }
163 137
164 ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, 138 ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
165 req->info.tile_mode, req->info.tile_flags, false, 139 req->info.tile_mode, req->info.tile_flags, false,
166 (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), 140 (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
167 &nvbo); 141 &nvbo);
142 if (chan)
143 nouveau_channel_put(&chan);
168 if (ret) 144 if (ret)
169 return ret; 145 return ret;
170 146
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
231 207
232 list_for_each_safe(entry, tmp, list) { 208 list_for_each_safe(entry, tmp, list) {
233 nvbo = list_entry(entry, struct nouveau_bo, entry); 209 nvbo = list_entry(entry, struct nouveau_bo, entry);
234 if (likely(fence)) { 210
235 struct nouveau_fence *prev_fence; 211 nouveau_bo_fence(nvbo, fence);
236
237 spin_lock(&nvbo->bo.bdev->fence_lock);
238 prev_fence = nvbo->bo.sync_obj;
239 nvbo->bo.sync_obj = nouveau_fence_ref(fence);
240 spin_unlock(&nvbo->bo.bdev->fence_lock);
241 nouveau_fence_unref((void *)&prev_fence);
242 }
243 212
244 if (unlikely(nvbo->validate_mapped)) { 213 if (unlikely(nvbo->validate_mapped)) {
245 ttm_bo_kunmap(&nvbo->kmap); 214 ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@ retry:
299 return -EINVAL; 268 return -EINVAL;
300 } 269 }
301 270
302 ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence); 271 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
303 if (ret) { 272 if (ret) {
304 validate_fini(op, NULL); 273 validate_fini(op, NULL);
305 if (ret == -EAGAIN) 274 if (unlikely(ret == -EAGAIN))
306 ret = ttm_bo_wait_unreserved(&nvbo->bo, false); 275 ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
307 drm_gem_object_unreference_unlocked(gem); 276 drm_gem_object_unreference_unlocked(gem);
308 if (ret) { 277 if (unlikely(ret)) {
309 NV_ERROR(dev, "fail reserve\n"); 278 if (ret != -ERESTARTSYS)
279 NV_ERROR(dev, "fail reserve\n");
310 return ret; 280 return ret;
311 } 281 }
312 goto retry; 282 goto retry;
@@ -331,25 +301,6 @@ retry:
331 validate_fini(op, NULL); 301 validate_fini(op, NULL);
332 return -EINVAL; 302 return -EINVAL;
333 } 303 }
334
335 if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
336 validate_fini(op, NULL);
337
338 if (nvbo->cpu_filp == file_priv) {
339 NV_ERROR(dev, "bo %p mapped by process trying "
340 "to validate it!\n", nvbo);
341 return -EINVAL;
342 }
343
344 mutex_unlock(&drm_global_mutex);
345 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
346 mutex_lock(&drm_global_mutex);
347 if (ret) {
348 NV_ERROR(dev, "fail wait_cpu\n");
349 return ret;
350 }
351 goto retry;
352 }
353 } 304 }
354 305
355 return 0; 306 return 0;
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
383 } 334 }
384 335
385 nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan; 336 nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
386 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, 337 ret = nouveau_bo_validate(nvbo, true, false, false);
387 false, false, false);
388 nvbo->channel = NULL; 338 nvbo->channel = NULL;
389 if (unlikely(ret)) { 339 if (unlikely(ret)) {
390 NV_ERROR(dev, "fail ttm_validate\n"); 340 if (ret != -ERESTARTSYS)
341 NV_ERROR(dev, "fail ttm_validate\n");
391 return ret; 342 return ret;
392 } 343 }
393 344
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
439 390
440 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 391 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
441 if (unlikely(ret)) { 392 if (unlikely(ret)) {
442 NV_ERROR(dev, "validate_init\n"); 393 if (ret != -ERESTARTSYS)
394 NV_ERROR(dev, "validate_init\n");
443 return ret; 395 return ret;
444 } 396 }
445 397
446 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); 398 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
447 if (unlikely(ret < 0)) { 399 if (unlikely(ret < 0)) {
448 NV_ERROR(dev, "validate vram_list\n"); 400 if (ret != -ERESTARTSYS)
401 NV_ERROR(dev, "validate vram_list\n");
449 validate_fini(op, NULL); 402 validate_fini(op, NULL);
450 return ret; 403 return ret;
451 } 404 }
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
453 406
454 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); 407 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
455 if (unlikely(ret < 0)) { 408 if (unlikely(ret < 0)) {
456 NV_ERROR(dev, "validate gart_list\n"); 409 if (ret != -ERESTARTSYS)
410 NV_ERROR(dev, "validate gart_list\n");
457 validate_fini(op, NULL); 411 validate_fini(op, NULL);
458 return ret; 412 return ret;
459 } 413 }
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
461 415
462 ret = validate_list(chan, &op->both_list, pbbo, user_buffers); 416 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
463 if (unlikely(ret < 0)) { 417 if (unlikely(ret < 0)) {
464 NV_ERROR(dev, "validate both_list\n"); 418 if (ret != -ERESTARTSYS)
419 NV_ERROR(dev, "validate both_list\n");
465 validate_fini(op, NULL); 420 validate_fini(op, NULL);
466 return ret; 421 return ret;
467 } 422 }
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
585 struct nouveau_fence *fence = NULL; 540 struct nouveau_fence *fence = NULL;
586 int i, j, ret = 0, do_reloc = 0; 541 int i, j, ret = 0, do_reloc = 0;
587 542
588 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); 543 chan = nouveau_channel_get(dev, file_priv, req->channel);
544 if (IS_ERR(chan))
545 return PTR_ERR(chan);
589 546
590 req->vram_available = dev_priv->fb_aper_free; 547 req->vram_available = dev_priv->fb_aper_free;
591 req->gart_available = dev_priv->gart_info.aper_free; 548 req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
595 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 552 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
596 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", 553 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
597 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 554 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
555 nouveau_channel_put(&chan);
598 return -EINVAL; 556 return -EINVAL;
599 } 557 }
600 558
601 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 559 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
602 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", 560 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
603 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 561 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
562 nouveau_channel_put(&chan);
604 return -EINVAL; 563 return -EINVAL;
605 } 564 }
606 565
607 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 566 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
608 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", 567 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
609 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 568 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
569 nouveau_channel_put(&chan);
610 return -EINVAL; 570 return -EINVAL;
611 } 571 }
612 572
613 push = u_memcpya(req->push, req->nr_push, sizeof(*push)); 573 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
614 if (IS_ERR(push)) 574 if (IS_ERR(push)) {
575 nouveau_channel_put(&chan);
615 return PTR_ERR(push); 576 return PTR_ERR(push);
577 }
616 578
617 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 579 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
618 if (IS_ERR(bo)) { 580 if (IS_ERR(bo)) {
619 kfree(push); 581 kfree(push);
582 nouveau_channel_put(&chan);
620 return PTR_ERR(bo); 583 return PTR_ERR(bo);
621 } 584 }
622 585
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
639 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, 602 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
640 req->nr_buffers, &op, &do_reloc); 603 req->nr_buffers, &op, &do_reloc);
641 if (ret) { 604 if (ret) {
642 NV_ERROR(dev, "validate: %d\n", ret); 605 if (ret != -ERESTARTSYS)
606 NV_ERROR(dev, "validate: %d\n", ret);
643 goto out; 607 goto out;
644 } 608 }
645 609
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
732 696
733out: 697out:
734 validate_fini(&op, fence); 698 validate_fini(&op, fence);
735 nouveau_fence_unref((void**)&fence); 699 nouveau_fence_unref(&fence);
736 kfree(bo); 700 kfree(bo);
737 kfree(push); 701 kfree(push);
738 702
@@ -750,6 +714,7 @@ out_next:
750 req->suffix1 = 0x00000000; 714 req->suffix1 = 0x00000000;
751 } 715 }
752 716
717 nouveau_channel_put(&chan);
753 return ret; 718 return ret;
754} 719}
755 720
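The reworked ioctls all look the channel up through nouveau_channel_get(), bail out on an ERR_PTR, and drop the reference on every exit path with nouveau_channel_put(). A minimal sketch of that pattern (the function and its channel_id parameter are illustrative; judging by the conversions in this file, nouveau_channel_get() presumably also takes the channel mutex that nouveau_channel_put() releases):

static int example_channel_ioctl(struct drm_device *dev, int channel_id,
				 struct drm_file *file_priv)
{
	struct nouveau_channel *chan;
	int ret = 0;

	chan = nouveau_channel_get(dev, file_priv, channel_id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... validate buffers and submit work against chan ... */

	nouveau_channel_put(&chan);
	return ret;
}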
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
781 return -ENOENT; 746 return -ENOENT;
782 nvbo = nouveau_gem_object(gem); 747 nvbo = nouveau_gem_object(gem);
783 748
784 if (nvbo->cpu_filp) { 749 spin_lock(&nvbo->bo.bdev->fence_lock);
785 if (nvbo->cpu_filp == file_priv) 750 ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
786 goto out; 751 spin_unlock(&nvbo->bo.bdev->fence_lock);
787
788 ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
789 if (ret)
790 goto out;
791 }
792
793 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
794 spin_lock(&nvbo->bo.bdev->fence_lock);
795 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
796 spin_unlock(&nvbo->bo.bdev->fence_lock);
797 } else {
798 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
799 if (ret == 0)
800 nvbo->cpu_filp = file_priv;
801 }
802
803out:
804 drm_gem_object_unreference_unlocked(gem); 752 drm_gem_object_unreference_unlocked(gem);
805 return ret; 753 return ret;
806} 754}
@@ -809,26 +757,7 @@ int
809nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, 757nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
810 struct drm_file *file_priv) 758 struct drm_file *file_priv)
811{ 759{
812 struct drm_nouveau_gem_cpu_prep *req = data; 760 return 0;
813 struct drm_gem_object *gem;
814 struct nouveau_bo *nvbo;
815 int ret = -EINVAL;
816
817 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
818 if (!gem)
819 return -ENOENT;
820 nvbo = nouveau_gem_object(gem);
821
822 if (nvbo->cpu_filp != file_priv)
823 goto out;
824 nvbo->cpu_filp = NULL;
825
826 ttm_bo_synccpu_write_release(&nvbo->bo);
827 ret = 0;
828
829out:
830 drm_gem_object_unreference_unlocked(gem);
831 return ret;
832} 761}
833 762
834int 763int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a05c411..053edf9d2f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
953 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); 953 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
954 954
955 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); 955 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
956 if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC) 956 if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
957 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); 957 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
958 else 958 else
959 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); 959 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
999 if (dev_priv->card_type == NV_10) { 999 if (dev_priv->card_type == NV_10) {
1000 /* Not waiting for vertical retrace before modifying 1000 /* Not waiting for vertical retrace before modifying
1001 CRE_53/CRE_54 causes lockups. */ 1001 CRE_53/CRE_54 causes lockups. */
1002 nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); 1002 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
1003 nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 1003 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1004 } 1004 }
1005 1005
1006 wr_cio_state(dev, head, regp, NV_CIO_CRE_53); 1006 wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,
1017 1017
1018 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); 1018 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
1019 1019
1020 /* Setting 1 on this value gives you interrupts for every vblank period. */ 1020 /* Enable vblank interrupts. */
1021 NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0); 1021 NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
1022 (dev->vblank_enabled[head] ? 1 : 0));
1022 NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); 1023 NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
1023} 1024}
1024 1025
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6c9d67..2ba7265bc967 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
36#include "nouveau_drv.h" 36#include "nouveau_drv.h"
37#include "nouveau_reg.h" 37#include "nouveau_reg.h"
38#include "nouveau_ramht.h" 38#include "nouveau_ramht.h"
39#include <linux/ratelimit.h> 39#include "nouveau_util.h"
40
41/* needed for hotplug irq */
42#include "nouveau_connector.h"
43#include "nv50_display.h"
44
45static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
46
47static int nouveau_ratelimit(void)
48{
49 return __ratelimit(&nouveau_ratelimit_state);
50}
51 40
52void 41void
53nouveau_irq_preinstall(struct drm_device *dev) 42nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev)
57 /* Master disable */ 46 /* Master disable */
58 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); 47 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
59 48
60 if (dev_priv->card_type >= NV_50) { 49 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
61 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
62 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
63 spin_lock_init(&dev_priv->hpd_state.lock);
64 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
65 }
66} 50}
67 51
68int 52int
69nouveau_irq_postinstall(struct drm_device *dev) 53nouveau_irq_postinstall(struct drm_device *dev)
70{ 54{
55 struct drm_nouveau_private *dev_priv = dev->dev_private;
56
71 /* Master enable */ 57 /* Master enable */
72 nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); 58 nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
59 if (dev_priv->msi_enabled)
60 nv_wr08(dev, 0x00088068, 0xff);
61
73 return 0; 62 return 0;
74} 63}
75 64
@@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev)
80 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); 69 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
81} 70}
82 71
83static int 72irqreturn_t
84nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) 73nouveau_irq_handler(DRM_IRQ_ARGS)
85{
86 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
87 struct nouveau_pgraph_object_method *grm;
88 struct nouveau_pgraph_object_class *grc;
89
90 grc = dev_priv->engine.graph.grclass;
91 while (grc->id) {
92 if (grc->id == class)
93 break;
94 grc++;
95 }
96
97 if (grc->id != class || !grc->methods)
98 return -ENOENT;
99
100 grm = grc->methods;
101 while (grm->id) {
102 if (grm->id == mthd)
103 return grm->exec(chan, class, mthd, data);
104 grm++;
105 }
106
107 return -ENOENT;
108}
109
110static bool
111nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
112{
113 struct drm_device *dev = chan->dev;
114 const int subc = (addr >> 13) & 0x7;
115 const int mthd = addr & 0x1ffc;
116
117 if (mthd == 0x0000) {
118 struct nouveau_gpuobj *gpuobj;
119
120 gpuobj = nouveau_ramht_find(chan, data);
121 if (!gpuobj)
122 return false;
123
124 if (gpuobj->engine != NVOBJ_ENGINE_SW)
125 return false;
126
127 chan->sw_subchannel[subc] = gpuobj->class;
128 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
129 NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
130 return true;
131 }
132
133 /* hw object */
134 if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
135 return false;
136
137 if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
138 return false;
139
140 return true;
141}
142
143static void
144nouveau_fifo_irq_handler(struct drm_device *dev)
145{
146 struct drm_nouveau_private *dev_priv = dev->dev_private;
147 struct nouveau_engine *engine = &dev_priv->engine;
148 uint32_t status, reassign;
149 int cnt = 0;
150
151 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
152 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
153 struct nouveau_channel *chan = NULL;
154 uint32_t chid, get;
155
156 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
157
158 chid = engine->fifo.channel_id(dev);
159 if (chid >= 0 && chid < engine->fifo.channels)
160 chan = dev_priv->fifos[chid];
161 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
162
163 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
164 uint32_t mthd, data;
165 int ptr;
166
167 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
168 * wrapping on my G80 chips, but CACHE1 isn't big
169 * enough for this much data.. Tests show that it
170 * wraps around to the start at GET=0x800.. No clue
171 * as to why..
172 */
173 ptr = (get & 0x7ff) >> 2;
174
175 if (dev_priv->card_type < NV_40) {
176 mthd = nv_rd32(dev,
177 NV04_PFIFO_CACHE1_METHOD(ptr));
178 data = nv_rd32(dev,
179 NV04_PFIFO_CACHE1_DATA(ptr));
180 } else {
181 mthd = nv_rd32(dev,
182 NV40_PFIFO_CACHE1_METHOD(ptr));
183 data = nv_rd32(dev,
184 NV40_PFIFO_CACHE1_DATA(ptr));
185 }
186
187 if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
188 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
189 "Mthd 0x%04x Data 0x%08x\n",
190 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
191 data);
192 }
193
194 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
195 nv_wr32(dev, NV03_PFIFO_INTR_0,
196 NV_PFIFO_INTR_CACHE_ERROR);
197
198 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
199 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
200 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
201 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
202 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
203 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
204
205 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
206 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
207 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
208
209 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
210 }
211
212 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
213 u32 dma_get = nv_rd32(dev, 0x003244);
214 u32 dma_put = nv_rd32(dev, 0x003240);
215 u32 push = nv_rd32(dev, 0x003220);
216 u32 state = nv_rd32(dev, 0x003228);
217
218 if (dev_priv->card_type == NV_50) {
219 u32 ho_get = nv_rd32(dev, 0x003328);
220 u32 ho_put = nv_rd32(dev, 0x003320);
221 u32 ib_get = nv_rd32(dev, 0x003334);
222 u32 ib_put = nv_rd32(dev, 0x003330);
223
224 if (nouveau_ratelimit())
225 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
226 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
227 "State 0x%08x Push 0x%08x\n",
228 chid, ho_get, dma_get, ho_put,
229 dma_put, ib_get, ib_put, state,
230 push);
231
232 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
233 nv_wr32(dev, 0x003364, 0x00000000);
234 if (dma_get != dma_put || ho_get != ho_put) {
235 nv_wr32(dev, 0x003244, dma_put);
236 nv_wr32(dev, 0x003328, ho_put);
237 } else
238 if (ib_get != ib_put) {
239 nv_wr32(dev, 0x003334, ib_put);
240 }
241 } else {
242 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
243 "Put 0x%08x State 0x%08x Push 0x%08x\n",
244 chid, dma_get, dma_put, state, push);
245
246 if (dma_get != dma_put)
247 nv_wr32(dev, 0x003244, dma_put);
248 }
249
250 nv_wr32(dev, 0x003228, 0x00000000);
251 nv_wr32(dev, 0x003220, 0x00000001);
252 nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
253 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
254 }
255
256 if (status & NV_PFIFO_INTR_SEMAPHORE) {
257 uint32_t sem;
258
259 status &= ~NV_PFIFO_INTR_SEMAPHORE;
260 nv_wr32(dev, NV03_PFIFO_INTR_0,
261 NV_PFIFO_INTR_SEMAPHORE);
262
263 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
264 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
265
266 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
267 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
268 }
269
270 if (dev_priv->card_type == NV_50) {
271 if (status & 0x00000010) {
272 nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
273 status &= ~0x00000010;
274 nv_wr32(dev, 0x002100, 0x00000010);
275 }
276 }
277
278 if (status) {
279 if (nouveau_ratelimit())
280 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
281 status, chid);
282 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
283 status = 0;
284 }
285
286 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
287 }
288
289 if (status) {
290 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
291 nv_wr32(dev, 0x2140, 0);
292 nv_wr32(dev, 0x140, 0);
293 }
294
295 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
296}
297
298struct nouveau_bitfield_names {
299 uint32_t mask;
300 const char *name;
301};
302
303static struct nouveau_bitfield_names nstatus_names[] =
304{
305 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
306 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
307 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
308 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
309};
310
311static struct nouveau_bitfield_names nstatus_names_nv10[] =
312{
313 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
314 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
315 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
316 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
317};
318
319static struct nouveau_bitfield_names nsource_names[] =
320{
321 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
322 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
323 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
324 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
325 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
326 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
327 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
328 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
329 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
330 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
331 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
332 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
333 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
334 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
335 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
336 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
337 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
338 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
339 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
340};
341
342static void
343nouveau_print_bitfield_names_(uint32_t value,
344 const struct nouveau_bitfield_names *namelist,
345 const int namelist_len)
346{
347 /*
348 * Caller must have already printed the KERN_* log level for us.
349 * Also the caller is responsible for adding the newline.
350 */
351 int i;
352 for (i = 0; i < namelist_len; ++i) {
353 uint32_t mask = namelist[i].mask;
354 if (value & mask) {
355 printk(" %s", namelist[i].name);
356 value &= ~mask;
357 }
358 }
359 if (value)
360 printk(" (unknown bits 0x%08x)", value);
361}
362#define nouveau_print_bitfield_names(val, namelist) \
363 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
364
365struct nouveau_enum_names {
366 uint32_t value;
367 const char *name;
368};
369
370static void
371nouveau_print_enum_names_(uint32_t value,
372 const struct nouveau_enum_names *namelist,
373 const int namelist_len)
374{
375 /*
376 * Caller must have already printed the KERN_* log level for us.
377 * Also the caller is responsible for adding the newline.
378 */
379 int i;
380 for (i = 0; i < namelist_len; ++i) {
381 if (value == namelist[i].value) {
382 printk("%s", namelist[i].name);
383 return;
384 }
385 }
386 printk("unknown value 0x%08x", value);
387}
388#define nouveau_print_enum_names(val, namelist) \
389 nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
390
391static int
392nouveau_graph_chid_from_grctx(struct drm_device *dev)
393{ 74{
75 struct drm_device *dev = (struct drm_device *)arg;
394 struct drm_nouveau_private *dev_priv = dev->dev_private; 76 struct drm_nouveau_private *dev_priv = dev->dev_private;
395 uint32_t inst; 77 unsigned long flags;
78 u32 stat;
396 int i; 79 int i;
397 80
398 if (dev_priv->card_type < NV_40) 81 stat = nv_rd32(dev, NV03_PMC_INTR_0);
399 return dev_priv->engine.fifo.channels; 82 if (!stat)
400 else 83 return IRQ_NONE;
401 if (dev_priv->card_type < NV_50) {
402 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
403
404 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
405 struct nouveau_channel *chan = dev_priv->fifos[i];
406
407 if (!chan || !chan->ramin_grctx)
408 continue;
409
410 if (inst == chan->ramin_grctx->pinst)
411 break;
412 }
413 } else {
414 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
415
416 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
417 struct nouveau_channel *chan = dev_priv->fifos[i];
418
419 if (!chan || !chan->ramin)
420 continue;
421
422 if (inst == chan->ramin->vinst)
423 break;
424 }
425 }
426
427
428 return i;
429}
430
431static int
432nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
433{
434 struct drm_nouveau_private *dev_priv = dev->dev_private;
435 struct nouveau_engine *engine = &dev_priv->engine;
436 int channel;
437
438 if (dev_priv->card_type < NV_10)
439 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
440 else
441 if (dev_priv->card_type < NV_40)
442 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
443 else
444 channel = nouveau_graph_chid_from_grctx(dev);
445
446 if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
447 NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
448 return -EINVAL;
449 }
450
451 *channel_ret = channel;
452 return 0;
453}
454
455struct nouveau_pgraph_trap {
456 int channel;
457 int class;
458 int subc, mthd, size;
459 uint32_t data, data2;
460 uint32_t nsource, nstatus;
461};
462
463static void
464nouveau_graph_trap_info(struct drm_device *dev,
465 struct nouveau_pgraph_trap *trap)
466{
467 struct drm_nouveau_private *dev_priv = dev->dev_private;
468 uint32_t address;
469
470 trap->nsource = trap->nstatus = 0;
471 if (dev_priv->card_type < NV_50) {
472 trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
473 trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
474 }
475
476 if (nouveau_graph_trapped_channel(dev, &trap->channel))
477 trap->channel = -1;
478 address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
479
480 trap->mthd = address & 0x1FFC;
481 trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
482 if (dev_priv->card_type < NV_10) {
483 trap->subc = (address >> 13) & 0x7;
484 } else {
485 trap->subc = (address >> 16) & 0x7;
486 trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
487 }
488
489 if (dev_priv->card_type < NV_10)
490 trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
491 else if (dev_priv->card_type < NV_40)
492 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
493 else if (dev_priv->card_type < NV_50)
494 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
495 else
496 trap->class = nv_rd32(dev, 0x400814);
497}
498
499static void
500nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
501 struct nouveau_pgraph_trap *trap)
502{
503 struct drm_nouveau_private *dev_priv = dev->dev_private;
504 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
505
506 if (dev_priv->card_type < NV_50) {
507 NV_INFO(dev, "%s - nSource:", id);
508 nouveau_print_bitfield_names(nsource, nsource_names);
509 printk(", nStatus:");
510 if (dev_priv->card_type < NV_10)
511 nouveau_print_bitfield_names(nstatus, nstatus_names);
512 else
513 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
514 printk("\n");
515 }
516
517 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
518 "Data 0x%08x:0x%08x\n",
519 id, trap->channel, trap->subc,
520 trap->class, trap->mthd,
521 trap->data2, trap->data);
522}
523
524static int
525nouveau_pgraph_intr_swmthd(struct drm_device *dev,
526 struct nouveau_pgraph_trap *trap)
527{
528 struct drm_nouveau_private *dev_priv = dev->dev_private;
529
530 if (trap->channel < 0 ||
531 trap->channel >= dev_priv->engine.fifo.channels ||
532 !dev_priv->fifos[trap->channel])
533 return -ENODEV;
534
535 return nouveau_call_method(dev_priv->fifos[trap->channel],
536 trap->class, trap->mthd, trap->data);
537}
538
539static inline void
540nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
541{
542 struct nouveau_pgraph_trap trap;
543 int unhandled = 0;
544 84
545 nouveau_graph_trap_info(dev, &trap); 85 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
86 for (i = 0; i < 32 && stat; i++) {
87 if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
88 continue;
546 89
547 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 90 dev_priv->irq_handler[i](dev);
548 if (nouveau_pgraph_intr_swmthd(dev, &trap)) 91 stat &= ~(1 << i);
549 unhandled = 1;
550 } else {
551 unhandled = 1;
552 } 92 }
553 93
554 if (unhandled) 94 if (dev_priv->msi_enabled)
555 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); 95 nv_wr08(dev, 0x00088068, 0xff);
556} 96 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
557
558
559static inline void
560nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
561{
562 struct nouveau_pgraph_trap trap;
563 int unhandled = 0;
564
565 nouveau_graph_trap_info(dev, &trap);
566 trap.nsource = nsource;
567
568 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
569 if (nouveau_pgraph_intr_swmthd(dev, &trap))
570 unhandled = 1;
571 } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
572 uint32_t v = nv_rd32(dev, 0x402000);
573 nv_wr32(dev, 0x402000, v);
574
575 /* dump the error anyway for now: it's useful for
576 Gallium development */
577 unhandled = 1;
578 } else {
579 unhandled = 1;
580 }
581 97
582 if (unhandled && nouveau_ratelimit()) 98 if (stat && nouveau_ratelimit())
583 nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); 99 NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
100 return IRQ_HANDLED;
584} 101}
585 102
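The rewritten handler above just walks NV03_PMC_INTR_0 and dispatches each pending bit to whatever was installed in dev_priv->irq_handler[]; the PFIFO/PGRAPH code that used to live in this file moves out to the per-engine sources touched by this patch. A hedged sketch of installing such a per-bit handler (the helper below is hypothetical, since the real registration API is not shown in this hunk, but it would have to take context_switch_lock just as the handler does):

/* hypothetical registration helper, for illustration only */
static void example_irq_register(struct drm_device *dev, int bit,
				 void (*handler)(struct drm_device *))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	dev_priv->irq_handler[bit] = handler;
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}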
586static inline void 103int
587nouveau_pgraph_intr_context_switch(struct drm_device *dev) 104nouveau_irq_init(struct drm_device *dev)
588{ 105{
589 struct drm_nouveau_private *dev_priv = dev->dev_private; 106 struct drm_nouveau_private *dev_priv = dev->dev_private;
590 struct nouveau_engine *engine = &dev_priv->engine; 107 int ret;
591 uint32_t chid;
592
593 chid = engine->fifo.channel_id(dev);
594 NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
595
596 switch (dev_priv->card_type) {
597 case NV_04:
598 nv04_graph_context_switch(dev);
599 break;
600 case NV_10:
601 nv10_graph_context_switch(dev);
602 break;
603 default:
604 NV_ERROR(dev, "Context switch not implemented\n");
605 break;
606 }
607}
608
609static void
610nouveau_pgraph_irq_handler(struct drm_device *dev)
611{
612 uint32_t status;
613
614 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
615 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
616
617 if (status & NV_PGRAPH_INTR_NOTIFY) {
618 nouveau_pgraph_intr_notify(dev, nsource);
619
620 status &= ~NV_PGRAPH_INTR_NOTIFY;
621 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
622 }
623
624 if (status & NV_PGRAPH_INTR_ERROR) {
625 nouveau_pgraph_intr_error(dev, nsource);
626 108
627 status &= ~NV_PGRAPH_INTR_ERROR; 109 if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
628 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); 110 ret = pci_enable_msi(dev->pdev);
111 if (ret == 0) {
112 NV_INFO(dev, "enabled MSI\n");
113 dev_priv->msi_enabled = true;
629 } 114 }
630
631 if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
632 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
633 nv_wr32(dev, NV03_PGRAPH_INTR,
634 NV_PGRAPH_INTR_CONTEXT_SWITCH);
635
636 nouveau_pgraph_intr_context_switch(dev);
637 }
638
639 if (status) {
640 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
641 nv_wr32(dev, NV03_PGRAPH_INTR, status);
642 }
643
644 if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
645 nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
646 } 115 }
647 116
648 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 117 return drm_irq_install(dev);
649}
650
651static struct nouveau_enum_names nv50_mp_exec_error_names[] =
652{
653 { 3, "STACK_UNDERFLOW" },
654 { 4, "QUADON_ACTIVE" },
655 { 8, "TIMEOUT" },
656 { 0x10, "INVALID_OPCODE" },
657 { 0x40, "BREAKPOINT" },
658};
659
660static void
661nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
662{
663 struct drm_nouveau_private *dev_priv = dev->dev_private;
664 uint32_t units = nv_rd32(dev, 0x1540);
665 uint32_t addr, mp10, status, pc, oplow, ophigh;
666 int i;
667 int mps = 0;
668 for (i = 0; i < 4; i++) {
669 if (!(units & 1 << (i+24)))
670 continue;
671 if (dev_priv->chipset < 0xa0)
672 addr = 0x408200 + (tpid << 12) + (i << 7);
673 else
674 addr = 0x408100 + (tpid << 11) + (i << 7);
675 mp10 = nv_rd32(dev, addr + 0x10);
676 status = nv_rd32(dev, addr + 0x14);
677 if (!status)
678 continue;
679 if (display) {
680 nv_rd32(dev, addr + 0x20);
681 pc = nv_rd32(dev, addr + 0x24);
682 oplow = nv_rd32(dev, addr + 0x70);
683 ophigh= nv_rd32(dev, addr + 0x74);
684 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
685 "TP %d MP %d: ", tpid, i);
686 nouveau_print_enum_names(status,
687 nv50_mp_exec_error_names);
688 printk(" at %06x warp %d, opcode %08x %08x\n",
689 pc&0xffffff, pc >> 24,
690 oplow, ophigh);
691 }
692 nv_wr32(dev, addr + 0x10, mp10);
693 nv_wr32(dev, addr + 0x14, 0);
694 mps++;
695 }
696 if (!mps && display)
697 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
698 "No MPs claiming errors?\n", tpid);
699} 118}
700 119
701static void 120void
702nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, 121nouveau_irq_fini(struct drm_device *dev)
703 uint32_t ustatus_new, int display, const char *name)
704{ 122{
705 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_nouveau_private *dev_priv = dev->dev_private;
706 int tps = 0;
707 uint32_t units = nv_rd32(dev, 0x1540);
708 int i, r;
709 uint32_t ustatus_addr, ustatus;
710 for (i = 0; i < 16; i++) {
711 if (!(units & (1 << i)))
712 continue;
713 if (dev_priv->chipset < 0xa0)
714 ustatus_addr = ustatus_old + (i << 12);
715 else
716 ustatus_addr = ustatus_new + (i << 11);
717 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
718 if (!ustatus)
719 continue;
720 tps++;
721 switch (type) {
722 case 6: /* texture error... unknown for now */
723 nv50_fb_vm_trap(dev, display, name);
724 if (display) {
725 NV_ERROR(dev, "magic set %d:\n", i);
726 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
727 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
728 nv_rd32(dev, r));
729 }
730 break;
731 case 7: /* MP error */
732 if (ustatus & 0x00010000) {
733 nv50_pgraph_mp_trap(dev, i, display);
734 ustatus &= ~0x00010000;
735 }
736 break;
737 case 8: /* TPDMA error */
738 {
739 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
740 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
741 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
742 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
743 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
744 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
745 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
746 nv50_fb_vm_trap(dev, display, name);
747 /* 2d engine destination */
748 if (ustatus & 0x00000010) {
749 if (display) {
750 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
751 i, e14, e10);
752 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
753 i, e0c, e18, e1c, e20, e24);
754 }
755 ustatus &= ~0x00000010;
756 }
757 /* Render target */
758 if (ustatus & 0x00000040) {
759 if (display) {
760 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
761 i, e14, e10);
762 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
763 i, e0c, e18, e1c, e20, e24);
764 }
765 ustatus &= ~0x00000040;
766 }
767 /* CUDA memory: l[], g[] or stack. */
768 if (ustatus & 0x00000080) {
769 if (display) {
770 if (e18 & 0x80000000) {
771 /* g[] read fault? */
772 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
773 i, e14, e10 | ((e18 >> 24) & 0x1f));
774 e18 &= ~0x1f000000;
775 } else if (e18 & 0xc) {
776 /* g[] write fault? */
777 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
778 i, e14, e10 | ((e18 >> 7) & 0x1f));
779 e18 &= ~0x00000f80;
780 } else {
781 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
782 i, e14, e10);
783 }
784 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
785 i, e0c, e18, e1c, e20, e24);
786 }
787 ustatus &= ~0x00000080;
788 }
789 }
790 break;
791 }
792 if (ustatus) {
793 if (display)
794 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
795 }
796 nv_wr32(dev, ustatus_addr, 0xc0000000);
797 }
798
799 if (!tps && display)
800 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
801}
802
803static void
804nv50_pgraph_trap_handler(struct drm_device *dev)
805{
806 struct nouveau_pgraph_trap trap;
807 uint32_t status = nv_rd32(dev, 0x400108);
808 uint32_t ustatus;
809 int display = nouveau_ratelimit();
810
811
812 if (!status && display) {
813 nouveau_graph_trap_info(dev, &trap);
814 nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
815 NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
816 }
817
818 /* DISPATCH: Relays commands to other units and handles NOTIFY,
819 * COND, QUERY. If you get a trap from it, the command is still stuck
820 * in DISPATCH and you need to do something about it. */
821 if (status & 0x001) {
822 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
823 if (!ustatus && display) {
824 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
825 }
826
827 /* Known to be triggered by screwed up NOTIFY and COND... */
828 if (ustatus & 0x00000001) {
829 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
830 nv_wr32(dev, 0x400500, 0);
831 if (nv_rd32(dev, 0x400808) & 0x80000000) {
832 if (display) {
833 if (nouveau_graph_trapped_channel(dev, &trap.channel))
834 trap.channel = -1;
835 trap.class = nv_rd32(dev, 0x400814);
836 trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
837 trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
838 trap.data = nv_rd32(dev, 0x40080c);
839 trap.data2 = nv_rd32(dev, 0x400810);
840 nouveau_graph_dump_trap_info(dev,
841 "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
842 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
843 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
844 }
845 nv_wr32(dev, 0x400808, 0);
846 } else if (display) {
847 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
848 }
849 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
850 nv_wr32(dev, 0x400848, 0);
851 ustatus &= ~0x00000001;
852 }
853 if (ustatus & 0x00000002) {
854 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
855 nv_wr32(dev, 0x400500, 0);
856 if (nv_rd32(dev, 0x40084c) & 0x80000000) {
857 if (display) {
858 if (nouveau_graph_trapped_channel(dev, &trap.channel))
859 trap.channel = -1;
860 trap.class = nv_rd32(dev, 0x400814);
861 trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
862 trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
863 trap.data = nv_rd32(dev, 0x40085c);
864 trap.data2 = 0;
865 nouveau_graph_dump_trap_info(dev,
866 "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
867 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
868 }
869 nv_wr32(dev, 0x40084c, 0);
870 } else if (display) {
871 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
872 }
873 ustatus &= ~0x00000002;
874 }
875 if (ustatus && display)
876 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
877 nv_wr32(dev, 0x400804, 0xc0000000);
878 nv_wr32(dev, 0x400108, 0x001);
879 status &= ~0x001;
880 }
881
882 /* TRAPs other than dispatch use the "normal" trap regs. */
883 if (status && display) {
884 nouveau_graph_trap_info(dev, &trap);
885 nouveau_graph_dump_trap_info(dev,
886 "PGRAPH_TRAP", &trap);
887 }
888
889 /* M2MF: Memory to memory copy engine. */
890 if (status & 0x002) {
891 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
892 if (!ustatus && display) {
893 NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
894 }
895 if (ustatus & 0x00000001) {
896 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
897 ustatus &= ~0x00000001;
898 }
899 if (ustatus & 0x00000002) {
900 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
901 ustatus &= ~0x00000002;
902 }
903 if (ustatus & 0x00000004) {
904 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
905 ustatus &= ~0x00000004;
906 }
907 NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
908 nv_rd32(dev, 0x406804),
909 nv_rd32(dev, 0x406808),
910 nv_rd32(dev, 0x40680c),
911 nv_rd32(dev, 0x406810));
912 if (ustatus && display)
913 NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
914 /* No sane way found yet -- just reset the bugger. */
915 nv_wr32(dev, 0x400040, 2);
916 nv_wr32(dev, 0x400040, 0);
917 nv_wr32(dev, 0x406800, 0xc0000000);
918 nv_wr32(dev, 0x400108, 0x002);
919 status &= ~0x002;
920 }
921
922 /* VFETCH: Fetches data from vertex buffers. */
923 if (status & 0x004) {
924 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
925 if (!ustatus && display) {
926 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
927 }
928 if (ustatus & 0x00000001) {
929 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
930 NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
931 nv_rd32(dev, 0x400c00),
932 nv_rd32(dev, 0x400c08),
933 nv_rd32(dev, 0x400c0c),
934 nv_rd32(dev, 0x400c10));
935 ustatus &= ~0x00000001;
936 }
937 if (ustatus && display)
938 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
939 nv_wr32(dev, 0x400c04, 0xc0000000);
940 nv_wr32(dev, 0x400108, 0x004);
941 status &= ~0x004;
942 }
943
944 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
945 if (status & 0x008) {
946 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
947 if (!ustatus && display) {
948 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
949 }
950 if (ustatus & 0x00000001) {
951 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
952 NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
953 nv_rd32(dev, 0x401804),
954 nv_rd32(dev, 0x401808),
955 nv_rd32(dev, 0x40180c),
956 nv_rd32(dev, 0x401810));
957 ustatus &= ~0x00000001;
958 }
959 if (ustatus && display)
960 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
961 /* No sane way found yet -- just reset the bugger. */
962 nv_wr32(dev, 0x400040, 0x80);
963 nv_wr32(dev, 0x400040, 0);
964 nv_wr32(dev, 0x401800, 0xc0000000);
965 nv_wr32(dev, 0x400108, 0x008);
966 status &= ~0x008;
967 }
968
969 /* CCACHE: Handles code and c[] caches and fills them. */
970 if (status & 0x010) {
971 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
972 if (!ustatus && display) {
973 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
974 }
975 if (ustatus & 0x00000001) {
976 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
977 NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
978 nv_rd32(dev, 0x405800),
979 nv_rd32(dev, 0x405804),
980 nv_rd32(dev, 0x405808),
981 nv_rd32(dev, 0x40580c),
982 nv_rd32(dev, 0x405810),
983 nv_rd32(dev, 0x405814),
984 nv_rd32(dev, 0x40581c));
985 ustatus &= ~0x00000001;
986 }
987 if (ustatus && display)
988 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
989 nv_wr32(dev, 0x405018, 0xc0000000);
990 nv_wr32(dev, 0x400108, 0x010);
991 status &= ~0x010;
992 }
993
994 /* Unknown, not seen yet... 0x402000 is the only trap status reg
995 * remaining, so try to handle it anyway. Perhaps related to that
996 * unknown DMA slot on tesla? */
997 if (status & 0x20) {
998 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
999 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
1000 if (display)
1001 NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
1002 nv_wr32(dev, 0x402000, 0xc0000000);
1003		/* no status modification on purpose */
1004 }
1005
1006 /* TEXTURE: CUDA texturing units */
1007 if (status & 0x040) {
1008 nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
1009 "PGRAPH_TRAP_TEXTURE");
1010 nv_wr32(dev, 0x400108, 0x040);
1011 status &= ~0x040;
1012 }
1013
1014 /* MP: CUDA execution engines. */
1015 if (status & 0x080) {
1016 nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
1017 "PGRAPH_TRAP_MP");
1018 nv_wr32(dev, 0x400108, 0x080);
1019 status &= ~0x080;
1020 }
1021
1022 /* TPDMA: Handles TP-initiated uncached memory accesses:
1023 * l[], g[], stack, 2d surfaces, render targets. */
1024 if (status & 0x100) {
1025 nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
1026 "PGRAPH_TRAP_TPDMA");
1027 nv_wr32(dev, 0x400108, 0x100);
1028 status &= ~0x100;
1029 }
1030
1031 if (status) {
1032 if (display)
1033 NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
1034 status);
1035 nv_wr32(dev, 0x400108, status);
1036 }
1037}
1038
1039/* There must be a *lot* of these. Will take some time to gather them up. */
1040static struct nouveau_enum_names nv50_data_error_names[] =
1041{
1042 { 4, "INVALID_VALUE" },
1043 { 5, "INVALID_ENUM" },
1044 { 8, "INVALID_OBJECT" },
1045 { 0xc, "INVALID_BITFIELD" },
1046 { 0x28, "MP_NO_REG_SPACE" },
1047 { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
1048};
1049
1050static void
1051nv50_pgraph_irq_handler(struct drm_device *dev)
1052{
1053 struct nouveau_pgraph_trap trap;
1054 int unhandled = 0;
1055 uint32_t status;
1056
1057 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1058		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
1059 if (status & 0x00000001) {
1060 nouveau_graph_trap_info(dev, &trap);
1061 if (nouveau_ratelimit())
1062 nouveau_graph_dump_trap_info(dev,
1063 "PGRAPH_NOTIFY", &trap);
1064 status &= ~0x00000001;
1065 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
1066 }
1067
1068 /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
1069 * when you write 0x200 to 0x50c0 method 0x31c. */
1070 if (status & 0x00000002) {
1071 nouveau_graph_trap_info(dev, &trap);
1072 if (nouveau_ratelimit())
1073 nouveau_graph_dump_trap_info(dev,
1074 "PGRAPH_COMPUTE_QUERY", &trap);
1075 status &= ~0x00000002;
1076 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
1077 }
1078
1079 /* Unknown, never seen: 0x4 */
1080
1081 /* ILLEGAL_MTHD: You used a wrong method for this class. */
1082 if (status & 0x00000010) {
1083 nouveau_graph_trap_info(dev, &trap);
1084 if (nouveau_pgraph_intr_swmthd(dev, &trap))
1085 unhandled = 1;
1086 if (unhandled && nouveau_ratelimit())
1087 nouveau_graph_dump_trap_info(dev,
1088 "PGRAPH_ILLEGAL_MTHD", &trap);
1089 status &= ~0x00000010;
1090 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
1091 }
1092
1093 /* ILLEGAL_CLASS: You used a wrong class. */
1094 if (status & 0x00000020) {
1095 nouveau_graph_trap_info(dev, &trap);
1096 if (nouveau_ratelimit())
1097 nouveau_graph_dump_trap_info(dev,
1098 "PGRAPH_ILLEGAL_CLASS", &trap);
1099 status &= ~0x00000020;
1100 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
1101 }
1102
1103 /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
1104 if (status & 0x00000040) {
1105 nouveau_graph_trap_info(dev, &trap);
1106 if (nouveau_ratelimit())
1107 nouveau_graph_dump_trap_info(dev,
1108 "PGRAPH_DOUBLE_NOTIFY", &trap);
1109 status &= ~0x00000040;
1110 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
1111 }
1112
1113 /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
1114 if (status & 0x00001000) {
1115 nv_wr32(dev, 0x400500, 0x00000000);
1116 nv_wr32(dev, NV03_PGRAPH_INTR,
1117 NV_PGRAPH_INTR_CONTEXT_SWITCH);
1118 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
1119 NV40_PGRAPH_INTR_EN) &
1120 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
1121 nv_wr32(dev, 0x400500, 0x00010001);
1122
1123 nv50_graph_context_switch(dev);
1124
1125 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1126 }
1127
1128 /* BUFFER_NOTIFY: Your m2mf transfer finished */
1129 if (status & 0x00010000) {
1130 nouveau_graph_trap_info(dev, &trap);
1131 if (nouveau_ratelimit())
1132 nouveau_graph_dump_trap_info(dev,
1133 "PGRAPH_BUFFER_NOTIFY", &trap);
1134 status &= ~0x00010000;
1135 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
1136 }
1137
1138 /* DATA_ERROR: Invalid value for this method, or invalid
1139 * state in current PGRAPH context for this operation */
1140 if (status & 0x00100000) {
1141 nouveau_graph_trap_info(dev, &trap);
1142 if (nouveau_ratelimit()) {
1143 nouveau_graph_dump_trap_info(dev,
1144 "PGRAPH_DATA_ERROR", &trap);
1145 NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
1146 nouveau_print_enum_names(nv_rd32(dev, 0x400110),
1147 nv50_data_error_names);
1148 printk("\n");
1149 }
1150 status &= ~0x00100000;
1151 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
1152 }
1153 124
1154 /* TRAP: Something bad happened in the middle of command 125 drm_irq_uninstall(dev);
1155 * execution. Has a billion types, subtypes, and even 126 if (dev_priv->msi_enabled)
1156 * subsubtypes. */ 127 pci_disable_msi(dev->pdev);
1157 if (status & 0x00200000) {
1158 nv50_pgraph_trap_handler(dev);
1159 status &= ~0x00200000;
1160 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
1161 }
1162
1163 /* Unknown, never seen: 0x00400000 */
1164
1165 /* SINGLE_STEP: Happens on every method if you turned on
1166 * single stepping in 40008c */
1167 if (status & 0x01000000) {
1168 nouveau_graph_trap_info(dev, &trap);
1169 if (nouveau_ratelimit())
1170 nouveau_graph_dump_trap_info(dev,
1171 "PGRAPH_SINGLE_STEP", &trap);
1172 status &= ~0x01000000;
1173 nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
1174 }
1175
1176 /* 0x02000000 happens when you pause a ctxprog...
1177		 * but the only way I know this can happen is by
1178		 * poking the relevant MMIO register, and we don't
1179 * do that. */
1180
1181 if (status) {
1182 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
1183 status);
1184 nv_wr32(dev, NV03_PGRAPH_INTR, status);
1185 }
1186
1187 {
1188 const int isb = (1 << 16) | (1 << 0);
1189
1190 if ((nv_rd32(dev, 0x400500) & isb) != isb)
1191 nv_wr32(dev, 0x400500,
1192 nv_rd32(dev, 0x400500) | isb);
1193 }
1194 }
1195
1196 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
1197 if (nv_rd32(dev, 0x400824) & (1 << 31))
1198 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
1199} 128}
1200 129
1201static void 130void
1202nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) 131nouveau_irq_register(struct drm_device *dev, int status_bit,
132 void (*handler)(struct drm_device *))
1203{ 133{
1204 if (crtc & 1) 134 struct drm_nouveau_private *dev_priv = dev->dev_private;
1205 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); 135 unsigned long flags;
1206 136
1207 if (crtc & 2) 137 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
1208 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); 138 dev_priv->irq_handler[status_bit] = handler;
139 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
1209} 140}
1210 141
1211irqreturn_t 142void
1212nouveau_irq_handler(DRM_IRQ_ARGS) 143nouveau_irq_unregister(struct drm_device *dev, int status_bit)
1213{ 144{
1214 struct drm_device *dev = (struct drm_device *)arg;
1215 struct drm_nouveau_private *dev_priv = dev->dev_private; 145 struct drm_nouveau_private *dev_priv = dev->dev_private;
1216 uint32_t status;
1217 unsigned long flags; 146 unsigned long flags;
1218 147
1219 status = nv_rd32(dev, NV03_PMC_INTR_0);
1220 if (!status)
1221 return IRQ_NONE;
1222
1223 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 148 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
1224 149 dev_priv->irq_handler[status_bit] = NULL;
1225 if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
1226 nouveau_fifo_irq_handler(dev);
1227 status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
1228 }
1229
1230 if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
1231 if (dev_priv->card_type >= NV_50)
1232 nv50_pgraph_irq_handler(dev);
1233 else
1234 nouveau_pgraph_irq_handler(dev);
1235
1236 status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
1237 }
1238
1239 if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
1240 nouveau_crtc_irq_handler(dev, (status>>24)&3);
1241 status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
1242 }
1243
1244 if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
1245 NV_PMC_INTR_0_NV50_I2C_PENDING)) {
1246 nv50_display_irq_handler(dev);
1247 status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
1248 NV_PMC_INTR_0_NV50_I2C_PENDING);
1249 }
1250
1251 if (status)
1252 NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
1253
1254 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 150 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
1255
1256 return IRQ_HANDLED;
1257} 151}
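
The rewritten nouveau_irq.c above replaces the inline PFIFO/PGRAPH/CRTC/display dispatch with a table of per-bit handlers: the top-level ISR walks bits 0..31 of NV03_PMC_INTR_0 under context_switch_lock and calls dev_priv->irq_handler[i] for each set bit, with optional MSI on NV50+. A minimal sketch of how an engine could attach to that table through the new nouveau_irq_register()/nouveau_irq_unregister() helpers follows; the handler body and the status bit value are illustrative only and are not taken from this patch.

static void
example_engine_isr(struct drm_device *dev)
{
	/* acknowledge and service this engine's interrupt sources here */
}

static int
example_engine_init(struct drm_device *dev)
{
	/* status bit 12 is purely an example value, not from this patch */
	nouveau_irq_register(dev, 12, example_engine_isr);
	return 0;
}

static void
example_engine_fini(struct drm_device *dev)
{
	nouveau_irq_unregister(dev, 12);
}
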
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b42..224181193a1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
36 36
37#include "nouveau_drv.h" 37#include "nouveau_drv.h"
38#include "nouveau_pm.h" 38#include "nouveau_pm.h"
39#include "nouveau_mm.h"
40#include "nouveau_vm.h"
39 41
40/* 42/*
41 * NV10-NV40 tiling helpers 43 * NV10-NV40 tiling helpers
42 */ 44 */
43 45
44static void 46static void
45nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 47nv10_mem_update_tile_region(struct drm_device *dev,
46 uint32_t size, uint32_t pitch) 48 struct nouveau_tile_reg *tile, uint32_t addr,
49 uint32_t size, uint32_t pitch, uint32_t flags)
47{ 50{
48 struct drm_nouveau_private *dev_priv = dev->dev_private; 51 struct drm_nouveau_private *dev_priv = dev->dev_private;
49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 52 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
50 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 53 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
51 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 54 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
52 struct nouveau_tile_reg *tile = &dev_priv->tile[i]; 55 int i = tile - dev_priv->tile.reg;
56 unsigned long save;
53 57
54 tile->addr = addr; 58 nouveau_fence_unref(&tile->fence);
55 tile->size = size;
56 tile->used = !!pitch;
57 nouveau_fence_unref((void **)&tile->fence);
58 59
60 if (tile->pitch)
61 pfb->free_tile_region(dev, i);
62
63 if (pitch)
64 pfb->init_tile_region(dev, i, addr, size, pitch, flags);
65
66 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
59 pfifo->reassign(dev, false); 67 pfifo->reassign(dev, false);
60 pfifo->cache_pull(dev, false); 68 pfifo->cache_pull(dev, false);
61 69
62 nouveau_wait_for_idle(dev); 70 nouveau_wait_for_idle(dev);
63 71
64 pgraph->set_region_tiling(dev, i, addr, size, pitch); 72 pfb->set_tile_region(dev, i);
65 pfb->set_region_tiling(dev, i, addr, size, pitch); 73 pgraph->set_tile_region(dev, i);
66 74
67 pfifo->cache_pull(dev, true); 75 pfifo->cache_pull(dev, true);
68 pfifo->reassign(dev, true); 76 pfifo->reassign(dev, true);
77 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
69} 78}
70 79
71struct nouveau_tile_reg * 80static struct nouveau_tile_reg *
72nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, 81nv10_mem_get_tile_region(struct drm_device *dev, int i)
73 uint32_t pitch)
74{ 82{
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 83 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 84 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
77 struct nouveau_tile_reg *found = NULL;
78 unsigned long i, flags;
79
80 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
81
82 for (i = 0; i < pfb->num_tiles; i++) {
83 struct nouveau_tile_reg *tile = &dev_priv->tile[i];
84
85 if (tile->used)
86 /* Tile region in use. */
87 continue;
88 85
89 if (tile->fence && 86 spin_lock(&dev_priv->tile.lock);
90 !nouveau_fence_signalled(tile->fence, NULL))
91 /* Pending tile region. */
92 continue;
93
94 if (max(tile->addr, addr) <
95 min(tile->addr + tile->size, addr + size))
96 /* Kill an intersecting tile region. */
97 nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
98
99 if (pitch && !found) {
100 /* Free tile region. */
101 nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
102 found = tile;
103 }
104 }
105 87
106 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 88 if (!tile->used &&
89 (!tile->fence || nouveau_fence_signalled(tile->fence)))
90 tile->used = true;
91 else
92 tile = NULL;
107 93
108 return found; 94 spin_unlock(&dev_priv->tile.lock);
95 return tile;
109} 96}
110 97
111void 98void
112nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, 99nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
113 struct nouveau_fence *fence) 100 struct nouveau_fence *fence)
114{
115 if (fence) {
116 /* Mark it as pending. */
117 tile->fence = fence;
118 nouveau_fence_ref(fence);
119 }
120
121 tile->used = false;
122}
123
124/*
125 * NV50 VM helpers
126 */
127int
128nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
129 uint32_t flags, uint64_t phys)
130{ 101{
131 struct drm_nouveau_private *dev_priv = dev->dev_private; 102 struct drm_nouveau_private *dev_priv = dev->dev_private;
132 struct nouveau_gpuobj *pgt;
133 unsigned block;
134 int i;
135 103
136 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; 104 if (tile) {
137 size = (size >> 16) << 1; 105 spin_lock(&dev_priv->tile.lock);
138 106 if (fence) {
139 phys |= ((uint64_t)flags << 32); 107 /* Mark it as pending. */
140 phys |= 1; 108 tile->fence = fence;
141 if (dev_priv->vram_sys_base) { 109 nouveau_fence_ref(fence);
142 phys += dev_priv->vram_sys_base;
143 phys |= 0x30;
144 }
145
146 while (size) {
147 unsigned offset_h = upper_32_bits(phys);
148 unsigned offset_l = lower_32_bits(phys);
149 unsigned pte, end;
150
151 for (i = 7; i >= 0; i--) {
152 block = 1 << (i + 1);
153 if (size >= block && !(virt & (block - 1)))
154 break;
155 } 110 }
156 offset_l |= (i << 7);
157
158 phys += block << 15;
159 size -= block;
160
161 while (block) {
162 pgt = dev_priv->vm_vram_pt[virt >> 14];
163 pte = virt & 0x3ffe;
164
165 end = pte + block;
166 if (end > 16384)
167 end = 16384;
168 block -= (end - pte);
169 virt += (end - pte);
170
171 while (pte < end) {
172 nv_wo32(pgt, (pte * 4) + 0, offset_l);
173 nv_wo32(pgt, (pte * 4) + 4, offset_h);
174 pte += 2;
175 }
176 }
177 }
178 111
179 dev_priv->engine.instmem.flush(dev); 112 tile->used = false;
180 dev_priv->engine.fifo.tlb_flush(dev); 113 spin_unlock(&dev_priv->tile.lock);
181 dev_priv->engine.graph.tlb_flush(dev); 114 }
182 nv50_vm_flush(dev, 6);
183 return 0;
184} 115}
185 116
186void 117struct nouveau_tile_reg *
187nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 118nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
119 uint32_t pitch, uint32_t flags)
188{ 120{
189 struct drm_nouveau_private *dev_priv = dev->dev_private; 121 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_gpuobj *pgt; 122 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
191 unsigned pages, pte, end; 123 struct nouveau_tile_reg *tile, *found = NULL;
192 124 int i;
193 virt -= dev_priv->vm_vram_base;
194 pages = (size >> 16) << 1;
195 125
196 while (pages) { 126 for (i = 0; i < pfb->num_tiles; i++) {
197 pgt = dev_priv->vm_vram_pt[virt >> 29]; 127 tile = nv10_mem_get_tile_region(dev, i);
198 pte = (virt & 0x1ffe0000ULL) >> 15;
199 128
200 end = pte + pages; 129 if (pitch && !found) {
201 if (end > 16384) 130 found = tile;
202 end = 16384; 131 continue;
203 pages -= (end - pte);
204 virt += (end - pte) << 15;
205 132
206 while (pte < end) { 133 } else if (tile && tile->pitch) {
207 nv_wo32(pgt, (pte * 4), 0); 134 /* Kill an unused tile region. */
208 pte++; 135 nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
209 } 136 }
137
138 nv10_mem_put_tile_region(dev, tile, NULL);
210 } 139 }
211 140
212 dev_priv->engine.instmem.flush(dev); 141 if (found)
213 dev_priv->engine.fifo.tlb_flush(dev); 142 nv10_mem_update_tile_region(dev, found, addr, size,
214 dev_priv->engine.graph.tlb_flush(dev); 143 pitch, flags);
215 nv50_vm_flush(dev, 6); 144 return found;
216} 145}
217 146
218/* 147/*
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
312 return 0; 241 return 0;
313} 242}
314 243
315static void 244int
316nv50_vram_preinit(struct drm_device *dev)
317{
318 struct drm_nouveau_private *dev_priv = dev->dev_private;
319 int i, parts, colbits, rowbitsa, rowbitsb, banks;
320 u64 rowsize, predicted;
321 u32 r0, r4, rt, ru;
322
323 r0 = nv_rd32(dev, 0x100200);
324 r4 = nv_rd32(dev, 0x100204);
325 rt = nv_rd32(dev, 0x100250);
326 ru = nv_rd32(dev, 0x001540);
327 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
328
329 for (i = 0, parts = 0; i < 8; i++) {
330 if (ru & (0x00010000 << i))
331 parts++;
332 }
333
334 colbits = (r4 & 0x0000f000) >> 12;
335 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
336 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
337 banks = ((r4 & 0x01000000) ? 8 : 4);
338
339 rowsize = parts * banks * (1 << colbits) * 8;
340 predicted = rowsize << rowbitsa;
341 if (r0 & 0x00000004)
342 predicted += rowsize << rowbitsb;
343
344 if (predicted != dev_priv->vram_size) {
345 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
346 (u32)(dev_priv->vram_size >> 20));
347 NV_WARN(dev, "we calculated %dMiB VRAM\n",
348 (u32)(predicted >> 20));
349 }
350
351 dev_priv->vram_rblock_size = rowsize >> 12;
352 if (rt & 1)
353 dev_priv->vram_rblock_size *= 3;
354
355 NV_DEBUG(dev, "rblock %lld bytes\n",
356 (u64)dev_priv->vram_rblock_size << 12);
357}
358
359static void
360nvaa_vram_preinit(struct drm_device *dev)
361{
362 struct drm_nouveau_private *dev_priv = dev->dev_private;
363
364 /* To our knowledge, there's no large scale reordering of pages
365 * that occurs on IGP chipsets.
366 */
367 dev_priv->vram_rblock_size = 1;
368}
369
370static int
371nouveau_mem_detect(struct drm_device *dev) 245nouveau_mem_detect(struct drm_device *dev)
372{ 246{
373 struct drm_nouveau_private *dev_priv = dev->dev_private; 247 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,40 +255,25 @@ nouveau_mem_detect(struct drm_device *dev)
381 if (dev_priv->card_type < NV_50) { 255 if (dev_priv->card_type < NV_50) {
382 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); 256 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
383 dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; 257 dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
384 } else
385 if (dev_priv->card_type < NV_C0) {
386 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
387 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
388 dev_priv->vram_size &= 0xffffffff00ll;
389
390 switch (dev_priv->chipset) {
391 case 0xaa:
392 case 0xac:
393 case 0xaf:
394 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
395 dev_priv->vram_sys_base <<= 12;
396 nvaa_vram_preinit(dev);
397 break;
398 default:
399 nv50_vram_preinit(dev);
400 break;
401 }
402 } else { 258 } else {
403 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; 259 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
404 dev_priv->vram_size *= nv_rd32(dev, 0x121c74); 260 dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
405 } 261 }
406 262
407 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
408 if (dev_priv->vram_sys_base) {
409 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
410 dev_priv->vram_sys_base);
411 }
412
413 if (dev_priv->vram_size) 263 if (dev_priv->vram_size)
414 return 0; 264 return 0;
415 return -ENOMEM; 265 return -ENOMEM;
416} 266}
417 267
268bool
269nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
270{
271 if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
272 return true;
273
274 return false;
275}
276
418#if __OS_HAS_AGP 277#if __OS_HAS_AGP
419static unsigned long 278static unsigned long
420get_agp_mode(struct drm_device *dev, unsigned long mode) 279get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +406,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
547 if (ret) 406 if (ret)
548 return ret; 407 return ret;
549 408
550 ret = nouveau_mem_detect(dev);
551 if (ret)
552 return ret;
553
554 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); 409 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
555 410
556 ret = nouveau_ttm_global_init(dev_priv); 411 ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +421,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
566 return ret; 421 return ret;
567 } 422 }
568 423
569 dev_priv->fb_available_size = dev_priv->vram_size;
570 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
571 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
572 dev_priv->fb_mappable_pages =
573 pci_resource_len(dev->pdev, 1);
574 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
575
576 /* reserve space at end of VRAM for PRAMIN */ 424 /* reserve space at end of VRAM for PRAMIN */
577 if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || 425 if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
578 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) 426 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +431,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
583 else 431 else
584 dev_priv->ramin_rsvd_vram = (512 * 1024); 432 dev_priv->ramin_rsvd_vram = (512 * 1024);
585 433
434 ret = dev_priv->engine.vram.init(dev);
435 if (ret)
436 return ret;
437
438 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
439 if (dev_priv->vram_sys_base) {
440 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
441 dev_priv->vram_sys_base);
442 }
443
444 dev_priv->fb_available_size = dev_priv->vram_size;
445 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
446 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
447 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
448 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
449
586 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 450 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
587 dev_priv->fb_aper_free = dev_priv->fb_available_size; 451 dev_priv->fb_aper_free = dev_priv->fb_available_size;
588 452
@@ -799,3 +663,114 @@ nouveau_mem_timing_fini(struct drm_device *dev)
799 663
800 kfree(mem->timing); 664 kfree(mem->timing);
801} 665}
666
667static int
668nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
669{
670 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
671 struct nouveau_mm *mm;
672 u32 b_size;
673 int ret;
674
675 p_size = (p_size << PAGE_SHIFT) >> 12;
676 b_size = dev_priv->vram_rblock_size >> 12;
677
678 ret = nouveau_mm_init(&mm, 0, p_size, b_size);
679 if (ret)
680 return ret;
681
682 man->priv = mm;
683 return 0;
684}
685
686static int
687nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
688{
689 struct nouveau_mm *mm = man->priv;
690 int ret;
691
692 ret = nouveau_mm_fini(&mm);
693 if (ret)
694 return ret;
695
696 man->priv = NULL;
697 return 0;
698}
699
700static void
701nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
702 struct ttm_mem_reg *mem)
703{
704 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
705 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
706 struct drm_device *dev = dev_priv->dev;
707
708 vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
709}
710
711static int
712nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
713 struct ttm_buffer_object *bo,
714 struct ttm_placement *placement,
715 struct ttm_mem_reg *mem)
716{
717 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
718 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
719 struct drm_device *dev = dev_priv->dev;
720 struct nouveau_bo *nvbo = nouveau_bo(bo);
721 struct nouveau_vram *node;
722 u32 size_nc = 0;
723 int ret;
724
725 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
726 size_nc = 1 << nvbo->vma.node->type;
727
728 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
729 mem->page_alignment << PAGE_SHIFT, size_nc,
730 (nvbo->tile_flags >> 8) & 0xff, &node);
731 if (ret)
732 return ret;
733
734 mem->mm_node = node;
735 mem->start = node->offset >> PAGE_SHIFT;
736 return 0;
737}
738
739void
740nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
741{
742 struct nouveau_mm *mm = man->priv;
743 struct nouveau_mm_node *r;
744 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
745 int i;
746
747 mutex_lock(&mm->mutex);
748 list_for_each_entry(r, &mm->nodes, nl_entry) {
749 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
750 prefix, r->free ? "free" : "used", r->type,
751 ((u64)r->offset << 12),
752 (((u64)r->offset + r->length) << 12));
753 total += r->length;
754 ttotal[r->type] += r->length;
755 if (r->free)
756 tfree[r->type] += r->length;
757 else
758 tused[r->type] += r->length;
759 }
760 mutex_unlock(&mm->mutex);
761
762 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
763 for (i = 0; i < 3; i++) {
764 printk(KERN_DEBUG "%s type %d: 0x%010llx, "
765 "used 0x%010llx, free 0x%010llx\n", prefix,
766 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
767 }
768}
769
770const struct ttm_mem_type_manager_func nouveau_vram_manager = {
771 nouveau_vram_manager_init,
772 nouveau_vram_manager_fini,
773 nouveau_vram_manager_new,
774 nouveau_vram_manager_del,
775 nouveau_vram_manager_debug
776};
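
The tail of the nouveau_mem.c hunk above introduces a TTM memory-type manager (nouveau_vram_manager_init/fini/new/del/debug) that fronts the new per-chipset VRAM engine and the nouveau_mm allocator. A hedged sketch of how that function table would be selected from a driver's init_mem_type hook is below; the hook itself, the extern declaration of nouveau_vram_manager, and handling of the other TTM_PL_* types are outside this patch and shown only for orientation.

static int
example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* point TTM at the manager defined at the end of this file */
		man->func = &nouveau_vram_manager;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
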
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 000000000000..cdbb11eb701b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29static inline void
30region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
31{
32 list_del(&a->nl_entry);
33 list_del(&a->fl_entry);
34 kfree(a);
35}
36
37static struct nouveau_mm_node *
38region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
39{
40 struct nouveau_mm_node *b;
41
42 if (a->length == size)
43 return a;
44
45 b = kmalloc(sizeof(*b), GFP_KERNEL);
46 if (unlikely(b == NULL))
47 return NULL;
48
49 b->offset = a->offset;
50 b->length = size;
51 b->free = a->free;
52 b->type = a->type;
53 a->offset += size;
54 a->length -= size;
55 list_add_tail(&b->nl_entry, &a->nl_entry);
56 if (b->free)
57 list_add_tail(&b->fl_entry, &a->fl_entry);
58 return b;
59}
60
61static struct nouveau_mm_node *
62nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
63{
64 struct nouveau_mm_node *prev, *next;
65
66 /* try to merge with free adjacent entries of same type */
67 prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
68 if (this->nl_entry.prev != &rmm->nodes) {
69 if (prev->free && prev->type == this->type) {
70 prev->length += this->length;
71 region_put(rmm, this);
72 this = prev;
73 }
74 }
75
76 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
77 if (this->nl_entry.next != &rmm->nodes) {
78 if (next->free && next->type == this->type) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(rmm, this);
82 this = next;
83 }
84 }
85
86 return this;
87}
88
89void
90nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
91{
92 u32 block_s, block_l;
93
94 this->free = true;
95 list_add(&this->fl_entry, &rmm->free);
96 this = nouveau_mm_merge(rmm, this);
97
98 /* any entirely free blocks now? we'll want to remove typing
99	 * on them now so they can be used for any memory allocation
100 */
101 block_s = roundup(this->offset, rmm->block_size);
102 if (block_s + rmm->block_size > this->offset + this->length)
103 return;
104
105 /* split off any still-typed region at the start */
106 if (block_s != this->offset) {
107 if (!region_split(rmm, this, block_s - this->offset))
108 return;
109 }
110
111 /* split off the soon-to-be-untyped block(s) */
112 block_l = rounddown(this->length, rmm->block_size);
113 if (block_l != this->length) {
114 this = region_split(rmm, this, block_l);
115 if (!this)
116 return;
117 }
118
119 /* mark as having no type, and retry merge with any adjacent
120 * untyped blocks
121 */
122 this->type = 0;
123 nouveau_mm_merge(rmm, this);
124}
125
126int
127nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
128 u32 align, struct nouveau_mm_node **pnode)
129{
130 struct nouveau_mm_node *this, *tmp, *next;
131 u32 splitoff, avail, alloc;
132
133 list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
134 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
135 if (this->nl_entry.next == &rmm->nodes)
136 next = NULL;
137
138 /* skip wrongly typed blocks */
139 if (this->type && this->type != type)
140 continue;
141
142 /* account for alignment */
143 splitoff = this->offset & (align - 1);
144 if (splitoff)
145 splitoff = align - splitoff;
146
147 if (this->length <= splitoff)
148 continue;
149
150 /* determine total memory available from this, and
151 * the next block (if appropriate)
152 */
153 avail = this->length;
154 if (next && next->free && (!next->type || next->type == type))
155 avail += next->length;
156
157 avail -= splitoff;
158
159 /* determine allocation size */
160 if (size_nc) {
161 alloc = min(avail, size);
162 alloc = rounddown(alloc, size_nc);
163 if (alloc == 0)
164 continue;
165 } else {
166 alloc = size;
167 if (avail < alloc)
168 continue;
169 }
170
171 /* untyped block, split off a chunk that's a multiple
172 * of block_size and type it
173 */
174 if (!this->type) {
175 u32 block = roundup(alloc + splitoff, rmm->block_size);
176 if (this->length < block)
177 continue;
178
179 this = region_split(rmm, this, block);
180 if (!this)
181 return -ENOMEM;
182
183 this->type = type;
184 }
185
186 /* stealing memory from adjacent block */
187 if (alloc > this->length) {
188 u32 amount = alloc - (this->length - splitoff);
189
190 if (!next->type) {
191 amount = roundup(amount, rmm->block_size);
192
193 next = region_split(rmm, next, amount);
194 if (!next)
195 return -ENOMEM;
196
197 next->type = type;
198 }
199
200 this->length += amount;
201 next->offset += amount;
202 next->length -= amount;
203 if (!next->length) {
204 list_del(&next->nl_entry);
205 list_del(&next->fl_entry);
206 kfree(next);
207 }
208 }
209
210 if (splitoff) {
211 if (!region_split(rmm, this, splitoff))
212 return -ENOMEM;
213 }
214
215 this = region_split(rmm, this, alloc);
216 if (this == NULL)
217 return -ENOMEM;
218
219 this->free = false;
220 list_del(&this->fl_entry);
221 *pnode = this;
222 return 0;
223 }
224
225 return -ENOMEM;
226}
227
228int
229nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
230{
231 struct nouveau_mm *rmm;
232 struct nouveau_mm_node *heap;
233
234 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
235 if (!heap)
236 return -ENOMEM;
237 heap->free = true;
238 heap->offset = roundup(offset, block);
239 heap->length = rounddown(offset + length, block) - heap->offset;
240
241 rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
242 if (!rmm) {
243 kfree(heap);
244 return -ENOMEM;
245 }
246 rmm->block_size = block;
247 mutex_init(&rmm->mutex);
248 INIT_LIST_HEAD(&rmm->nodes);
249 INIT_LIST_HEAD(&rmm->free);
250 list_add(&heap->nl_entry, &rmm->nodes);
251 list_add(&heap->fl_entry, &rmm->free);
252
253 *prmm = rmm;
254 return 0;
255}
256
257int
258nouveau_mm_fini(struct nouveau_mm **prmm)
259{
260 struct nouveau_mm *rmm = *prmm;
261 struct nouveau_mm_node *heap =
262 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
263
264 if (!list_is_singular(&rmm->nodes))
265 return -EBUSY;
266
267 kfree(heap);
268 kfree(rmm);
269 *prmm = NULL;
270 return 0;
271}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 000000000000..250e642de0a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_REGION_H__
26#define __NOUVEAU_REGION_H__
27
28struct nouveau_mm_node {
29 struct list_head nl_entry;
30 struct list_head fl_entry;
31 struct list_head rl_entry;
32
33 bool free;
34 int type;
35
36 u32 offset;
37 u32 length;
38};
39
40struct nouveau_mm {
41 struct list_head nodes;
42 struct list_head free;
43
44 struct mutex mutex;
45
46 u32 block_size;
47};
48
49int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
50int nouveau_mm_fini(struct nouveau_mm **);
51int nouveau_mm_pre(struct nouveau_mm *);
52int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
53 u32 align, struct nouveau_mm_node **);
54void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
55
56int nv50_vram_init(struct drm_device *);
57int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
58 u32 memtype, struct nouveau_vram **);
59void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
60bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
61
62#endif
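
nouveau_mm.h above exposes the allocator as four calls: nouveau_mm_init() seeds a heap of caller-chosen units (the VRAM manager in nouveau_mem.c feeds it 4 KiB units), nouveau_mm_get() hands out a typed node, nouveau_mm_put() frees and re-merges it, and nouveau_mm_fini() tears the heap down once only the seed node remains. A minimal usage sketch under those assumptions, with arbitrary example sizes and the caller taking mm->mutex since get/put do no internal locking:

static int
example_mm_usage(void)
{
	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	/* heap of 0x10000 units starting at 0, typed in 0x100-unit blocks */
	ret = nouveau_mm_init(&mm, 0, 0x10000, 0x100);
	if (ret)
		return ret;

	mutex_lock(&mm->mutex);
	/* 0x200 units of memory type 1, 0x10-unit alignment; size_nc == 0
	 * means the full size is required rather than any multiple of size_nc */
	ret = nouveau_mm_get(mm, 1, 0x200, 0, 0x10, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);
	mutex_unlock(&mm->mutex);

	return nouveau_mm_fini(&mm);
}
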
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8c658b..a050b7b69782 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
99 int size, uint32_t *b_offset) 99 int size, uint32_t *b_offset)
100{ 100{
101 struct drm_device *dev = chan->dev; 101 struct drm_device *dev = chan->dev;
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103 struct nouveau_gpuobj *nobj = NULL; 102 struct nouveau_gpuobj *nobj = NULL;
104 struct drm_mm_node *mem; 103 struct drm_mm_node *mem;
105 uint32_t offset; 104 uint32_t offset;
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
113 return -ENOMEM; 112 return -ENOMEM;
114 } 113 }
115 114
116 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; 115 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
117 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { 116 target = NV_MEM_TARGET_VRAM;
118 target = NV_DMA_TARGET_VIDMEM; 117 else
119 } else 118 target = NV_MEM_TARGET_GART;
120 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { 119 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
121 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
122 dev_priv->card_type < NV_50) {
123 ret = nouveau_sgdma_get_page(dev, offset, &offset);
124 if (ret)
125 return ret;
126 target = NV_DMA_TARGET_PCI;
127 } else {
128 target = NV_DMA_TARGET_AGP;
129 if (dev_priv->card_type >= NV_50)
130 offset += dev_priv->vm_gart_base;
131 }
132 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
134 chan->notifier_bo->bo.mem.mem_type);
135 return -EINVAL;
136 }
137 offset += mem->start; 120 offset += mem->start;
138 121
139 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, 122 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
140 mem->size, NV_DMA_ACCESS_RW, target, 123 mem->size, NV_MEM_ACCESS_RW, target,
141 &nobj); 124 &nobj);
142 if (ret) { 125 if (ret) {
143 drm_mm_put_block(mem); 126 drm_mm_put_block(mem);
@@ -185,11 +168,11 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
185 struct nouveau_channel *chan; 168 struct nouveau_channel *chan;
186 int ret; 169 int ret;
187 170
188 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); 171 chan = nouveau_channel_get(dev, file_priv, na->channel);
172 if (IS_ERR(chan))
173 return PTR_ERR(chan);
189 174
190 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 175 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
191 if (ret) 176 nouveau_channel_put(&chan);
192 return ret; 177 return ret;
193
194 return 0;
195} 178}
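
The nouveau_notifier.c hunk above drops the NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro in favour of explicit channel references: nouveau_channel_get() returns a referenced channel or an ERR_PTR, and nouveau_channel_put() drops the reference on every exit path. A hedged sketch of the same pattern applied to a hypothetical ioctl (the function name and its body are illustrative only):

static int
example_channel_ioctl(struct drm_device *dev, int chid,
		      struct drm_file *file_priv)
{
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, chid);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = 0;	/* do per-channel work while the reference is held */

	nouveau_channel_put(&chan);
	return ret;
}
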
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572adca02a..55c9fdcfa67f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_ramht.h" 37#include "nouveau_ramht.h"
38#include "nouveau_vm.h"
39
40struct nouveau_gpuobj_method {
41 struct list_head head;
42 u32 mthd;
43 int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
44};
45
46struct nouveau_gpuobj_class {
47 struct list_head head;
48 struct list_head methods;
49 u32 id;
50 u32 engine;
51};
52
53int
54nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
55{
56 struct drm_nouveau_private *dev_priv = dev->dev_private;
57 struct nouveau_gpuobj_class *oc;
58
59 oc = kzalloc(sizeof(*oc), GFP_KERNEL);
60 if (!oc)
61 return -ENOMEM;
62
63 INIT_LIST_HEAD(&oc->methods);
64 oc->id = class;
65 oc->engine = engine;
66 list_add(&oc->head, &dev_priv->classes);
67 return 0;
68}
69
70int
71nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
72 int (*exec)(struct nouveau_channel *, u32, u32, u32))
73{
74 struct drm_nouveau_private *dev_priv = dev->dev_private;
75 struct nouveau_gpuobj_method *om;
76 struct nouveau_gpuobj_class *oc;
77
78 list_for_each_entry(oc, &dev_priv->classes, head) {
79 if (oc->id == class)
80 goto found;
81 }
82
83 return -EINVAL;
84
85found:
86 om = kzalloc(sizeof(*om), GFP_KERNEL);
87 if (!om)
88 return -ENOMEM;
89
90 om->mthd = mthd;
91 om->exec = exec;
92 list_add(&om->head, &oc->methods);
93 return 0;
94}
95
96int
97nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
98 u32 class, u32 mthd, u32 data)
99{
100 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
101 struct nouveau_gpuobj_method *om;
102 struct nouveau_gpuobj_class *oc;
103
104 list_for_each_entry(oc, &dev_priv->classes, head) {
105 if (oc->id != class)
106 continue;
107
108 list_for_each_entry(om, &oc->methods, head) {
109 if (om->mthd == mthd)
110 return om->exec(chan, class, mthd, data);
111 }
112 }
113
114 return -ENOENT;
115}
116
117int
118nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
119 u32 class, u32 mthd, u32 data)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nouveau_channel *chan = NULL;
123 unsigned long flags;
124 int ret = -EINVAL;
125
126 spin_lock_irqsave(&dev_priv->channels.lock, flags);
127 if (chid > 0 && chid < dev_priv->engine.fifo.channels)
128 chan = dev_priv->channels.ptr[chid];
129 if (chan)
130 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
131 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
132 return ret;
133}
38 134
39/* NVidia uses context objects to drive drawing operations. 135/* NVidia uses context objects to drive drawing operations.
40 136
@@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
73 struct nouveau_gpuobj **gpuobj_ret) 169 struct nouveau_gpuobj **gpuobj_ret)
74{ 170{
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 171 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_engine *engine = &dev_priv->engine; 172 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
77 struct nouveau_gpuobj *gpuobj; 173 struct nouveau_gpuobj *gpuobj;
78 struct drm_mm_node *ramin = NULL; 174 struct drm_mm_node *ramin = NULL;
79 int ret; 175 int ret, i;
80 176
81 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", 177 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
82 chan ? chan->id : -1, size, align, flags); 178 chan ? chan->id : -1, size, align, flags);
83 179
84 if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
85 return -EINVAL;
86
87 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 180 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
88 if (!gpuobj) 181 if (!gpuobj)
89 return -ENOMEM; 182 return -ENOMEM;
@@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
98 spin_unlock(&dev_priv->ramin_lock); 191 spin_unlock(&dev_priv->ramin_lock);
99 192
100 if (chan) { 193 if (chan) {
101 NV_DEBUG(dev, "channel heap\n");
102
103 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); 194 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
104 if (ramin) 195 if (ramin)
105 ramin = drm_mm_get_block(ramin, size, align); 196 ramin = drm_mm_get_block(ramin, size, align);
106
107 if (!ramin) { 197 if (!ramin) {
108 nouveau_gpuobj_ref(NULL, &gpuobj); 198 nouveau_gpuobj_ref(NULL, &gpuobj);
109 return -ENOMEM; 199 return -ENOMEM;
110 } 200 }
111 } else {
112 NV_DEBUG(dev, "global heap\n");
113
114 /* allocate backing pages, sets vinst */
115 ret = engine->instmem.populate(dev, gpuobj, &size);
116 if (ret) {
117 nouveau_gpuobj_ref(NULL, &gpuobj);
118 return ret;
119 }
120
121 /* try and get aperture space */
122 do {
123 if (drm_mm_pre_get(&dev_priv->ramin_heap))
124 return -ENOMEM;
125 201
126 spin_lock(&dev_priv->ramin_lock); 202 gpuobj->pinst = chan->ramin->pinst;
127 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, 203 if (gpuobj->pinst != ~0)
128 align, 0); 204 gpuobj->pinst += ramin->start;
129 if (ramin == NULL) {
130 spin_unlock(&dev_priv->ramin_lock);
131 nouveau_gpuobj_ref(NULL, &gpuobj);
132 return -ENOMEM;
133 }
134
135 ramin = drm_mm_get_block_atomic(ramin, size, align);
136 spin_unlock(&dev_priv->ramin_lock);
137 } while (ramin == NULL);
138
139 /* on nv50 it's ok to fail, we have a fallback path */
140 if (!ramin && dev_priv->card_type < NV_50) {
141 nouveau_gpuobj_ref(NULL, &gpuobj);
142 return -ENOMEM;
143 }
144 }
145 205
146 /* if we got a chunk of the aperture, map pages into it */ 206 gpuobj->cinst = ramin->start;
147 gpuobj->im_pramin = ramin; 207 gpuobj->vinst = ramin->start + chan->ramin->vinst;
148 if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { 208 gpuobj->node = ramin;
149 ret = engine->instmem.bind(dev, gpuobj); 209 } else {
210 ret = instmem->get(gpuobj, size, align);
150 if (ret) { 211 if (ret) {
151 nouveau_gpuobj_ref(NULL, &gpuobj); 212 nouveau_gpuobj_ref(NULL, &gpuobj);
152 return ret; 213 return ret;
153 } 214 }
154 }
155 215
156 /* calculate the various different addresses for the object */ 216 ret = -ENOSYS;
157 if (chan) { 217 if (!(flags & NVOBJ_FLAG_DONT_MAP))
158 gpuobj->pinst = chan->ramin->pinst; 218 ret = instmem->map(gpuobj);
159 if (gpuobj->pinst != ~0) 219 if (ret)
160 gpuobj->pinst += gpuobj->im_pramin->start;
161
162 if (dev_priv->card_type < NV_50) {
163 gpuobj->cinst = gpuobj->pinst;
164 } else {
165 gpuobj->cinst = gpuobj->im_pramin->start;
166 gpuobj->vinst = gpuobj->im_pramin->start +
167 chan->ramin->vinst;
168 }
169 } else {
170 if (gpuobj->im_pramin)
171 gpuobj->pinst = gpuobj->im_pramin->start;
172 else
173 gpuobj->pinst = ~0; 220 gpuobj->pinst = ~0;
174 gpuobj->cinst = 0xdeadbeef; 221
222 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
175 } 223 }
176 224
177 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 225 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
178 int i;
179
180 for (i = 0; i < gpuobj->size; i += 4) 226 for (i = 0; i < gpuobj->size; i += 4)
181 nv_wo32(gpuobj, i, 0); 227 nv_wo32(gpuobj, i, 0);
182 engine->instmem.flush(dev); 228 instmem->flush(dev);
183 } 229 }
184 230
185 231
@@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev)
195 NV_DEBUG(dev, "\n"); 241 NV_DEBUG(dev, "\n");
196 242
197 INIT_LIST_HEAD(&dev_priv->gpuobj_list); 243 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
244 INIT_LIST_HEAD(&dev_priv->classes);
198 spin_lock_init(&dev_priv->ramin_lock); 245 spin_lock_init(&dev_priv->ramin_lock);
199 dev_priv->ramin_base = ~0; 246 dev_priv->ramin_base = ~0;
200 247
@@ -205,9 +252,20 @@ void
205nouveau_gpuobj_takedown(struct drm_device *dev) 252nouveau_gpuobj_takedown(struct drm_device *dev)
206{ 253{
207 struct drm_nouveau_private *dev_priv = dev->dev_private; 254 struct drm_nouveau_private *dev_priv = dev->dev_private;
255 struct nouveau_gpuobj_method *om, *tm;
256 struct nouveau_gpuobj_class *oc, *tc;
208 257
209 NV_DEBUG(dev, "\n"); 258 NV_DEBUG(dev, "\n");
210 259
260 list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
261 list_for_each_entry_safe(om, tm, &oc->methods, head) {
262 list_del(&om->head);
263 kfree(om);
264 }
265 list_del(&oc->head);
266 kfree(oc);
267 }
268
211 BUG_ON(!list_empty(&dev_priv->gpuobj_list)); 269 BUG_ON(!list_empty(&dev_priv->gpuobj_list));
212} 270}
213 271
@@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref)
219 container_of(ref, struct nouveau_gpuobj, refcount); 277 container_of(ref, struct nouveau_gpuobj, refcount);
220 struct drm_device *dev = gpuobj->dev; 278 struct drm_device *dev = gpuobj->dev;
221 struct drm_nouveau_private *dev_priv = dev->dev_private; 279 struct drm_nouveau_private *dev_priv = dev->dev_private;
222 struct nouveau_engine *engine = &dev_priv->engine; 280 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
223 int i; 281 int i;
224 282
225 NV_DEBUG(dev, "gpuobj %p\n", gpuobj); 283 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
226 284
227 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { 285 if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
228 for (i = 0; i < gpuobj->size; i += 4) 286 for (i = 0; i < gpuobj->size; i += 4)
229 nv_wo32(gpuobj, i, 0); 287 nv_wo32(gpuobj, i, 0);
230 engine->instmem.flush(dev); 288 instmem->flush(dev);
231 } 289 }
232 290
233 if (gpuobj->dtor) 291 if (gpuobj->dtor)
234 gpuobj->dtor(dev, gpuobj); 292 gpuobj->dtor(dev, gpuobj);
235 293
236 if (gpuobj->im_backing) 294 if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
237 engine->instmem.clear(dev, gpuobj); 295 if (gpuobj->node) {
296 instmem->unmap(gpuobj);
297 instmem->put(gpuobj);
298 }
299 } else {
300 if (gpuobj->node) {
301 spin_lock(&dev_priv->ramin_lock);
302 drm_mm_put_block(gpuobj->node);
303 spin_unlock(&dev_priv->ramin_lock);
304 }
305 }
238 306
239 spin_lock(&dev_priv->ramin_lock); 307 spin_lock(&dev_priv->ramin_lock);
240 if (gpuobj->im_pramin)
241 drm_mm_put_block(gpuobj->im_pramin);
242 list_del(&gpuobj->list); 308 list_del(&gpuobj->list);
243 spin_unlock(&dev_priv->ramin_lock); 309 spin_unlock(&dev_priv->ramin_lock);
244 310
@@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
278 kref_init(&gpuobj->refcount); 344 kref_init(&gpuobj->refcount);
279 gpuobj->size = size; 345 gpuobj->size = size;
280 gpuobj->pinst = pinst; 346 gpuobj->pinst = pinst;
281 gpuobj->cinst = 0xdeadbeef; 347 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
282 gpuobj->vinst = vinst; 348 gpuobj->vinst = vinst;
283 349
284 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 350 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
335 The method below creates a DMA object in instance RAM and returns a handle 401 The method below creates a DMA object in instance RAM and returns a handle
336 to it that can be used to set up context objects. 402 to it that can be used to set up context objects.
337*/ 403*/
338int 404
339nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, 405void
340 uint64_t offset, uint64_t size, int access, 406nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
341 int target, struct nouveau_gpuobj **gpuobj) 407 u64 base, u64 size, int target, int access,
408 u32 type, u32 comp)
342{ 409{
343 struct drm_device *dev = chan->dev; 410 struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
344 struct drm_nouveau_private *dev_priv = dev->dev_private; 411 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
345 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; 412 u32 flags0;
346 int ret;
347 413
348 NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", 414 flags0 = (comp << 29) | (type << 22) | class;
349 chan->id, class, offset, size); 415 flags0 |= 0x00100000;
350 NV_DEBUG(dev, "access=%d target=%d\n", access, target); 416
417 switch (access) {
418 case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
419 case NV_MEM_ACCESS_RW:
420 case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
421 default:
422 break;
423 }
351 424
352 switch (target) { 425 switch (target) {
353 case NV_DMA_TARGET_AGP: 426 case NV_MEM_TARGET_VRAM:
354 offset += dev_priv->gart_info.aper_base; 427 flags0 |= 0x00010000;
428 break;
429 case NV_MEM_TARGET_PCI:
430 flags0 |= 0x00020000;
431 break;
432 case NV_MEM_TARGET_PCI_NOSNOOP:
433 flags0 |= 0x00030000;
355 break; 434 break;
435 case NV_MEM_TARGET_GART:
436 base += dev_priv->gart_info.aper_base;
356 default: 437 default:
438 flags0 &= ~0x00100000;
357 break; 439 break;
358 } 440 }
359 441
360 ret = nouveau_gpuobj_new(dev, chan, 442 /* convert to base + limit */
361 nouveau_gpuobj_class_instmem_size(dev, class), 443 size = (base + size) - 1;
362 16, NVOBJ_FLAG_ZERO_ALLOC |
363 NVOBJ_FLAG_ZERO_FREE, gpuobj);
364 if (ret) {
365 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
366 return ret;
367 }
368 444
369 if (dev_priv->card_type < NV_50) { 445 nv_wo32(obj, offset + 0x00, flags0);
370 uint32_t frame, adjust, pte_flags = 0; 446 nv_wo32(obj, offset + 0x04, lower_32_bits(size));
371 447 nv_wo32(obj, offset + 0x08, lower_32_bits(base));
372 if (access != NV_DMA_ACCESS_RO) 448 nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
373 pte_flags |= (1<<1); 449 upper_32_bits(base));
374 adjust = offset & 0x00000fff; 450 nv_wo32(obj, offset + 0x10, 0x00000000);
375 frame = offset & ~0x00000fff; 451 nv_wo32(obj, offset + 0x14, 0x00000000);
376
377 nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
378 (access << 14) | (target << 16) |
379 class));
380 nv_wo32(*gpuobj, 4, size - 1);
381 nv_wo32(*gpuobj, 8, frame | pte_flags);
382 nv_wo32(*gpuobj, 12, frame | pte_flags);
383 } else {
384 uint64_t limit = offset + size - 1;
385 uint32_t flags0, flags5;
386 452
387 if (target == NV_DMA_TARGET_VIDMEM) { 453 pinstmem->flush(obj->dev);
388 flags0 = 0x00190000; 454}
389 flags5 = 0x00010000;
390 } else {
391 flags0 = 0x7fc00000;
392 flags5 = 0x00080000;
393 }
394 455
395 nv_wo32(*gpuobj, 0, flags0 | class); 456int
396 nv_wo32(*gpuobj, 4, lower_32_bits(limit)); 457nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
397 nv_wo32(*gpuobj, 8, lower_32_bits(offset)); 458 int target, int access, u32 type, u32 comp,
398 nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | 459 struct nouveau_gpuobj **pobj)
399 (upper_32_bits(offset) & 0xff)); 460{
400 nv_wo32(*gpuobj, 20, flags5); 461 struct drm_device *dev = chan->dev;
401 } 462 int ret;
402 463
403 instmem->flush(dev); 464 ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
465 if (ret)
466 return ret;
404 467
405 (*gpuobj)->engine = NVOBJ_ENGINE_SW; 468 nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
406 (*gpuobj)->class = class; 469 access, type, comp);
407 return 0; 470 return 0;
408} 471}
409 472
410int 473int
411nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, 474nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
412 uint64_t offset, uint64_t size, int access, 475 u64 size, int access, int target,
413 struct nouveau_gpuobj **gpuobj, 476 struct nouveau_gpuobj **pobj)
414 uint32_t *o_ret)
415{ 477{
478 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
416 struct drm_device *dev = chan->dev; 479 struct drm_device *dev = chan->dev;
417 struct drm_nouveau_private *dev_priv = dev->dev_private; 480 struct nouveau_gpuobj *obj;
481 u32 flags0, flags2;
418 int ret; 482 int ret;
419 483
420 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || 484 if (dev_priv->card_type >= NV_50) {
421 (dev_priv->card_type >= NV_50 && 485 u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
422 dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { 486 u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
423 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 487
424 offset + dev_priv->vm_gart_base, 488 return nv50_gpuobj_dma_new(chan, class, base, size,
425 size, access, NV_DMA_TARGET_AGP, 489 target, access, type, comp, pobj);
426 gpuobj); 490 }
427 if (o_ret) 491
428 *o_ret = 0; 492 if (target == NV_MEM_TARGET_GART) {
429 } else 493 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
430 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { 494 target = NV_MEM_TARGET_PCI_NOSNOOP;
431 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); 495 base += dev_priv->gart_info.aper_base;
432 if (offset & ~0xffffffffULL) { 496 } else
433 NV_ERROR(dev, "obj offset exceeds 32-bits\n"); 497 if (base != 0) {
434 return -EINVAL; 498 base = nouveau_sgdma_get_physical(dev, base);
499 target = NV_MEM_TARGET_PCI;
500 } else {
501 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
502 return 0;
435 } 503 }
436 if (o_ret)
437 *o_ret = (uint32_t)offset;
438 ret = (*gpuobj != NULL) ? 0 : -EINVAL;
439 } else {
440 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
441 return -EINVAL;
442 } 504 }
443 505
444 return ret; 506 flags0 = class;
507 flags0 |= 0x00003000; /* PT present, PT linear */
508 flags2 = 0;
509
510 switch (target) {
511 case NV_MEM_TARGET_PCI:
512 flags0 |= 0x00020000;
513 break;
514 case NV_MEM_TARGET_PCI_NOSNOOP:
515 flags0 |= 0x00030000;
516 break;
517 default:
518 break;
519 }
520
521 switch (access) {
522 case NV_MEM_ACCESS_RO:
523 flags0 |= 0x00004000;
524 break;
525 case NV_MEM_ACCESS_WO:
526 flags0 |= 0x00008000;
527 default:
528 flags2 |= 0x00000002;
529 break;
530 }
531
532 flags0 |= (base & 0x00000fff) << 20;
533 flags2 |= (base & 0xfffff000);
534
535 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
536 if (ret)
537 return ret;
538
539 nv_wo32(obj, 0x00, flags0);
540 nv_wo32(obj, 0x04, size - 1);
541 nv_wo32(obj, 0x08, flags2);
542 nv_wo32(obj, 0x0c, flags2);
543
544 obj->engine = NVOBJ_ENGINE_SW;
545 obj->class = class;
546 *pobj = obj;
547 return 0;
445} 548}
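A hedged example of calling the reworked nouveau_gpuobj_dma_new() with the new NV_MEM_* access and target constants; it mirrors the channel-init hunks further down in this file and assumes an existing chan and dev_priv in scope.

struct nouveau_gpuobj *vram = NULL, *gart = NULL;
int ret;

/* linear ctxdma covering all of VRAM */
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
			     dev_priv->fb_available_size,
			     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, &vram);
if (ret == 0)
	/* and one covering the GART aperture */
	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
				     dev_priv->gart_info.aper_size,
				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
				     &gart);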
446 549
447/* Context objects in the instance RAM have the following structure. 550/* Context objects in the instance RAM have the following structure.
@@ -495,82 +598,122 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
495 entry[5]: 598 entry[5]:
496 set to 0? 599 set to 0?
497*/ 600*/
601static int
602nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
603 struct nouveau_gpuobj **gpuobj_ret)
604{
605 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
606 struct nouveau_gpuobj *gpuobj;
607
608 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
609 if (!gpuobj)
610 return -ENOMEM;
611 gpuobj->dev = chan->dev;
612 gpuobj->engine = NVOBJ_ENGINE_SW;
613 gpuobj->class = class;
614 kref_init(&gpuobj->refcount);
615 gpuobj->cinst = 0x40;
616
617 spin_lock(&dev_priv->ramin_lock);
618 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
619 spin_unlock(&dev_priv->ramin_lock);
620 *gpuobj_ret = gpuobj;
621 return 0;
622}
623
498int 624int
499nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, 625nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
500 struct nouveau_gpuobj **gpuobj)
501{ 626{
627 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
502 struct drm_device *dev = chan->dev; 628 struct drm_device *dev = chan->dev;
503 struct drm_nouveau_private *dev_priv = dev->dev_private; 629 struct nouveau_gpuobj_class *oc;
630 struct nouveau_gpuobj *gpuobj;
504 int ret; 631 int ret;
505 632
506 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); 633 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
507 634
635 list_for_each_entry(oc, &dev_priv->classes, head) {
636 if (oc->id == class)
637 goto found;
638 }
639
640 NV_ERROR(dev, "illegal object class: 0x%x\n", class);
641 return -EINVAL;
642
643found:
644 switch (oc->engine) {
645 case NVOBJ_ENGINE_SW:
646 ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
647 if (ret)
648 return ret;
649 goto insert;
650 case NVOBJ_ENGINE_GR:
651 if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) {
652 struct nouveau_pgraph_engine *pgraph =
653 &dev_priv->engine.graph;
654
655 ret = pgraph->create_context(chan);
656 if (ret)
657 return ret;
658 }
659 break;
660 case NVOBJ_ENGINE_CRYPT:
661 if (!chan->crypt_ctx) {
662 struct nouveau_crypt_engine *pcrypt =
663 &dev_priv->engine.crypt;
664
665 ret = pcrypt->create_context(chan);
666 if (ret)
667 return ret;
668 }
669 break;
670 }
671
508 ret = nouveau_gpuobj_new(dev, chan, 672 ret = nouveau_gpuobj_new(dev, chan,
509 nouveau_gpuobj_class_instmem_size(dev, class), 673 nouveau_gpuobj_class_instmem_size(dev, class),
510 16, 674 16,
511 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, 675 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
512 gpuobj); 676 &gpuobj);
513 if (ret) { 677 if (ret) {
514 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); 678 NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
515 return ret; 679 return ret;
516 } 680 }
517 681
518 if (dev_priv->card_type >= NV_50) { 682 if (dev_priv->card_type >= NV_50) {
519 nv_wo32(*gpuobj, 0, class); 683 nv_wo32(gpuobj, 0, class);
520 nv_wo32(*gpuobj, 20, 0x00010000); 684 nv_wo32(gpuobj, 20, 0x00010000);
521 } else { 685 } else {
522 switch (class) { 686 switch (class) {
523 case NV_CLASS_NULL: 687 case NV_CLASS_NULL:
524 nv_wo32(*gpuobj, 0, 0x00001030); 688 nv_wo32(gpuobj, 0, 0x00001030);
525 nv_wo32(*gpuobj, 4, 0xFFFFFFFF); 689 nv_wo32(gpuobj, 4, 0xFFFFFFFF);
526 break; 690 break;
527 default: 691 default:
528 if (dev_priv->card_type >= NV_40) { 692 if (dev_priv->card_type >= NV_40) {
529 nv_wo32(*gpuobj, 0, class); 693 nv_wo32(gpuobj, 0, class);
530#ifdef __BIG_ENDIAN 694#ifdef __BIG_ENDIAN
531 nv_wo32(*gpuobj, 8, 0x01000000); 695 nv_wo32(gpuobj, 8, 0x01000000);
532#endif 696#endif
533 } else { 697 } else {
534#ifdef __BIG_ENDIAN 698#ifdef __BIG_ENDIAN
535 nv_wo32(*gpuobj, 0, class | 0x00080000); 699 nv_wo32(gpuobj, 0, class | 0x00080000);
536#else 700#else
537 nv_wo32(*gpuobj, 0, class); 701 nv_wo32(gpuobj, 0, class);
538#endif 702#endif
539 } 703 }
540 } 704 }
541 } 705 }
542 dev_priv->engine.instmem.flush(dev); 706 dev_priv->engine.instmem.flush(dev);
543 707
544 (*gpuobj)->engine = NVOBJ_ENGINE_GR; 708 gpuobj->engine = oc->engine;
545 (*gpuobj)->class = class; 709 gpuobj->class = oc->id;
546 return 0;
547}
548
549int
550nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
551 struct nouveau_gpuobj **gpuobj_ret)
552{
553 struct drm_nouveau_private *dev_priv;
554 struct nouveau_gpuobj *gpuobj;
555
556 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
557 return -EINVAL;
558 dev_priv = chan->dev->dev_private;
559
560 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
561 if (!gpuobj)
562 return -ENOMEM;
563 gpuobj->dev = chan->dev;
564 gpuobj->engine = NVOBJ_ENGINE_SW;
565 gpuobj->class = class;
566 kref_init(&gpuobj->refcount);
567 gpuobj->cinst = 0x40;
568 710
569 spin_lock(&dev_priv->ramin_lock); 711insert:
570 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 712 ret = nouveau_ramht_insert(chan, handle, gpuobj);
571 spin_unlock(&dev_priv->ramin_lock); 713 if (ret)
572 *gpuobj_ret = gpuobj; 714 NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
573 return 0; 715 nouveau_gpuobj_ref(NULL, &gpuobj);
716 return ret;
574} 717}
575 718
576static int 719static int
@@ -585,7 +728,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
585 NV_DEBUG(dev, "ch%d\n", chan->id); 728 NV_DEBUG(dev, "ch%d\n", chan->id);
586 729
587 /* Base amount for object storage (4KiB enough?) */ 730 /* Base amount for object storage (4KiB enough?) */
588 size = 0x1000; 731 size = 0x2000;
589 base = 0; 732 base = 0;
590 733
591 /* PGRAPH context */ 734 /* PGRAPH context */
@@ -624,9 +767,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
624{ 767{
625 struct drm_device *dev = chan->dev; 768 struct drm_device *dev = chan->dev;
626 struct drm_nouveau_private *dev_priv = dev->dev_private; 769 struct drm_nouveau_private *dev_priv = dev->dev_private;
627 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
628 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 770 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
629 int ret, i; 771 int ret;
630 772
631 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 773 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
632 774
@@ -637,16 +779,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
637 return ret; 779 return ret;
638 } 780 }
639 781
640 /* NV50 VM 782 /* NV50/NVC0 VM
641 * - Allocate per-channel page-directory 783 * - Allocate per-channel page-directory
642 * - Map GART and VRAM into the channel's address space at the 784 * - Link with shared channel VM
643 * locations determined during init.
644 */ 785 */
645 if (dev_priv->card_type >= NV_50) { 786 if (dev_priv->chan_vm) {
646 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 787 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
647 u64 vm_vinst = chan->ramin->vinst + pgd_offs; 788 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
648 u32 vm_pinst = chan->ramin->pinst; 789 u32 vm_pinst = chan->ramin->pinst;
649 u32 pde;
650 790
651 if (vm_pinst != ~0) 791 if (vm_pinst != ~0)
652 vm_pinst += pgd_offs; 792 vm_pinst += pgd_offs;
@@ -655,29 +795,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
655 0, &chan->vm_pd); 795 0, &chan->vm_pd);
656 if (ret) 796 if (ret)
657 return ret; 797 return ret;
658 for (i = 0; i < 0x4000; i += 8) {
659 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
660 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
661 }
662
663 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
664 &chan->vm_gart_pt);
665 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
666 nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
667 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
668
669 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
670 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
671 nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
672 &chan->vm_vram_pt[i]);
673
674 nv_wo32(chan->vm_pd, pde + 0,
675 chan->vm_vram_pt[i]->vinst | 0x61);
676 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
677 pde += 8;
678 }
679 798
680 instmem->flush(dev); 799 nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
681 } 800 }
682 801
683 /* RAMHT */ 802 /* RAMHT */
@@ -700,9 +819,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
700 /* VRAM ctxdma */ 819 /* VRAM ctxdma */
701 if (dev_priv->card_type >= NV_50) { 820 if (dev_priv->card_type >= NV_50) {
702 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 821 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
703 0, dev_priv->vm_end, 822 0, (1ULL << 40), NV_MEM_ACCESS_RW,
704 NV_DMA_ACCESS_RW, 823 NV_MEM_TARGET_VM, &vram);
705 NV_DMA_TARGET_AGP, &vram);
706 if (ret) { 824 if (ret) {
707 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); 825 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
708 return ret; 826 return ret;
@@ -710,8 +828,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
710 } else { 828 } else {
711 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 829 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
712 0, dev_priv->fb_available_size, 830 0, dev_priv->fb_available_size,
713 NV_DMA_ACCESS_RW, 831 NV_MEM_ACCESS_RW,
714 NV_DMA_TARGET_VIDMEM, &vram); 832 NV_MEM_TARGET_VRAM, &vram);
715 if (ret) { 833 if (ret) {
716 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); 834 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
717 return ret; 835 return ret;
@@ -728,21 +846,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
728 /* TT memory ctxdma */ 846 /* TT memory ctxdma */
729 if (dev_priv->card_type >= NV_50) { 847 if (dev_priv->card_type >= NV_50) {
730 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 848 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
731 0, dev_priv->vm_end, 849 0, (1ULL << 40), NV_MEM_ACCESS_RW,
732 NV_DMA_ACCESS_RW, 850 NV_MEM_TARGET_VM, &tt);
733 NV_DMA_TARGET_AGP, &tt);
734 if (ret) {
735 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
736 return ret;
737 }
738 } else
739 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
740 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
741 dev_priv->gart_info.aper_size,
742 NV_DMA_ACCESS_RW, &tt, NULL);
743 } else { 851 } else {
744 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); 852 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
745 ret = -EINVAL; 853 0, dev_priv->gart_info.aper_size,
854 NV_MEM_ACCESS_RW,
855 NV_MEM_TARGET_GART, &tt);
746 } 856 }
747 857
748 if (ret) { 858 if (ret) {
@@ -763,9 +873,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
763void 873void
764nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) 874nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
765{ 875{
766 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
767 struct drm_device *dev = chan->dev; 876 struct drm_device *dev = chan->dev;
768 int i;
769 877
770 NV_DEBUG(dev, "ch%d\n", chan->id); 878 NV_DEBUG(dev, "ch%d\n", chan->id);
771 879
@@ -774,10 +882,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
774 882
775 nouveau_ramht_ref(NULL, &chan->ramht, chan); 883 nouveau_ramht_ref(NULL, &chan->ramht, chan);
776 884
885 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
777 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 886 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
778 nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
779 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
780 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
781 887
782 if (chan->ramin_heap.free_stack.next) 888 if (chan->ramin_heap.free_stack.next)
783 drm_mm_takedown(&chan->ramin_heap); 889 drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +897,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
791 struct nouveau_gpuobj *gpuobj; 897 struct nouveau_gpuobj *gpuobj;
792 int i; 898 int i;
793 899
794 if (dev_priv->card_type < NV_50) {
795 dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
796 if (!dev_priv->susres.ramin_copy)
797 return -ENOMEM;
798
799 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
800 dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
801 return 0;
802 }
803
804 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { 900 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
805 if (!gpuobj->im_backing) 901 if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
806 continue; 902 continue;
807 903
808 gpuobj->im_backing_suspend = vmalloc(gpuobj->size); 904 gpuobj->suspend = vmalloc(gpuobj->size);
809 if (!gpuobj->im_backing_suspend) { 905 if (!gpuobj->suspend) {
810 nouveau_gpuobj_resume(dev); 906 nouveau_gpuobj_resume(dev);
811 return -ENOMEM; 907 return -ENOMEM;
812 } 908 }
813 909
814 for (i = 0; i < gpuobj->size; i += 4) 910 for (i = 0; i < gpuobj->size; i += 4)
815 gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); 911 gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
816 } 912 }
817 913
818 return 0; 914 return 0;
819} 915}
820 916
821void 917void
822nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
823{
824 struct drm_nouveau_private *dev_priv = dev->dev_private;
825 struct nouveau_gpuobj *gpuobj;
826
827 if (dev_priv->card_type < NV_50) {
828 vfree(dev_priv->susres.ramin_copy);
829 dev_priv->susres.ramin_copy = NULL;
830 return;
831 }
832
833 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
834 if (!gpuobj->im_backing_suspend)
835 continue;
836
837 vfree(gpuobj->im_backing_suspend);
838 gpuobj->im_backing_suspend = NULL;
839 }
840}
841
842void
843nouveau_gpuobj_resume(struct drm_device *dev) 918nouveau_gpuobj_resume(struct drm_device *dev)
844{ 919{
845 struct drm_nouveau_private *dev_priv = dev->dev_private; 920 struct drm_nouveau_private *dev_priv = dev->dev_private;
846 struct nouveau_gpuobj *gpuobj; 921 struct nouveau_gpuobj *gpuobj;
847 int i; 922 int i;
848 923
849 if (dev_priv->card_type < NV_50) {
850 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
851 nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
852 nouveau_gpuobj_suspend_cleanup(dev);
853 return;
854 }
855
856 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { 924 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
857 if (!gpuobj->im_backing_suspend) 925 if (!gpuobj->suspend)
858 continue; 926 continue;
859 927
860 for (i = 0; i < gpuobj->size; i += 4) 928 for (i = 0; i < gpuobj->size; i += 4)
861 nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); 929 nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
862 dev_priv->engine.instmem.flush(dev); 930
931 vfree(gpuobj->suspend);
932 gpuobj->suspend = NULL;
863 } 933 }
864 934
865 nouveau_gpuobj_suspend_cleanup(dev); 935 dev_priv->engine.instmem.flush(dev);
866} 936}
867 937
868int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, 938int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
869 struct drm_file *file_priv) 939 struct drm_file *file_priv)
870{ 940{
871 struct drm_nouveau_private *dev_priv = dev->dev_private;
872 struct drm_nouveau_grobj_alloc *init = data; 941 struct drm_nouveau_grobj_alloc *init = data;
873 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
874 struct nouveau_pgraph_object_class *grc;
875 struct nouveau_gpuobj *gr = NULL;
876 struct nouveau_channel *chan; 942 struct nouveau_channel *chan;
877 int ret; 943 int ret;
878 944
879 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
880
881 if (init->handle == ~0) 945 if (init->handle == ~0)
882 return -EINVAL; 946 return -EINVAL;
883 947
884 grc = pgraph->grclass; 948 chan = nouveau_channel_get(dev, file_priv, init->channel);
885 while (grc->id) { 949 if (IS_ERR(chan))
886 if (grc->id == init->class) 950 return PTR_ERR(chan);
887 break;
888 grc++;
889 }
890 951
891 if (!grc->id) { 952 if (nouveau_ramht_find(chan, init->handle)) {
892 NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); 953 ret = -EEXIST;
893 return -EPERM; 954 goto out;
894 } 955 }
895 956
896 if (nouveau_ramht_find(chan, init->handle)) 957 ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
897 return -EEXIST;
898
899 if (!grc->software)
900 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
901 else
902 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
903 if (ret) { 958 if (ret) {
904 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", 959 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
905 ret, init->channel, init->handle); 960 ret, init->channel, init->handle);
906 return ret;
907 }
908
909 ret = nouveau_ramht_insert(chan, init->handle, gr);
910 nouveau_gpuobj_ref(NULL, &gr);
911 if (ret) {
912 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
913 ret, init->channel, init->handle);
914 return ret;
915 } 961 }
916 962
917 return 0; 963out:
964 nouveau_channel_put(&chan);
965 return ret;
918} 966}
919 967
920int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, 968int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
921 struct drm_file *file_priv) 969 struct drm_file *file_priv)
922{ 970{
923 struct drm_nouveau_gpuobj_free *objfree = data; 971 struct drm_nouveau_gpuobj_free *objfree = data;
924 struct nouveau_gpuobj *gpuobj;
925 struct nouveau_channel *chan; 972 struct nouveau_channel *chan;
973 int ret;
926 974
927 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); 975 chan = nouveau_channel_get(dev, file_priv, objfree->channel);
976 if (IS_ERR(chan))
977 return PTR_ERR(chan);
928 978
929 gpuobj = nouveau_ramht_find(chan, objfree->handle); 979 /* Synchronize with the user channel */
930 if (!gpuobj) 980 nouveau_channel_idle(chan);
931 return -ENOENT;
932 981
933 nouveau_ramht_remove(chan, objfree->handle); 982 ret = nouveau_ramht_remove(chan, objfree->handle);
934 return 0; 983 nouveau_channel_put(&chan);
984 return ret;
935} 985}
936 986
937u32 987u32
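A hedged sketch of the per-channel object flow this file now exposes: nouveau_gpuobj_gr_new() takes the RAMHT handle itself, looks the class up in the registry, and performs the RAMHT insert, so callers no longer juggle the gpuobj reference. The handle values and the 0x0062 class below are placeholders, not identifiers defined by this patch.

int ret;

/* a NULL object and a graphics class, bound into this channel's RAMHT */
ret = nouveau_gpuobj_gr_new(chan, 0x00000010 /* placeholder handle */,
			    NV_CLASS_NULL);
if (ret == 0)
	ret = nouveau_gpuobj_gr_new(chan, 0x00000011 /* placeholder handle */,
				    0x0062 /* placeholder class */);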
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158f5825..d93814160bcf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30#ifdef CONFIG_ACPI
31#include <linux/acpi.h>
32#endif
33#include <linux/power_supply.h>
30#include <linux/hwmon.h> 34#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
32 36
@@ -446,6 +450,25 @@ nouveau_hwmon_fini(struct drm_device *dev)
446#endif 450#endif
447} 451}
448 452
453#ifdef CONFIG_ACPI
454static int
455nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
456{
457 struct drm_nouveau_private *dev_priv =
458 container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
459 struct drm_device *dev = dev_priv->dev;
460 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
461
462 if (strcmp(entry->device_class, "ac_adapter") == 0) {
463 bool ac = power_supply_is_system_supplied();
464
465 NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
466 }
467
468 return NOTIFY_OK;
469}
470#endif
471
449int 472int
450nouveau_pm_init(struct drm_device *dev) 473nouveau_pm_init(struct drm_device *dev)
451{ 474{
@@ -485,6 +508,10 @@ nouveau_pm_init(struct drm_device *dev)
485 508
486 nouveau_sysfs_init(dev); 509 nouveau_sysfs_init(dev);
487 nouveau_hwmon_init(dev); 510 nouveau_hwmon_init(dev);
511#ifdef CONFIG_ACPI
512 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
513 register_acpi_notifier(&pm->acpi_nb);
514#endif
488 515
489 return 0; 516 return 0;
490} 517}
@@ -503,6 +530,9 @@ nouveau_pm_fini(struct drm_device *dev)
503 nouveau_perf_fini(dev); 530 nouveau_perf_fini(dev);
504 nouveau_volt_fini(dev); 531 nouveau_volt_fini(dev);
505 532
533#ifdef CONFIG_ACPI
534 unregister_acpi_notifier(&pm->acpi_nb);
535#endif
506 nouveau_hwmon_fini(dev); 536 nouveau_hwmon_fini(dev);
507 nouveau_sysfs_fini(dev); 537 nouveau_sysfs_fini(dev);
508} 538}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d8580927ca4..bef3e6910418 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); 104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
105 105
106 if (dev_priv->card_type < NV_40) { 106 if (dev_priv->card_type < NV_40) {
107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | 107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | 108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); 109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
110 } else 110 } else
111 if (dev_priv->card_type < NV_50) { 111 if (dev_priv->card_type < NV_50) {
112 ctx = (gpuobj->cinst >> 4) | 112 ctx = (gpuobj->pinst >> 4) |
113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | 113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); 114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else { 115 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { 116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) | 2; 117 ctx = (gpuobj->cinst << 10) | chan->id;
118 } else { 118 } else {
119 ctx = (gpuobj->cinst >> 4) | 119 ctx = (gpuobj->cinst >> 4) |
120 ((gpuobj->engine << 120 ((gpuobj->engine <<
@@ -214,18 +214,19 @@ out:
214 spin_unlock_irqrestore(&chan->ramht->lock, flags); 214 spin_unlock_irqrestore(&chan->ramht->lock, flags);
215} 215}
216 216
217void 217int
218nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) 218nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
219{ 219{
220 struct nouveau_ramht_entry *entry; 220 struct nouveau_ramht_entry *entry;
221 221
222 entry = nouveau_ramht_remove_entry(chan, handle); 222 entry = nouveau_ramht_remove_entry(chan, handle);
223 if (!entry) 223 if (!entry)
224 return; 224 return -ENOENT;
225 225
226 nouveau_ramht_remove_hash(chan, entry->handle); 226 nouveau_ramht_remove_hash(chan, entry->handle);
227 nouveau_gpuobj_ref(NULL, &entry->gpuobj); 227 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
228 kfree(entry); 228 kfree(entry);
229 return 0;
229} 230}
230 231
231struct nouveau_gpuobj * 232struct nouveau_gpuobj *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e1a8f1..c82de98fee0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
48 48
49extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, 49extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
50 struct nouveau_gpuobj *); 50 struct nouveau_gpuobj *);
51extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); 51extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
52extern struct nouveau_gpuobj * 52extern struct nouveau_gpuobj *
53nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); 53nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
54 54
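Since nouveau_ramht_remove() now reports failure, callers can propagate the error; a minimal hedged sketch (the gpuobj_free ioctl earlier in this series does the same thing):

ret = nouveau_ramht_remove(chan, handle);
if (ret)
	NV_ERROR(dev, "handle 0x%08x not found in RAMHT: %d\n", handle, ret);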
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541ca9e5..04e8fb795269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
45# define NV04_PFB_REF_CMD_REFRESH (1 << 0) 45# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
46#define NV04_PFB_PRE 0x001002d4 46#define NV04_PFB_PRE 0x001002d4
47# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) 47# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
48#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i))
49# define NV20_PFB_ZCOMP_MODE_32 (4 << 24)
50# define NV20_PFB_ZCOMP_EN (1 << 31)
51# define NV25_PFB_ZCOMP_MODE_16 (1 << 20)
52# define NV25_PFB_ZCOMP_MODE_32 (2 << 20)
48#define NV10_PFB_CLOSE_PAGE2 0x0010033c 53#define NV10_PFB_CLOSE_PAGE2 0x0010033c
49#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) 54#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
50#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) 55#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
74# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 79# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
75# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 80# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
76 81
77/* DMA object defines */
78#define NV_DMA_ACCESS_RW 0
79#define NV_DMA_ACCESS_RO 1
80#define NV_DMA_ACCESS_WO 2
81#define NV_DMA_TARGET_VIDMEM 0
82#define NV_DMA_TARGET_PCI 2
83#define NV_DMA_TARGET_AGP 3
84/* The following is not a real value used by the card, it's changed by
85 * nouveau_object_dma_create */
86#define NV_DMA_TARGET_PCI_NONLINEAR 8
87
88/* Some object classes we care about in the drm */ 82/* Some object classes we care about in the drm */
89#define NV_CLASS_DMA_FROM_MEMORY 0x00000002 83#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
90#define NV_CLASS_DMA_TO_MEMORY 0x00000003 84#define NV_CLASS_DMA_TO_MEMORY 0x00000003
@@ -332,6 +326,7 @@
332#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 326#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
333#define NV03_PGRAPH_STATUS 0x004006B0 327#define NV03_PGRAPH_STATUS 0x004006B0
334#define NV04_PGRAPH_STATUS 0x00400700 328#define NV04_PGRAPH_STATUS 0x00400700
329# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
335#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 330#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
336#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 331#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
337#define NV04_PGRAPH_SURFACE 0x0040070C 332#define NV04_PGRAPH_SURFACE 0x0040070C
@@ -378,6 +373,7 @@
378#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) 373#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
379#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) 374#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
380#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) 375#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
376#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
381#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) 377#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
382#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) 378#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
383#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 379#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
714#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 710#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
715#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 711#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
716#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 712#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
717#define NV50_PDISPLAY_INTR_EN 0x0061002c 713#define NV50_PDISPLAY_INTR_EN_0 0x00610028
718#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c 714#define NV50_PDISPLAY_INTR_EN_1 0x0061002c
719#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) 715#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c
720#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 716#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
721#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 717#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004
722#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 718#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008
723#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 719#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010
724#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 720#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020
721#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040
725#define NV50_PDISPLAY_UNK30_CTRL 0x00610030 722#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
726#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 723#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
727#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 724#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
728#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 725#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
729#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 726#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080)
730#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 727#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084)
731#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) 728#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200)
732#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 729#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010
733#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 730#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000
734#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 731#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010
735#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) 732#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204)
736#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 733#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002
737#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 734#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000
738#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 735#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002
739#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 736#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001
740#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) 737#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208)
741#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) 738#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c)
742 739
743#define NV50_PDISPLAY_CURSOR 0x00610270 740#define NV50_PDISPLAY_CURSOR 0x00610270
744#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) 741#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
746#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 743#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
747#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 744#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
748 745
749#define NV50_PDISPLAY_CTRL_STATE 0x00610300 746#define NV50_PDISPLAY_PIO_CTRL 0x00610300
750#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 747#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000
751#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc 748#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc
752#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 749#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001
753#define NV50_PDISPLAY_CTRL_VAL 0x00610304 750#define NV50_PDISPLAY_PIO_DATA 0x00610304
754#define NV50_PDISPLAY_UNK_380 0x00610380
755#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
756#define NV50_PDISPLAY_UNK_388 0x00610388
757#define NV50_PDISPLAY_UNK_38C 0x0061038c
758 751
759#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) 752#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
760#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) 753#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac97007038..9a250eb53098 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
14 dma_addr_t *pages; 14 dma_addr_t *pages;
15 unsigned nr_pages; 15 unsigned nr_pages;
16 16
17 unsigned pte_start; 17 u64 offset;
18 bool bound; 18 bool bound;
19}; 19};
20 20
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
74 } 74 }
75} 75}
76 76
77static inline unsigned
78nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
82
83 if (dev_priv->card_type < NV_50)
84 return pte + 2;
85
86 return pte << 1;
87}
88
89static int 77static int
90nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 78nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
91{ 79{
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
97 85
98 NV_DEBUG(dev, "pg=0x%lx\n", mem->start); 86 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
99 87
100 pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); 88 nvbe->offset = mem->start << PAGE_SHIFT;
101 nvbe->pte_start = pte; 89 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
102 for (i = 0; i < nvbe->nr_pages; i++) { 90 for (i = 0; i < nvbe->nr_pages; i++) {
103 dma_addr_t dma_offset = nvbe->pages[i]; 91 dma_addr_t dma_offset = nvbe->pages[i];
104 uint32_t offset_l = lower_32_bits(dma_offset); 92 uint32_t offset_l = lower_32_bits(dma_offset);
105 uint32_t offset_h = upper_32_bits(dma_offset);
106
107 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
108 if (dev_priv->card_type < NV_50) {
109 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
110 pte += 1;
111 } else {
112 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
113 nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
114 pte += 2;
115 }
116 93
94 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
95 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
117 dma_offset += NV_CTXDMA_PAGE_SIZE; 96 dma_offset += NV_CTXDMA_PAGE_SIZE;
118 } 97 }
119 } 98 }
120 dev_priv->engine.instmem.flush(nvbe->dev);
121
122 if (dev_priv->card_type == NV_50) {
123 dev_priv->engine.fifo.tlb_flush(dev);
124 dev_priv->engine.graph.tlb_flush(dev);
125 }
126 99
127 nvbe->bound = true; 100 nvbe->bound = true;
128 return 0; 101 return 0;
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
142 if (!nvbe->bound) 115 if (!nvbe->bound)
143 return 0; 116 return 0;
144 117
145 pte = nvbe->pte_start; 118 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
146 for (i = 0; i < nvbe->nr_pages; i++) { 119 for (i = 0; i < nvbe->nr_pages; i++) {
147 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; 120 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
148 121 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
149 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
150 if (dev_priv->card_type < NV_50) {
151 nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
152 pte += 1;
153 } else {
154 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
155 nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
156 pte += 2;
157 }
158
159 dma_offset += NV_CTXDMA_PAGE_SIZE;
160 }
161 }
162 dev_priv->engine.instmem.flush(nvbe->dev);
163
164 if (dev_priv->card_type == NV_50) {
165 dev_priv->engine.fifo.tlb_flush(dev);
166 dev_priv->engine.graph.tlb_flush(dev);
167 } 122 }
168 123
169 nvbe->bound = false; 124 nvbe->bound = false;
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
186 } 141 }
187} 142}
188 143
144static int
145nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
146{
147 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
148 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
149
150 nvbe->offset = mem->start << PAGE_SHIFT;
151
152 nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
153 nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
154 nvbe->bound = true;
155 return 0;
156}
157
158static int
159nv50_sgdma_unbind(struct ttm_backend *be)
160{
161 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
162 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
163
164 if (!nvbe->bound)
165 return 0;
166
167 nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
168 nvbe->nr_pages << PAGE_SHIFT);
169 nvbe->bound = false;
170 return 0;
171}
172
189static struct ttm_backend_func nouveau_sgdma_backend = { 173static struct ttm_backend_func nouveau_sgdma_backend = {
190 .populate = nouveau_sgdma_populate, 174 .populate = nouveau_sgdma_populate,
191 .clear = nouveau_sgdma_clear, 175 .clear = nouveau_sgdma_clear,
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
194 .destroy = nouveau_sgdma_destroy 178 .destroy = nouveau_sgdma_destroy
195}; 179};
196 180
181static struct ttm_backend_func nv50_sgdma_backend = {
182 .populate = nouveau_sgdma_populate,
183 .clear = nouveau_sgdma_clear,
184 .bind = nv50_sgdma_bind,
185 .unbind = nv50_sgdma_unbind,
186 .destroy = nouveau_sgdma_destroy
187};
188
197struct ttm_backend * 189struct ttm_backend *
198nouveau_sgdma_init_ttm(struct drm_device *dev) 190nouveau_sgdma_init_ttm(struct drm_device *dev)
199{ 191{
200 struct drm_nouveau_private *dev_priv = dev->dev_private; 192 struct drm_nouveau_private *dev_priv = dev->dev_private;
201 struct nouveau_sgdma_be *nvbe; 193 struct nouveau_sgdma_be *nvbe;
202 194
203 if (!dev_priv->gart_info.sg_ctxdma)
204 return NULL;
205
206 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 195 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
207 if (!nvbe) 196 if (!nvbe)
208 return NULL; 197 return NULL;
209 198
210 nvbe->dev = dev; 199 nvbe->dev = dev;
211 200
212 nvbe->backend.func = &nouveau_sgdma_backend; 201 if (dev_priv->card_type < NV_50)
213 202 nvbe->backend.func = &nouveau_sgdma_backend;
203 else
204 nvbe->backend.func = &nv50_sgdma_backend;
214 return &nvbe->backend; 205 return &nvbe->backend;
215} 206}
216 207
@@ -218,7 +209,6 @@ int
218nouveau_sgdma_init(struct drm_device *dev) 209nouveau_sgdma_init(struct drm_device *dev)
219{ 210{
220 struct drm_nouveau_private *dev_priv = dev->dev_private; 211 struct drm_nouveau_private *dev_priv = dev->dev_private;
221 struct pci_dev *pdev = dev->pdev;
222 struct nouveau_gpuobj *gpuobj = NULL; 212 struct nouveau_gpuobj *gpuobj = NULL;
223 uint32_t aper_size, obj_size; 213 uint32_t aper_size, obj_size;
224 int i, ret; 214 int i, ret;
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)
231 221
232 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; 222 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
233 obj_size += 8; /* ctxdma header */ 223 obj_size += 8; /* ctxdma header */
234 } else {
235 /* 1 entire VM page table */
236 aper_size = (512 * 1024 * 1024);
237 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
238 }
239
240 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
241 NVOBJ_FLAG_ZERO_ALLOC |
242 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
243 if (ret) {
244 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
245 return ret;
246 }
247
248 dev_priv->gart_info.sg_dummy_page =
249 alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
250 if (!dev_priv->gart_info.sg_dummy_page) {
251 nouveau_gpuobj_ref(NULL, &gpuobj);
252 return -ENOMEM;
253 }
254 224
255 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); 225 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
256 dev_priv->gart_info.sg_dummy_bus = 226 NVOBJ_FLAG_ZERO_ALLOC |
257 pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, 227 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
258 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 228 if (ret) {
259 if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { 229 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
260 nouveau_gpuobj_ref(NULL, &gpuobj); 230 return ret;
261 return -EFAULT; 231 }
262 }
263 232
264 if (dev_priv->card_type < NV_50) {
265 /* special case, allocated from global instmem heap so
266 * cinst is invalid, we use it on all channels though so
267 * cinst needs to be valid, set it the same as pinst
268 */
269 gpuobj->cinst = gpuobj->pinst;
270
271 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
272 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
273 * on those cards? */
274 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | 233 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
275 (1 << 12) /* PT present */ | 234 (1 << 12) /* PT present */ |
276 (0 << 13) /* PT *not* linear */ | 235 (0 << 13) /* PT *not* linear */ |
277 (NV_DMA_ACCESS_RW << 14) | 236 (0 << 14) /* RW */ |
278 (NV_DMA_TARGET_PCI << 16)); 237 (2 << 16) /* PCI */);
279 nv_wo32(gpuobj, 4, aper_size - 1); 238 nv_wo32(gpuobj, 4, aper_size - 1);
280 for (i = 2; i < 2 + (aper_size >> 12); i++) { 239 for (i = 2; i < 2 + (aper_size >> 12); i++)
281 nv_wo32(gpuobj, i * 4, 240 nv_wo32(gpuobj, i * 4, 0x00000000);
282 dev_priv->gart_info.sg_dummy_bus | 3); 241
283 } 242 dev_priv->gart_info.sg_ctxdma = gpuobj;
284 } else { 243 dev_priv->gart_info.aper_base = 0;
285 for (i = 0; i < obj_size; i += 8) { 244 dev_priv->gart_info.aper_size = aper_size;
286 nv_wo32(gpuobj, i + 0, 0x00000000); 245 } else
287 nv_wo32(gpuobj, i + 4, 0x00000000); 246 if (dev_priv->chan_vm) {
288 } 247 ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
248 12, NV_MEM_ACCESS_RW,
249 &dev_priv->gart_info.vma);
250 if (ret)
251 return ret;
252
253 dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
254 dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
289 } 255 }
290 dev_priv->engine.instmem.flush(dev);
291 256
292 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; 257 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
293 dev_priv->gart_info.aper_base = 0;
294 dev_priv->gart_info.aper_size = aper_size;
295 dev_priv->gart_info.sg_ctxdma = gpuobj;
296 return 0; 258 return 0;
297} 259}
298 260
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
301{ 263{
302 struct drm_nouveau_private *dev_priv = dev->dev_private; 264 struct drm_nouveau_private *dev_priv = dev->dev_private;
303 265
304 if (dev_priv->gart_info.sg_dummy_page) {
305 pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
306 NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
307 unlock_page(dev_priv->gart_info.sg_dummy_page);
308 __free_page(dev_priv->gart_info.sg_dummy_page);
309 dev_priv->gart_info.sg_dummy_page = NULL;
310 dev_priv->gart_info.sg_dummy_bus = 0;
311 }
312
313 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); 266 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
267 nouveau_vm_put(&dev_priv->gart_info.vma);
314} 268}
315 269
316int 270uint32_t
317nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) 271nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
318{ 272{
319 struct drm_nouveau_private *dev_priv = dev->dev_private; 273 struct drm_nouveau_private *dev_priv = dev->dev_private;
320 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 274 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
321 int pte; 275 int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
322 276
323 pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; 277 BUG_ON(dev_priv->card_type >= NV_50);
324 if (dev_priv->card_type < NV_50) {
325 *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
326 return 0;
327 }
328 278
329 NV_ERROR(dev, "Unimplemented on NV50\n"); 279 return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
330 return -EINVAL; 280 (offset & NV_CTXDMA_PAGE_MASK);
331} 281}
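A hedged note on the new helper's contract on pre-NV50 chips: nouveau_sgdma_get_physical() reads the SGDMA ctxdma PTE for a GART byte offset and returns the backing bus address, which nouveau_gpuobj_dma_new() earlier in this patch uses to retarget a non-zero GART base at PCI memory. A minimal sketch:

/* pre-NV50 only: translate a GART byte offset into a bus address */
uint32_t bus = nouveau_sgdma_get_physical(dev, offset);
/* 'bus' is the PTE's page address OR'd with the low bits of 'offset' */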
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755567e5..8eac943e8fd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
53 engine->instmem.takedown = nv04_instmem_takedown; 53 engine->instmem.takedown = nv04_instmem_takedown;
54 engine->instmem.suspend = nv04_instmem_suspend; 54 engine->instmem.suspend = nv04_instmem_suspend;
55 engine->instmem.resume = nv04_instmem_resume; 55 engine->instmem.resume = nv04_instmem_resume;
56 engine->instmem.populate = nv04_instmem_populate; 56 engine->instmem.get = nv04_instmem_get;
57 engine->instmem.clear = nv04_instmem_clear; 57 engine->instmem.put = nv04_instmem_put;
58 engine->instmem.bind = nv04_instmem_bind; 58 engine->instmem.map = nv04_instmem_map;
59 engine->instmem.unbind = nv04_instmem_unbind; 59 engine->instmem.unmap = nv04_instmem_unmap;
60 engine->instmem.flush = nv04_instmem_flush; 60 engine->instmem.flush = nv04_instmem_flush;
61 engine->mc.init = nv04_mc_init; 61 engine->mc.init = nv04_mc_init;
62 engine->mc.takedown = nv04_mc_takedown; 62 engine->mc.takedown = nv04_mc_takedown;
@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
65 engine->timer.takedown = nv04_timer_takedown; 65 engine->timer.takedown = nv04_timer_takedown;
66 engine->fb.init = nv04_fb_init; 66 engine->fb.init = nv04_fb_init;
67 engine->fb.takedown = nv04_fb_takedown; 67 engine->fb.takedown = nv04_fb_takedown;
68 engine->graph.grclass = nv04_graph_grclass;
69 engine->graph.init = nv04_graph_init; 68 engine->graph.init = nv04_graph_init;
70 engine->graph.takedown = nv04_graph_takedown; 69 engine->graph.takedown = nv04_graph_takedown;
71 engine->graph.fifo_access = nv04_graph_fifo_access; 70 engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
76 engine->graph.unload_context = nv04_graph_unload_context; 75 engine->graph.unload_context = nv04_graph_unload_context;
77 engine->fifo.channels = 16; 76 engine->fifo.channels = 16;
78 engine->fifo.init = nv04_fifo_init; 77 engine->fifo.init = nv04_fifo_init;
79 engine->fifo.takedown = nouveau_stub_takedown; 78 engine->fifo.takedown = nv04_fifo_fini;
80 engine->fifo.disable = nv04_fifo_disable; 79 engine->fifo.disable = nv04_fifo_disable;
81 engine->fifo.enable = nv04_fifo_enable; 80 engine->fifo.enable = nv04_fifo_enable;
82 engine->fifo.reassign = nv04_fifo_reassign; 81 engine->fifo.reassign = nv04_fifo_reassign;
@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
99 engine->pm.clock_get = nv04_pm_clock_get; 98 engine->pm.clock_get = nv04_pm_clock_get;
100 engine->pm.clock_pre = nv04_pm_clock_pre; 99 engine->pm.clock_pre = nv04_pm_clock_pre;
101 engine->pm.clock_set = nv04_pm_clock_set; 100 engine->pm.clock_set = nv04_pm_clock_set;
101 engine->crypt.init = nouveau_stub_init;
102 engine->crypt.takedown = nouveau_stub_takedown;
103 engine->vram.init = nouveau_mem_detect;
104 engine->vram.flags_valid = nouveau_mem_flags_valid;
102 break; 105 break;
103 case 0x10: 106 case 0x10:
104 engine->instmem.init = nv04_instmem_init; 107 engine->instmem.init = nv04_instmem_init;
105 engine->instmem.takedown = nv04_instmem_takedown; 108 engine->instmem.takedown = nv04_instmem_takedown;
106 engine->instmem.suspend = nv04_instmem_suspend; 109 engine->instmem.suspend = nv04_instmem_suspend;
107 engine->instmem.resume = nv04_instmem_resume; 110 engine->instmem.resume = nv04_instmem_resume;
108 engine->instmem.populate = nv04_instmem_populate; 111 engine->instmem.get = nv04_instmem_get;
109 engine->instmem.clear = nv04_instmem_clear; 112 engine->instmem.put = nv04_instmem_put;
110 engine->instmem.bind = nv04_instmem_bind; 113 engine->instmem.map = nv04_instmem_map;
111 engine->instmem.unbind = nv04_instmem_unbind; 114 engine->instmem.unmap = nv04_instmem_unmap;
112 engine->instmem.flush = nv04_instmem_flush; 115 engine->instmem.flush = nv04_instmem_flush;
113 engine->mc.init = nv04_mc_init; 116 engine->mc.init = nv04_mc_init;
114 engine->mc.takedown = nv04_mc_takedown; 117 engine->mc.takedown = nv04_mc_takedown;
@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
117 engine->timer.takedown = nv04_timer_takedown; 120 engine->timer.takedown = nv04_timer_takedown;
118 engine->fb.init = nv10_fb_init; 121 engine->fb.init = nv10_fb_init;
119 engine->fb.takedown = nv10_fb_takedown; 122 engine->fb.takedown = nv10_fb_takedown;
120 engine->fb.set_region_tiling = nv10_fb_set_region_tiling; 123 engine->fb.init_tile_region = nv10_fb_init_tile_region;
121 engine->graph.grclass = nv10_graph_grclass; 124 engine->fb.set_tile_region = nv10_fb_set_tile_region;
125 engine->fb.free_tile_region = nv10_fb_free_tile_region;
122 engine->graph.init = nv10_graph_init; 126 engine->graph.init = nv10_graph_init;
123 engine->graph.takedown = nv10_graph_takedown; 127 engine->graph.takedown = nv10_graph_takedown;
124 engine->graph.channel = nv10_graph_channel; 128 engine->graph.channel = nv10_graph_channel;
@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
127 engine->graph.fifo_access = nv04_graph_fifo_access; 131 engine->graph.fifo_access = nv04_graph_fifo_access;
128 engine->graph.load_context = nv10_graph_load_context; 132 engine->graph.load_context = nv10_graph_load_context;
129 engine->graph.unload_context = nv10_graph_unload_context; 133 engine->graph.unload_context = nv10_graph_unload_context;
130 engine->graph.set_region_tiling = nv10_graph_set_region_tiling; 134 engine->graph.set_tile_region = nv10_graph_set_tile_region;
131 engine->fifo.channels = 32; 135 engine->fifo.channels = 32;
132 engine->fifo.init = nv10_fifo_init; 136 engine->fifo.init = nv10_fifo_init;
133 engine->fifo.takedown = nouveau_stub_takedown; 137 engine->fifo.takedown = nv04_fifo_fini;
134 engine->fifo.disable = nv04_fifo_disable; 138 engine->fifo.disable = nv04_fifo_disable;
135 engine->fifo.enable = nv04_fifo_enable; 139 engine->fifo.enable = nv04_fifo_enable;
136 engine->fifo.reassign = nv04_fifo_reassign; 140 engine->fifo.reassign = nv04_fifo_reassign;
137 engine->fifo.cache_pull = nv04_fifo_cache_pull; 141 engine->fifo.cache_pull = nv04_fifo_cache_pull;
138 engine->fifo.channel_id = nv10_fifo_channel_id; 142 engine->fifo.channel_id = nv10_fifo_channel_id;
139 engine->fifo.create_context = nv10_fifo_create_context; 143 engine->fifo.create_context = nv10_fifo_create_context;
140 engine->fifo.destroy_context = nv10_fifo_destroy_context; 144 engine->fifo.destroy_context = nv04_fifo_destroy_context;
141 engine->fifo.load_context = nv10_fifo_load_context; 145 engine->fifo.load_context = nv10_fifo_load_context;
142 engine->fifo.unload_context = nv10_fifo_unload_context; 146 engine->fifo.unload_context = nv10_fifo_unload_context;
143 engine->display.early_init = nv04_display_early_init; 147 engine->display.early_init = nv04_display_early_init;
@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
153 engine->pm.clock_get = nv04_pm_clock_get; 157 engine->pm.clock_get = nv04_pm_clock_get;
154 engine->pm.clock_pre = nv04_pm_clock_pre; 158 engine->pm.clock_pre = nv04_pm_clock_pre;
155 engine->pm.clock_set = nv04_pm_clock_set; 159 engine->pm.clock_set = nv04_pm_clock_set;
160 engine->crypt.init = nouveau_stub_init;
161 engine->crypt.takedown = nouveau_stub_takedown;
162 engine->vram.init = nouveau_mem_detect;
163 engine->vram.flags_valid = nouveau_mem_flags_valid;
156 break; 164 break;
157 case 0x20: 165 case 0x20:
158 engine->instmem.init = nv04_instmem_init; 166 engine->instmem.init = nv04_instmem_init;
159 engine->instmem.takedown = nv04_instmem_takedown; 167 engine->instmem.takedown = nv04_instmem_takedown;
160 engine->instmem.suspend = nv04_instmem_suspend; 168 engine->instmem.suspend = nv04_instmem_suspend;
161 engine->instmem.resume = nv04_instmem_resume; 169 engine->instmem.resume = nv04_instmem_resume;
162 engine->instmem.populate = nv04_instmem_populate; 170 engine->instmem.get = nv04_instmem_get;
163 engine->instmem.clear = nv04_instmem_clear; 171 engine->instmem.put = nv04_instmem_put;
164 engine->instmem.bind = nv04_instmem_bind; 172 engine->instmem.map = nv04_instmem_map;
165 engine->instmem.unbind = nv04_instmem_unbind; 173 engine->instmem.unmap = nv04_instmem_unmap;
166 engine->instmem.flush = nv04_instmem_flush; 174 engine->instmem.flush = nv04_instmem_flush;
167 engine->mc.init = nv04_mc_init; 175 engine->mc.init = nv04_mc_init;
168 engine->mc.takedown = nv04_mc_takedown; 176 engine->mc.takedown = nv04_mc_takedown;
@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
171 engine->timer.takedown = nv04_timer_takedown; 179 engine->timer.takedown = nv04_timer_takedown;
172 engine->fb.init = nv10_fb_init; 180 engine->fb.init = nv10_fb_init;
173 engine->fb.takedown = nv10_fb_takedown; 181 engine->fb.takedown = nv10_fb_takedown;
174 engine->fb.set_region_tiling = nv10_fb_set_region_tiling; 182 engine->fb.init_tile_region = nv10_fb_init_tile_region;
175 engine->graph.grclass = nv20_graph_grclass; 183 engine->fb.set_tile_region = nv10_fb_set_tile_region;
184 engine->fb.free_tile_region = nv10_fb_free_tile_region;
176 engine->graph.init = nv20_graph_init; 185 engine->graph.init = nv20_graph_init;
177 engine->graph.takedown = nv20_graph_takedown; 186 engine->graph.takedown = nv20_graph_takedown;
178 engine->graph.channel = nv10_graph_channel; 187 engine->graph.channel = nv10_graph_channel;
@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
181 engine->graph.fifo_access = nv04_graph_fifo_access; 190 engine->graph.fifo_access = nv04_graph_fifo_access;
182 engine->graph.load_context = nv20_graph_load_context; 191 engine->graph.load_context = nv20_graph_load_context;
183 engine->graph.unload_context = nv20_graph_unload_context; 192 engine->graph.unload_context = nv20_graph_unload_context;
184 engine->graph.set_region_tiling = nv20_graph_set_region_tiling; 193 engine->graph.set_tile_region = nv20_graph_set_tile_region;
185 engine->fifo.channels = 32; 194 engine->fifo.channels = 32;
186 engine->fifo.init = nv10_fifo_init; 195 engine->fifo.init = nv10_fifo_init;
187 engine->fifo.takedown = nouveau_stub_takedown; 196 engine->fifo.takedown = nv04_fifo_fini;
188 engine->fifo.disable = nv04_fifo_disable; 197 engine->fifo.disable = nv04_fifo_disable;
189 engine->fifo.enable = nv04_fifo_enable; 198 engine->fifo.enable = nv04_fifo_enable;
190 engine->fifo.reassign = nv04_fifo_reassign; 199 engine->fifo.reassign = nv04_fifo_reassign;
191 engine->fifo.cache_pull = nv04_fifo_cache_pull; 200 engine->fifo.cache_pull = nv04_fifo_cache_pull;
192 engine->fifo.channel_id = nv10_fifo_channel_id; 201 engine->fifo.channel_id = nv10_fifo_channel_id;
193 engine->fifo.create_context = nv10_fifo_create_context; 202 engine->fifo.create_context = nv10_fifo_create_context;
194 engine->fifo.destroy_context = nv10_fifo_destroy_context; 203 engine->fifo.destroy_context = nv04_fifo_destroy_context;
195 engine->fifo.load_context = nv10_fifo_load_context; 204 engine->fifo.load_context = nv10_fifo_load_context;
196 engine->fifo.unload_context = nv10_fifo_unload_context; 205 engine->fifo.unload_context = nv10_fifo_unload_context;
197 engine->display.early_init = nv04_display_early_init; 206 engine->display.early_init = nv04_display_early_init;
@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
207 engine->pm.clock_get = nv04_pm_clock_get; 216 engine->pm.clock_get = nv04_pm_clock_get;
208 engine->pm.clock_pre = nv04_pm_clock_pre; 217 engine->pm.clock_pre = nv04_pm_clock_pre;
209 engine->pm.clock_set = nv04_pm_clock_set; 218 engine->pm.clock_set = nv04_pm_clock_set;
219 engine->crypt.init = nouveau_stub_init;
220 engine->crypt.takedown = nouveau_stub_takedown;
221 engine->vram.init = nouveau_mem_detect;
222 engine->vram.flags_valid = nouveau_mem_flags_valid;
210 break; 223 break;
211 case 0x30: 224 case 0x30:
212 engine->instmem.init = nv04_instmem_init; 225 engine->instmem.init = nv04_instmem_init;
213 engine->instmem.takedown = nv04_instmem_takedown; 226 engine->instmem.takedown = nv04_instmem_takedown;
214 engine->instmem.suspend = nv04_instmem_suspend; 227 engine->instmem.suspend = nv04_instmem_suspend;
215 engine->instmem.resume = nv04_instmem_resume; 228 engine->instmem.resume = nv04_instmem_resume;
216 engine->instmem.populate = nv04_instmem_populate; 229 engine->instmem.get = nv04_instmem_get;
217 engine->instmem.clear = nv04_instmem_clear; 230 engine->instmem.put = nv04_instmem_put;
218 engine->instmem.bind = nv04_instmem_bind; 231 engine->instmem.map = nv04_instmem_map;
219 engine->instmem.unbind = nv04_instmem_unbind; 232 engine->instmem.unmap = nv04_instmem_unmap;
220 engine->instmem.flush = nv04_instmem_flush; 233 engine->instmem.flush = nv04_instmem_flush;
221 engine->mc.init = nv04_mc_init; 234 engine->mc.init = nv04_mc_init;
222 engine->mc.takedown = nv04_mc_takedown; 235 engine->mc.takedown = nv04_mc_takedown;
@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
225 engine->timer.takedown = nv04_timer_takedown; 238 engine->timer.takedown = nv04_timer_takedown;
226 engine->fb.init = nv30_fb_init; 239 engine->fb.init = nv30_fb_init;
227 engine->fb.takedown = nv30_fb_takedown; 240 engine->fb.takedown = nv30_fb_takedown;
228 engine->fb.set_region_tiling = nv10_fb_set_region_tiling; 241 engine->fb.init_tile_region = nv30_fb_init_tile_region;
229 engine->graph.grclass = nv30_graph_grclass; 242 engine->fb.set_tile_region = nv10_fb_set_tile_region;
243 engine->fb.free_tile_region = nv30_fb_free_tile_region;
230 engine->graph.init = nv30_graph_init; 244 engine->graph.init = nv30_graph_init;
231 engine->graph.takedown = nv20_graph_takedown; 245 engine->graph.takedown = nv20_graph_takedown;
232 engine->graph.fifo_access = nv04_graph_fifo_access; 246 engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
235 engine->graph.destroy_context = nv20_graph_destroy_context; 249 engine->graph.destroy_context = nv20_graph_destroy_context;
236 engine->graph.load_context = nv20_graph_load_context; 250 engine->graph.load_context = nv20_graph_load_context;
237 engine->graph.unload_context = nv20_graph_unload_context; 251 engine->graph.unload_context = nv20_graph_unload_context;
238 engine->graph.set_region_tiling = nv20_graph_set_region_tiling; 252 engine->graph.set_tile_region = nv20_graph_set_tile_region;
239 engine->fifo.channels = 32; 253 engine->fifo.channels = 32;
240 engine->fifo.init = nv10_fifo_init; 254 engine->fifo.init = nv10_fifo_init;
241 engine->fifo.takedown = nouveau_stub_takedown; 255 engine->fifo.takedown = nv04_fifo_fini;
242 engine->fifo.disable = nv04_fifo_disable; 256 engine->fifo.disable = nv04_fifo_disable;
243 engine->fifo.enable = nv04_fifo_enable; 257 engine->fifo.enable = nv04_fifo_enable;
244 engine->fifo.reassign = nv04_fifo_reassign; 258 engine->fifo.reassign = nv04_fifo_reassign;
245 engine->fifo.cache_pull = nv04_fifo_cache_pull; 259 engine->fifo.cache_pull = nv04_fifo_cache_pull;
246 engine->fifo.channel_id = nv10_fifo_channel_id; 260 engine->fifo.channel_id = nv10_fifo_channel_id;
247 engine->fifo.create_context = nv10_fifo_create_context; 261 engine->fifo.create_context = nv10_fifo_create_context;
248 engine->fifo.destroy_context = nv10_fifo_destroy_context; 262 engine->fifo.destroy_context = nv04_fifo_destroy_context;
249 engine->fifo.load_context = nv10_fifo_load_context; 263 engine->fifo.load_context = nv10_fifo_load_context;
250 engine->fifo.unload_context = nv10_fifo_unload_context; 264 engine->fifo.unload_context = nv10_fifo_unload_context;
251 engine->display.early_init = nv04_display_early_init; 265 engine->display.early_init = nv04_display_early_init;
@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
263 engine->pm.clock_set = nv04_pm_clock_set; 277 engine->pm.clock_set = nv04_pm_clock_set;
264 engine->pm.voltage_get = nouveau_voltage_gpio_get; 278 engine->pm.voltage_get = nouveau_voltage_gpio_get;
265 engine->pm.voltage_set = nouveau_voltage_gpio_set; 279 engine->pm.voltage_set = nouveau_voltage_gpio_set;
280 engine->crypt.init = nouveau_stub_init;
281 engine->crypt.takedown = nouveau_stub_takedown;
282 engine->vram.init = nouveau_mem_detect;
283 engine->vram.flags_valid = nouveau_mem_flags_valid;
266 break; 284 break;
267 case 0x40: 285 case 0x40:
268 case 0x60: 286 case 0x60:
@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
270 engine->instmem.takedown = nv04_instmem_takedown; 288 engine->instmem.takedown = nv04_instmem_takedown;
271 engine->instmem.suspend = nv04_instmem_suspend; 289 engine->instmem.suspend = nv04_instmem_suspend;
272 engine->instmem.resume = nv04_instmem_resume; 290 engine->instmem.resume = nv04_instmem_resume;
273 engine->instmem.populate = nv04_instmem_populate; 291 engine->instmem.get = nv04_instmem_get;
274 engine->instmem.clear = nv04_instmem_clear; 292 engine->instmem.put = nv04_instmem_put;
275 engine->instmem.bind = nv04_instmem_bind; 293 engine->instmem.map = nv04_instmem_map;
276 engine->instmem.unbind = nv04_instmem_unbind; 294 engine->instmem.unmap = nv04_instmem_unmap;
277 engine->instmem.flush = nv04_instmem_flush; 295 engine->instmem.flush = nv04_instmem_flush;
278 engine->mc.init = nv40_mc_init; 296 engine->mc.init = nv40_mc_init;
279 engine->mc.takedown = nv40_mc_takedown; 297 engine->mc.takedown = nv40_mc_takedown;
@@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
282 engine->timer.takedown = nv04_timer_takedown; 300 engine->timer.takedown = nv04_timer_takedown;
283 engine->fb.init = nv40_fb_init; 301 engine->fb.init = nv40_fb_init;
284 engine->fb.takedown = nv40_fb_takedown; 302 engine->fb.takedown = nv40_fb_takedown;
285 engine->fb.set_region_tiling = nv40_fb_set_region_tiling; 303 engine->fb.init_tile_region = nv30_fb_init_tile_region;
286 engine->graph.grclass = nv40_graph_grclass; 304 engine->fb.set_tile_region = nv40_fb_set_tile_region;
305 engine->fb.free_tile_region = nv30_fb_free_tile_region;
287 engine->graph.init = nv40_graph_init; 306 engine->graph.init = nv40_graph_init;
288 engine->graph.takedown = nv40_graph_takedown; 307 engine->graph.takedown = nv40_graph_takedown;
289 engine->graph.fifo_access = nv04_graph_fifo_access; 308 engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
292 engine->graph.destroy_context = nv40_graph_destroy_context; 311 engine->graph.destroy_context = nv40_graph_destroy_context;
293 engine->graph.load_context = nv40_graph_load_context; 312 engine->graph.load_context = nv40_graph_load_context;
294 engine->graph.unload_context = nv40_graph_unload_context; 313 engine->graph.unload_context = nv40_graph_unload_context;
295 engine->graph.set_region_tiling = nv40_graph_set_region_tiling; 314 engine->graph.set_tile_region = nv40_graph_set_tile_region;
296 engine->fifo.channels = 32; 315 engine->fifo.channels = 32;
297 engine->fifo.init = nv40_fifo_init; 316 engine->fifo.init = nv40_fifo_init;
298 engine->fifo.takedown = nouveau_stub_takedown; 317 engine->fifo.takedown = nv04_fifo_fini;
299 engine->fifo.disable = nv04_fifo_disable; 318 engine->fifo.disable = nv04_fifo_disable;
300 engine->fifo.enable = nv04_fifo_enable; 319 engine->fifo.enable = nv04_fifo_enable;
301 engine->fifo.reassign = nv04_fifo_reassign; 320 engine->fifo.reassign = nv04_fifo_reassign;
302 engine->fifo.cache_pull = nv04_fifo_cache_pull; 321 engine->fifo.cache_pull = nv04_fifo_cache_pull;
303 engine->fifo.channel_id = nv10_fifo_channel_id; 322 engine->fifo.channel_id = nv10_fifo_channel_id;
304 engine->fifo.create_context = nv40_fifo_create_context; 323 engine->fifo.create_context = nv40_fifo_create_context;
305 engine->fifo.destroy_context = nv40_fifo_destroy_context; 324 engine->fifo.destroy_context = nv04_fifo_destroy_context;
306 engine->fifo.load_context = nv40_fifo_load_context; 325 engine->fifo.load_context = nv40_fifo_load_context;
307 engine->fifo.unload_context = nv40_fifo_unload_context; 326 engine->fifo.unload_context = nv40_fifo_unload_context;
308 engine->display.early_init = nv04_display_early_init; 327 engine->display.early_init = nv04_display_early_init;
@@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
321 engine->pm.voltage_get = nouveau_voltage_gpio_get; 340 engine->pm.voltage_get = nouveau_voltage_gpio_get;
322 engine->pm.voltage_set = nouveau_voltage_gpio_set; 341 engine->pm.voltage_set = nouveau_voltage_gpio_set;
323 engine->pm.temp_get = nv40_temp_get; 342 engine->pm.temp_get = nv40_temp_get;
343 engine->crypt.init = nouveau_stub_init;
344 engine->crypt.takedown = nouveau_stub_takedown;
345 engine->vram.init = nouveau_mem_detect;
346 engine->vram.flags_valid = nouveau_mem_flags_valid;
324 break; 347 break;
325 case 0x50: 348 case 0x50:
326 case 0x80: /* gotta love NVIDIA's consistency.. */ 349 case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
330 engine->instmem.takedown = nv50_instmem_takedown; 353 engine->instmem.takedown = nv50_instmem_takedown;
331 engine->instmem.suspend = nv50_instmem_suspend; 354 engine->instmem.suspend = nv50_instmem_suspend;
332 engine->instmem.resume = nv50_instmem_resume; 355 engine->instmem.resume = nv50_instmem_resume;
333 engine->instmem.populate = nv50_instmem_populate; 356 engine->instmem.get = nv50_instmem_get;
334 engine->instmem.clear = nv50_instmem_clear; 357 engine->instmem.put = nv50_instmem_put;
335 engine->instmem.bind = nv50_instmem_bind; 358 engine->instmem.map = nv50_instmem_map;
336 engine->instmem.unbind = nv50_instmem_unbind; 359 engine->instmem.unmap = nv50_instmem_unmap;
337 if (dev_priv->chipset == 0x50) 360 if (dev_priv->chipset == 0x50)
338 engine->instmem.flush = nv50_instmem_flush; 361 engine->instmem.flush = nv50_instmem_flush;
339 else 362 else
@@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
345 engine->timer.takedown = nv04_timer_takedown; 368 engine->timer.takedown = nv04_timer_takedown;
346 engine->fb.init = nv50_fb_init; 369 engine->fb.init = nv50_fb_init;
347 engine->fb.takedown = nv50_fb_takedown; 370 engine->fb.takedown = nv50_fb_takedown;
348 engine->graph.grclass = nv50_graph_grclass;
349 engine->graph.init = nv50_graph_init; 371 engine->graph.init = nv50_graph_init;
350 engine->graph.takedown = nv50_graph_takedown; 372 engine->graph.takedown = nv50_graph_takedown;
351 engine->graph.fifo_access = nv50_graph_fifo_access; 373 engine->graph.fifo_access = nv50_graph_fifo_access;
@@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
381 engine->display.init = nv50_display_init; 403 engine->display.init = nv50_display_init;
382 engine->display.destroy = nv50_display_destroy; 404 engine->display.destroy = nv50_display_destroy;
383 engine->gpio.init = nv50_gpio_init; 405 engine->gpio.init = nv50_gpio_init;
384 engine->gpio.takedown = nouveau_stub_takedown; 406 engine->gpio.takedown = nv50_gpio_fini;
385 engine->gpio.get = nv50_gpio_get; 407 engine->gpio.get = nv50_gpio_get;
386 engine->gpio.set = nv50_gpio_set; 408 engine->gpio.set = nv50_gpio_set;
409 engine->gpio.irq_register = nv50_gpio_irq_register;
410 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
387 engine->gpio.irq_enable = nv50_gpio_irq_enable; 411 engine->gpio.irq_enable = nv50_gpio_irq_enable;
388 switch (dev_priv->chipset) { 412 switch (dev_priv->chipset) {
389 case 0xa3: 413 case 0x84:
390 case 0xa5: 414 case 0x86:
391 case 0xa8: 415 case 0x92:
392 case 0xaf: 416 case 0x94:
393 engine->pm.clock_get = nva3_pm_clock_get; 417 case 0x96:
394 engine->pm.clock_pre = nva3_pm_clock_pre; 418 case 0x98:
395 engine->pm.clock_set = nva3_pm_clock_set; 419 case 0xa0:
396 break; 420 case 0xaa:
397 default: 421 case 0xac:
422 case 0x50:
398 engine->pm.clock_get = nv50_pm_clock_get; 423 engine->pm.clock_get = nv50_pm_clock_get;
399 engine->pm.clock_pre = nv50_pm_clock_pre; 424 engine->pm.clock_pre = nv50_pm_clock_pre;
400 engine->pm.clock_set = nv50_pm_clock_set; 425 engine->pm.clock_set = nv50_pm_clock_set;
401 break; 426 break;
427 default:
428 engine->pm.clock_get = nva3_pm_clock_get;
429 engine->pm.clock_pre = nva3_pm_clock_pre;
430 engine->pm.clock_set = nva3_pm_clock_set;
431 break;
402 } 432 }
403 engine->pm.voltage_get = nouveau_voltage_gpio_get; 433 engine->pm.voltage_get = nouveau_voltage_gpio_get;
404 engine->pm.voltage_set = nouveau_voltage_gpio_set; 434 engine->pm.voltage_set = nouveau_voltage_gpio_set;
@@ -406,16 +436,38 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
406 engine->pm.temp_get = nv84_temp_get; 436 engine->pm.temp_get = nv84_temp_get;
407 else 437 else
408 engine->pm.temp_get = nv40_temp_get; 438 engine->pm.temp_get = nv40_temp_get;
439 switch (dev_priv->chipset) {
440 case 0x84:
441 case 0x86:
442 case 0x92:
443 case 0x94:
444 case 0x96:
445 case 0xa0:
446 engine->crypt.init = nv84_crypt_init;
447 engine->crypt.takedown = nv84_crypt_fini;
448 engine->crypt.create_context = nv84_crypt_create_context;
449 engine->crypt.destroy_context = nv84_crypt_destroy_context;
450 engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
451 break;
452 default:
453 engine->crypt.init = nouveau_stub_init;
454 engine->crypt.takedown = nouveau_stub_takedown;
455 break;
456 }
457 engine->vram.init = nv50_vram_init;
458 engine->vram.get = nv50_vram_new;
459 engine->vram.put = nv50_vram_del;
460 engine->vram.flags_valid = nv50_vram_flags_valid;
409 break; 461 break;
410 case 0xC0: 462 case 0xC0:
411 engine->instmem.init = nvc0_instmem_init; 463 engine->instmem.init = nvc0_instmem_init;
412 engine->instmem.takedown = nvc0_instmem_takedown; 464 engine->instmem.takedown = nvc0_instmem_takedown;
413 engine->instmem.suspend = nvc0_instmem_suspend; 465 engine->instmem.suspend = nvc0_instmem_suspend;
414 engine->instmem.resume = nvc0_instmem_resume; 466 engine->instmem.resume = nvc0_instmem_resume;
415 engine->instmem.populate = nvc0_instmem_populate; 467 engine->instmem.get = nvc0_instmem_get;
416 engine->instmem.clear = nvc0_instmem_clear; 468 engine->instmem.put = nvc0_instmem_put;
417 engine->instmem.bind = nvc0_instmem_bind; 469 engine->instmem.map = nvc0_instmem_map;
418 engine->instmem.unbind = nvc0_instmem_unbind; 470 engine->instmem.unmap = nvc0_instmem_unmap;
419 engine->instmem.flush = nvc0_instmem_flush; 471 engine->instmem.flush = nvc0_instmem_flush;
420 engine->mc.init = nv50_mc_init; 472 engine->mc.init = nv50_mc_init;
421 engine->mc.takedown = nv50_mc_takedown; 473 engine->mc.takedown = nv50_mc_takedown;
@@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
424 engine->timer.takedown = nv04_timer_takedown; 476 engine->timer.takedown = nv04_timer_takedown;
425 engine->fb.init = nvc0_fb_init; 477 engine->fb.init = nvc0_fb_init;
426 engine->fb.takedown = nvc0_fb_takedown; 478 engine->fb.takedown = nvc0_fb_takedown;
427 engine->graph.grclass = NULL; //nvc0_graph_grclass;
428 engine->graph.init = nvc0_graph_init; 479 engine->graph.init = nvc0_graph_init;
429 engine->graph.takedown = nvc0_graph_takedown; 480 engine->graph.takedown = nvc0_graph_takedown;
430 engine->graph.fifo_access = nvc0_graph_fifo_access; 481 engine->graph.fifo_access = nvc0_graph_fifo_access;
@@ -453,7 +504,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
453 engine->gpio.takedown = nouveau_stub_takedown; 504 engine->gpio.takedown = nouveau_stub_takedown;
454 engine->gpio.get = nv50_gpio_get; 505 engine->gpio.get = nv50_gpio_get;
455 engine->gpio.set = nv50_gpio_set; 506 engine->gpio.set = nv50_gpio_set;
507 engine->gpio.irq_register = nv50_gpio_irq_register;
508 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
456 engine->gpio.irq_enable = nv50_gpio_irq_enable; 509 engine->gpio.irq_enable = nv50_gpio_irq_enable;
510 engine->crypt.init = nouveau_stub_init;
511 engine->crypt.takedown = nouveau_stub_takedown;
512 engine->vram.init = nouveau_mem_detect;
513 engine->vram.flags_valid = nouveau_mem_flags_valid;
457 break; 514 break;
458 default: 515 default:
459 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); 516 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -495,7 +552,7 @@ nouveau_card_init_channel(struct drm_device *dev)
495 552
496 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 553 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
497 0, dev_priv->vram_size, 554 0, dev_priv->vram_size,
498 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 555 NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
499 &gpuobj); 556 &gpuobj);
500 if (ret) 557 if (ret)
501 goto out_err; 558 goto out_err;
@@ -505,9 +562,10 @@ nouveau_card_init_channel(struct drm_device *dev)
505 if (ret) 562 if (ret)
506 goto out_err; 563 goto out_err;
507 564
508 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, 565 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
509 dev_priv->gart_info.aper_size, 566 0, dev_priv->gart_info.aper_size,
510 NV_DMA_ACCESS_RW, &gpuobj, NULL); 567 NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
568 &gpuobj);
511 if (ret) 569 if (ret)
512 goto out_err; 570 goto out_err;
513 571
@@ -516,11 +574,11 @@ nouveau_card_init_channel(struct drm_device *dev)
516 if (ret) 574 if (ret)
517 goto out_err; 575 goto out_err;
518 576
577 mutex_unlock(&dev_priv->channel->mutex);
519 return 0; 578 return 0;
520 579
521out_err: 580out_err:
522 nouveau_channel_free(dev_priv->channel); 581 nouveau_channel_put(&dev_priv->channel);
523 dev_priv->channel = NULL;
524 return ret; 582 return ret;
525} 583}
526 584
@@ -567,6 +625,8 @@ nouveau_card_init(struct drm_device *dev)
567 if (ret) 625 if (ret)
568 goto out; 626 goto out;
569 engine = &dev_priv->engine; 627 engine = &dev_priv->engine;
628 spin_lock_init(&dev_priv->channels.lock);
629 spin_lock_init(&dev_priv->tile.lock);
570 spin_lock_init(&dev_priv->context_switch_lock); 630 spin_lock_init(&dev_priv->context_switch_lock);
571 631
572 /* Make the CRTCs and I2C buses accessible */ 632 /* Make the CRTCs and I2C buses accessible */
@@ -625,26 +685,28 @@ nouveau_card_init(struct drm_device *dev)
625 if (ret) 685 if (ret)
626 goto out_fb; 686 goto out_fb;
627 687
688 /* PCRYPT */
689 ret = engine->crypt.init(dev);
690 if (ret)
691 goto out_graph;
692
628 /* PFIFO */ 693 /* PFIFO */
629 ret = engine->fifo.init(dev); 694 ret = engine->fifo.init(dev);
630 if (ret) 695 if (ret)
631 goto out_graph; 696 goto out_crypt;
632 } 697 }
633 698
634 ret = engine->display.create(dev); 699 ret = engine->display.create(dev);
635 if (ret) 700 if (ret)
636 goto out_fifo; 701 goto out_fifo;
637 702
638 /* this call irq_preinstall, register irq handler and 703 ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
639 * call irq_postinstall
640 */
641 ret = drm_irq_install(dev);
642 if (ret) 704 if (ret)
643 goto out_display; 705 goto out_vblank;
644 706
645 ret = drm_vblank_init(dev, 0); 707 ret = nouveau_irq_init(dev);
646 if (ret) 708 if (ret)
647 goto out_irq; 709 goto out_vblank;
648 710
649 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 711 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
650 712
@@ -669,12 +731,16 @@ nouveau_card_init(struct drm_device *dev)
669out_fence: 731out_fence:
670 nouveau_fence_fini(dev); 732 nouveau_fence_fini(dev);
671out_irq: 733out_irq:
672 drm_irq_uninstall(dev); 734 nouveau_irq_fini(dev);
673out_display: 735out_vblank:
736 drm_vblank_cleanup(dev);
674 engine->display.destroy(dev); 737 engine->display.destroy(dev);
675out_fifo: 738out_fifo:
676 if (!nouveau_noaccel) 739 if (!nouveau_noaccel)
677 engine->fifo.takedown(dev); 740 engine->fifo.takedown(dev);
741out_crypt:
742 if (!nouveau_noaccel)
743 engine->crypt.takedown(dev);
678out_graph: 744out_graph:
679 if (!nouveau_noaccel) 745 if (!nouveau_noaccel)
680 engine->graph.takedown(dev); 746 engine->graph.takedown(dev);
@@ -713,12 +779,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
713 779
714 if (!engine->graph.accel_blocked) { 780 if (!engine->graph.accel_blocked) {
715 nouveau_fence_fini(dev); 781 nouveau_fence_fini(dev);
716 nouveau_channel_free(dev_priv->channel); 782 nouveau_channel_put_unlocked(&dev_priv->channel);
717 dev_priv->channel = NULL;
718 } 783 }
719 784
720 if (!nouveau_noaccel) { 785 if (!nouveau_noaccel) {
721 engine->fifo.takedown(dev); 786 engine->fifo.takedown(dev);
787 engine->crypt.takedown(dev);
722 engine->graph.takedown(dev); 788 engine->graph.takedown(dev);
723 } 789 }
724 engine->fb.takedown(dev); 790 engine->fb.takedown(dev);
@@ -737,7 +803,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
737 nouveau_gpuobj_takedown(dev); 803 nouveau_gpuobj_takedown(dev);
738 nouveau_mem_vram_fini(dev); 804 nouveau_mem_vram_fini(dev);
739 805
740 drm_irq_uninstall(dev); 806 nouveau_irq_fini(dev);
807 drm_vblank_cleanup(dev);
741 808
742 nouveau_pm_fini(dev); 809 nouveau_pm_fini(dev);
743 nouveau_bios_takedown(dev); 810 nouveau_bios_takedown(dev);
@@ -1024,21 +1091,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1024 else 1091 else
1025 getparam->value = NV_PCI; 1092 getparam->value = NV_PCI;
1026 break; 1093 break;
1027 case NOUVEAU_GETPARAM_FB_PHYSICAL:
1028 getparam->value = dev_priv->fb_phys;
1029 break;
1030 case NOUVEAU_GETPARAM_AGP_PHYSICAL:
1031 getparam->value = dev_priv->gart_info.aper_base;
1032 break;
1033 case NOUVEAU_GETPARAM_PCI_PHYSICAL:
1034 if (dev->sg) {
1035 getparam->value = (unsigned long)dev->sg->virtual;
1036 } else {
1037 NV_ERROR(dev, "Requested PCIGART address, "
1038 "while no PCIGART was created\n");
1039 return -EINVAL;
1040 }
1041 break;
1042 case NOUVEAU_GETPARAM_FB_SIZE: 1094 case NOUVEAU_GETPARAM_FB_SIZE:
1043 getparam->value = dev_priv->fb_available_size; 1095 getparam->value = dev_priv->fb_available_size;
1044 break; 1096 break;
@@ -1046,7 +1098,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1046 getparam->value = dev_priv->gart_info.aper_size; 1098 getparam->value = dev_priv->gart_info.aper_size;
1047 break; 1099 break;
1048 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 1100 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
1049 getparam->value = dev_priv->vm_vram_base; 1101 getparam->value = 0; /* deprecated */
1050 break; 1102 break;
1051 case NOUVEAU_GETPARAM_PTIMER_TIME: 1103 case NOUVEAU_GETPARAM_PTIMER_TIME:
1052 getparam->value = dev_priv->engine.timer.read(dev); 1104 getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1106,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1054 case NOUVEAU_GETPARAM_HAS_BO_USAGE: 1106 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
1055 getparam->value = 1; 1107 getparam->value = 1;
1056 break; 1108 break;
1109 case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
1110 getparam->value = (dev_priv->card_type < NV_50);
1111 break;
1057 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1112 case NOUVEAU_GETPARAM_GRAPH_UNITS:
1058 /* NV40 and NV50 versions are quite different, but register 1113 /* NV40 and NV50 versions are quite different, but register
1059 * address is the same. User is supposed to know the card 1114 * address is the same. User is supposed to know the card
@@ -1087,8 +1142,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
1087} 1142}
1088 1143
1089/* Wait until (value(reg) & mask) == val, up until timeout has hit */ 1144/* Wait until (value(reg) & mask) == val, up until timeout has hit */
1090bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, 1145bool
1091 uint32_t reg, uint32_t mask, uint32_t val) 1146nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
1147 uint32_t reg, uint32_t mask, uint32_t val)
1092{ 1148{
1093 struct drm_nouveau_private *dev_priv = dev->dev_private; 1149 struct drm_nouveau_private *dev_priv = dev->dev_private;
1094 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 1150 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1158,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
1102 return false; 1158 return false;
1103} 1159}
1104 1160
1161/* Wait until (value(reg) & mask) != val, up until timeout has hit */
1162bool
1163nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
1164 uint32_t reg, uint32_t mask, uint32_t val)
1165{
1166 struct drm_nouveau_private *dev_priv = dev->dev_private;
1167 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1168 uint64_t start = ptimer->read(dev);
1169
1170 do {
1171 if ((nv_rd32(dev, reg) & mask) != val)
1172 return true;
1173 } while (ptimer->read(dev) - start < timeout);
1174
1175 return false;
1176}
1177
1105/* Waits for PGRAPH to go completely idle */ 1178/* Waits for PGRAPH to go completely idle */
1106bool nouveau_wait_for_idle(struct drm_device *dev) 1179bool nouveau_wait_for_idle(struct drm_device *dev)
1107{ 1180{
1108 if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { 1181 struct drm_nouveau_private *dev_priv = dev->dev_private;
1182 uint32_t mask = ~0;
1183
1184 if (dev_priv->card_type == NV_40)
1185 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1186
1187 if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
1109 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", 1188 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
1110 nv_rd32(dev, NV04_PGRAPH_STATUS)); 1189 nv_rd32(dev, NV04_PGRAPH_STATUS));
1111 return false; 1190 return false;
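nouveau_wait_until() is renamed to nouveau_wait_eq(), and a nouveau_wait_ne() counterpart is added that polls until the masked register value differs from val. A hedged usage sketch; the register name is made up for illustration, and the real pattern appears in the nv04_dac.c hunk further down:

	/* wait up to 10ms for a hypothetical busy bit to clear, then for the
	 * status word to change away from zero */
	if (!nouveau_wait_eq(dev, 10000000, EXAMPLE_STATUS_REG, 0x00000001, 0x00000000))
		NV_ERROR(dev, "timeout waiting for busy bit to clear\n");
	if (!nouveau_wait_ne(dev, 10000000, EXAMPLE_STATUS_REG, 0xffffffff, 0x00000000))
		NV_ERROR(dev, "timeout waiting for status to change\n");
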
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 000000000000..fbe0fb13bc1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include <linux/ratelimit.h>
29
30#include "nouveau_util.h"
31
32static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
33
34void
35nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
36{
37 while (bf->name) {
38 if (value & bf->mask) {
39 printk(" %s", bf->name);
40 value &= ~bf->mask;
41 }
42
43 bf++;
44 }
45
46 if (value)
47 printk(" (unknown bits 0x%08x)", value);
48}
49
50void
51nouveau_enum_print(const struct nouveau_enum *en, u32 value)
52{
53 while (en->name) {
54 if (value == en->value) {
55 printk("%s", en->name);
56 return;
57 }
58
59 en++;
60 }
61
62 printk("(unknown enum 0x%08x)", value);
63}
64
65int
66nouveau_ratelimit(void)
67{
68 return __ratelimit(&nouveau_ratelimit_state);
69}
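nouveau_util.c adds small decode helpers used by the reworked IRQ code: both print functions walk a table until they reach an entry whose name is NULL, and nouveau_ratelimit() wraps a shared ratelimit state (at most 20 messages per 3 seconds). A sketch of a caller, with a made-up bitfield table:

	/* illustration only: decode an interrupt status word */
	static const struct nouveau_bitfield example_intr_names[] = {
		{ 0x00000001, "NOTIFY" },
		{ 0x00100000, "ERROR" },
		{}	/* NULL name terminates the table */
	};

	static void example_decode_intr(u32 stat)
	{
		if (nouveau_ratelimit()) {
			printk(KERN_INFO "intr:");
			nouveau_bitfield_print(example_intr_names, stat);
			printk("\n");
		}
	}
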
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 000000000000..d9ceaea26f4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#ifndef __NOUVEAU_UTIL_H__
29#define __NOUVEAU_UTIL_H__
30
31struct nouveau_bitfield {
32 u32 mask;
33 const char *name;
34};
35
36struct nouveau_enum {
37 u32 value;
38 const char *name;
39};
40
41void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
42void nouveau_enum_print(const struct nouveau_enum *, u32 value);
43int nouveau_ratelimit(void);
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 000000000000..07ab1749cf7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,421 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nouveau_vm.h"
29
30void
31nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
32{
33 struct nouveau_vm *vm = vma->vm;
34 struct nouveau_mm_node *r;
35 u32 offset = vma->node->offset + (delta >> 12);
36 u32 bits = vma->node->type - 12;
37 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
38 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
39 u32 max = 1 << (vm->pgt_bits - bits);
40 u32 end, len;
41
42 list_for_each_entry(r, &vram->regions, rl_entry) {
43 u64 phys = (u64)r->offset << 12;
44 u32 num = r->length >> bits;
45
46 while (num) {
47 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
48
49 end = (pte + num);
50 if (unlikely(end >= max))
51 end = max;
52 len = end - pte;
53
54 vm->map(vma, pgt, vram, pte, len, phys);
55
56 num -= len;
57 pte += len;
58 if (unlikely(end >= max)) {
59 pde++;
60 pte = 0;
61 }
62 }
63 }
64
65 vm->flush(vm);
66}
67
68void
69nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
70{
71 nouveau_vm_map_at(vma, 0, vram);
72}
73
74void
75nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
76 dma_addr_t *list)
77{
78 struct nouveau_vm *vm = vma->vm;
79 u32 offset = vma->node->offset + (delta >> 12);
80 u32 bits = vma->node->type - 12;
81 u32 num = length >> vma->node->type;
82 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
83 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
84 u32 max = 1 << (vm->pgt_bits - bits);
85 u32 end, len;
86
87 while (num) {
88 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
89
90 end = (pte + num);
91 if (unlikely(end >= max))
92 end = max;
93 len = end - pte;
94
95 vm->map_sg(vma, pgt, pte, list, len);
96
97 num -= len;
98 pte += len;
99 list += len;
100 if (unlikely(end >= max)) {
101 pde++;
102 pte = 0;
103 }
104 }
105
106 vm->flush(vm);
107}
108
109void
110nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
111{
112 struct nouveau_vm *vm = vma->vm;
113 u32 offset = vma->node->offset + (delta >> 12);
114 u32 bits = vma->node->type - 12;
115 u32 num = length >> vma->node->type;
116 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
117 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
118 u32 max = 1 << (vm->pgt_bits - bits);
119 u32 end, len;
120
121 while (num) {
122 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
123
124 end = (pte + num);
125 if (unlikely(end >= max))
126 end = max;
127 len = end - pte;
128
129 vm->unmap(pgt, pte, len);
130
131 num -= len;
132 pte += len;
133 if (unlikely(end >= max)) {
134 pde++;
135 pte = 0;
136 }
137 }
138
139 vm->flush(vm);
140}
141
142void
143nouveau_vm_unmap(struct nouveau_vma *vma)
144{
145 nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
146}
147
148static void
149nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
150{
151 struct nouveau_vm_pgd *vpgd;
152 struct nouveau_vm_pgt *vpgt;
153 struct nouveau_gpuobj *pgt;
154 u32 pde;
155
156 for (pde = fpde; pde <= lpde; pde++) {
157 vpgt = &vm->pgt[pde - vm->fpde];
158 if (--vpgt->refcount)
159 continue;
160
161 list_for_each_entry(vpgd, &vm->pgd_list, head) {
162 vm->unmap_pgt(vpgd->obj, pde);
163 }
164
165 pgt = vpgt->obj;
166 vpgt->obj = NULL;
167
168 mutex_unlock(&vm->mm->mutex);
169 nouveau_gpuobj_ref(NULL, &pgt);
170 mutex_lock(&vm->mm->mutex);
171 }
172}
173
174static int
175nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
176{
177 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
178 struct nouveau_vm_pgd *vpgd;
179 struct nouveau_gpuobj *pgt;
180 u32 pgt_size;
181 int ret;
182
183 pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
184 pgt_size *= 8;
185
186 mutex_unlock(&vm->mm->mutex);
187 ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
188 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
189 mutex_lock(&vm->mm->mutex);
190 if (unlikely(ret))
191 return ret;
192
193 /* someone beat us to filling the PDE while we didn't have the lock */
194 if (unlikely(vpgt->refcount++)) {
195 mutex_unlock(&vm->mm->mutex);
196 nouveau_gpuobj_ref(NULL, &pgt);
197 mutex_lock(&vm->mm->mutex);
198 return 0;
199 }
200
201 list_for_each_entry(vpgd, &vm->pgd_list, head) {
202 vm->map_pgt(vpgd->obj, type, pde, pgt);
203 }
204
205 vpgt->page_shift = type;
206 vpgt->obj = pgt;
207 return 0;
208}
209
210int
211nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
212 u32 access, struct nouveau_vma *vma)
213{
214 u32 align = (1 << page_shift) >> 12;
215 u32 msize = size >> 12;
216 u32 fpde, lpde, pde;
217 int ret;
218
219 mutex_lock(&vm->mm->mutex);
220 ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
221 if (unlikely(ret != 0)) {
222 mutex_unlock(&vm->mm->mutex);
223 return ret;
224 }
225
226 fpde = (vma->node->offset >> vm->pgt_bits);
227 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
228 for (pde = fpde; pde <= lpde; pde++) {
229 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
230
231 if (likely(vpgt->refcount)) {
232 vpgt->refcount++;
233 continue;
234 }
235
236 ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
237 if (ret) {
238 if (pde != fpde)
239 nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
240 nouveau_mm_put(vm->mm, vma->node);
241 mutex_unlock(&vm->mm->mutex);
242 vma->node = NULL;
243 return ret;
244 }
245 }
246 mutex_unlock(&vm->mm->mutex);
247
248 vma->vm = vm;
249 vma->offset = (u64)vma->node->offset << 12;
250 vma->access = access;
251 return 0;
252}
253
254void
255nouveau_vm_put(struct nouveau_vma *vma)
256{
257 struct nouveau_vm *vm = vma->vm;
258 u32 fpde, lpde;
259
260 if (unlikely(vma->node == NULL))
261 return;
262 fpde = (vma->node->offset >> vm->pgt_bits);
263 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
264
265 mutex_lock(&vm->mm->mutex);
266 nouveau_mm_put(vm->mm, vma->node);
267 vma->node = NULL;
268 nouveau_vm_unmap_pgt(vm, fpde, lpde);
269 mutex_unlock(&vm->mm->mutex);
270}
271
272int
273nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
274 u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
275 struct nouveau_vm **pvm)
276{
277 struct drm_nouveau_private *dev_priv = dev->dev_private;
278 struct nouveau_vm *vm;
279 u64 mm_length = (offset + length) - mm_offset;
280 u32 block;
281 int ret;
282
283 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
284 if (!vm)
285 return -ENOMEM;
286
287 if (dev_priv->card_type == NV_50) {
288 vm->map_pgt = nv50_vm_map_pgt;
289 vm->unmap_pgt = nv50_vm_unmap_pgt;
290 vm->map = nv50_vm_map;
291 vm->map_sg = nv50_vm_map_sg;
292 vm->unmap = nv50_vm_unmap;
293 vm->flush = nv50_vm_flush;
294 } else {
295 kfree(vm);
296 return -ENOSYS;
297 }
298
299 vm->fpde = offset >> pgt_bits;
300 vm->lpde = (offset + length - 1) >> pgt_bits;
301 vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
302 if (!vm->pgt) {
303 kfree(vm);
304 return -ENOMEM;
305 }
306
307 INIT_LIST_HEAD(&vm->pgd_list);
308 vm->dev = dev;
309 vm->refcount = 1;
310 vm->pgt_bits = pgt_bits - 12;
311 vm->spg_shift = spg_shift;
312 vm->lpg_shift = lpg_shift;
313
314 block = (1 << pgt_bits);
315 if (length < block)
316 block = length;
317
318 ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
319 block >> 12);
320 if (ret) {
321 kfree(vm);
322 return ret;
323 }
324
325 *pvm = vm;
326 return 0;
327}
328
329static int
330nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
331{
332 struct nouveau_vm_pgd *vpgd;
333 int i;
334
335 if (!pgd)
336 return 0;
337
338 vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
339 if (!vpgd)
340 return -ENOMEM;
341
342 nouveau_gpuobj_ref(pgd, &vpgd->obj);
343
344 mutex_lock(&vm->mm->mutex);
345 for (i = vm->fpde; i <= vm->lpde; i++) {
346 struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];
347
348 if (!vpgt->obj) {
349 vm->unmap_pgt(pgd, i);
350 continue;
351 }
352
353 vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
354 }
355 list_add(&vpgd->head, &vm->pgd_list);
356 mutex_unlock(&vm->mm->mutex);
357 return 0;
358}
359
360static void
361nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
362{
363 struct nouveau_vm_pgd *vpgd, *tmp;
364
365 if (!pgd)
366 return;
367
368 mutex_lock(&vm->mm->mutex);
369 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
370 if (vpgd->obj != pgd)
371 continue;
372
373 list_del(&vpgd->head);
374 nouveau_gpuobj_ref(NULL, &vpgd->obj);
375 kfree(vpgd);
376 }
377 mutex_unlock(&vm->mm->mutex);
378}
379
380static void
381nouveau_vm_del(struct nouveau_vm *vm)
382{
383 struct nouveau_vm_pgd *vpgd, *tmp;
384
385 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
386 nouveau_vm_unlink(vm, vpgd->obj);
387 }
388 WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
389
390 kfree(vm->pgt);
391 kfree(vm);
392}
393
394int
395nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
396 struct nouveau_gpuobj *pgd)
397{
398 struct nouveau_vm *vm;
399 int ret;
400
401 vm = ref;
402 if (vm) {
403 ret = nouveau_vm_link(vm, pgd);
404 if (ret)
405 return ret;
406
407 vm->refcount++;
408 }
409
410 vm = *ptr;
411 *ptr = ref;
412
413 if (vm) {
414 nouveau_vm_unlink(vm, pgd);
415
416 if (--vm->refcount == 0)
417 nouveau_vm_del(vm);
418 }
419
420 return 0;
421}
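nouveau_vm.c introduces the address-space manager that supersedes the old fixed vm_vram_base layout (deprecated in the getparam hunk above). A minimal lifecycle sketch, assuming a channel VM and a struct nouveau_vram allocation already exist, mirroring the real call made on dev_priv->chan_vm in the nouveau_sgdma.c hunk earlier:

	/* illustration only: carve out a range, map VRAM into it, tear it down */
	struct nouveau_vma vma = {};
	int ret;

	ret = nouveau_vm_get(dev_priv->chan_vm, size, 12 /* 4 KiB pages */,
			     NV_MEM_ACCESS_RW, &vma);
	if (ret)
		return ret;

	nouveau_vm_map(&vma, vram);	/* write PTEs for each region in 'vram' */
	/* ... the GPU can now use the mapping at vma.offset ... */
	nouveau_vm_unmap(&vma);		/* clear the PTEs */
	nouveau_vm_put(&vma);		/* release the range and page-table refs */
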
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 000000000000..b6755cfa7b71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_VM_H__
26#define __NOUVEAU_VM_H__
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32
33struct nouveau_vm_pgt {
34 struct nouveau_gpuobj *obj;
35 u32 page_shift;
36 u32 refcount;
37};
38
39struct nouveau_vm_pgd {
40 struct list_head head;
41 struct nouveau_gpuobj *obj;
42};
43
44struct nouveau_vma {
45 struct nouveau_vm *vm;
46 struct nouveau_mm_node *node;
47 u64 offset;
48 u32 access;
49};
50
51struct nouveau_vm {
52 struct drm_device *dev;
53 struct nouveau_mm *mm;
54 int refcount;
55
56 struct list_head pgd_list;
57 atomic_t pgraph_refs;
58 atomic_t pcrypt_refs;
59
60 struct nouveau_vm_pgt *pgt;
61 u32 fpde;
62 u32 lpde;
63
64 u32 pgt_bits;
65 u8 spg_shift;
66 u8 lpg_shift;
67
68 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
69 struct nouveau_gpuobj *pgt);
70 void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde);
71 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
72 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
73 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
74 u32 pte, dma_addr_t *, u32 cnt);
75 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
76 void (*flush)(struct nouveau_vm *);
77};
78
79/* nouveau_vm.c */
80int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
81 u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
82 struct nouveau_vm **);
83int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
84 struct nouveau_gpuobj *pgd);
85int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
86 u32 access, struct nouveau_vma *);
87void nouveau_vm_put(struct nouveau_vma *);
88void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
89void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
90void nouveau_vm_unmap(struct nouveau_vma *);
91void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
92void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
93 dma_addr_t *);
94
95/* nv50_vm.c */
96void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
97 struct nouveau_gpuobj *pgt);
98void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde);
99void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
100 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
101void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
102 u32 pte, dma_addr_t *, u32 cnt);
103void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
104void nv50_vm_flush(struct nouveau_vm *);
105void nv50_vm_flush_engine(struct drm_device *, int engine);
106
107#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e180741629..297505eb98d5 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
551 if (dev_priv->card_type >= NV_30) 551 if (dev_priv->card_type >= NV_30)
552 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); 552 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
553 553
554 regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; 554 if (dev_priv->card_type >= NV_10)
555 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
556 else
557 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
555 558
556 /* Some misc regs */ 559 /* Some misc regs */
557 if (dev_priv->card_type == NV_40) { 560 if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
669 if (nv_two_heads(dev)) 672 if (nv_two_heads(dev))
670 NVSetOwner(dev, nv_crtc->index); 673 NVSetOwner(dev, nv_crtc->index);
671 674
675 drm_vblank_pre_modeset(dev, nv_crtc->index);
672 funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 676 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
673 677
674 NVBlankScreen(dev, nv_crtc->index, true); 678 NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
701#endif 705#endif
702 706
703 funcs->dpms(crtc, DRM_MODE_DPMS_ON); 707 funcs->dpms(crtc, DRM_MODE_DPMS_ON);
708 drm_vblank_post_modeset(dev, nv_crtc->index);
704} 709}
705 710
706static void nv_crtc_destroy(struct drm_crtc *crtc) 711static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
986 .cursor_move = nv04_crtc_cursor_move, 991 .cursor_move = nv04_crtc_cursor_move,
987 .gamma_set = nv_crtc_gamma_set, 992 .gamma_set = nv_crtc_gamma_set,
988 .set_config = drm_crtc_helper_set_config, 993 .set_config = drm_crtc_helper_set_config,
994 .page_flip = nouveau_crtc_page_flip,
989 .destroy = nv_crtc_destroy, 995 .destroy = nv_crtc_destroy,
990}; 996};
991 997
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f2ffcc..e000455e06d0 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
74 * use a 10ms timeout (guards against crtc being inactive, in 74 * use a 10ms timeout (guards against crtc being inactive, in
75 * which case blank state would never change) 75 * which case blank state would never change)
76 */ 76 */
77 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, 77 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
78 0x00000001, 0x00000000)) 78 0x00000001, 0x00000000))
79 return -EBUSY; 79 return -EBUSY;
80 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, 80 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
81 0x00000001, 0x00000001)) 81 0x00000001, 0x00000001))
82 return -EBUSY; 82 return -EBUSY;
83 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, 83 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
84 0x00000001, 0x00000000)) 84 0x00000001, 0x00000000))
85 return -EBUSY; 85 return -EBUSY;
86 86
87 udelay(100); 87 udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf772e3c..1715e1464b7d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34 34
35static void nv04_vblank_crtc0_isr(struct drm_device *);
36static void nv04_vblank_crtc1_isr(struct drm_device *);
37
35static void 38static void
36nv04_display_store_initial_head_owner(struct drm_device *dev) 39nv04_display_store_initial_head_owner(struct drm_device *dev)
37{ 40{
@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev)
197 func->save(encoder); 200 func->save(encoder);
198 } 201 }
199 202
203 nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
204 nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
200 return 0; 205 return 0;
201} 206}
202 207
@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev)
208 213
209 NV_DEBUG_KMS(dev, "\n"); 214 NV_DEBUG_KMS(dev, "\n");
210 215
216 nouveau_irq_unregister(dev, 24);
217 nouveau_irq_unregister(dev, 25);
218
211 /* Turn every CRTC off. */ 219 /* Turn every CRTC off. */
212 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 220 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
213 struct drm_mode_set modeset = { 221 struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev)
258 return 0; 266 return 0;
259} 267}
260 268
269static void
270nv04_vblank_crtc0_isr(struct drm_device *dev)
271{
272 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
273 drm_handle_vblank(dev, 0);
274}
275
276static void
277nv04_vblank_crtc1_isr(struct drm_device *dev)
278{
279 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
280 drm_handle_vblank(dev, 1);
281}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c9388bc1..7a1189371096 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
28#include "nouveau_ramht.h" 28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h" 29#include "nouveau_fbcon.h"
30 30
31void 31int
32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
33{ 33{
34 struct nouveau_fbdev *nfbdev = info->par; 34 struct nouveau_fbdev *nfbdev = info->par;
35 struct drm_device *dev = nfbdev->dev; 35 struct drm_device *dev = nfbdev->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_channel *chan = dev_priv->channel; 37 struct nouveau_channel *chan = dev_priv->channel;
38 int ret;
38 39
39 if (info->state != FBINFO_STATE_RUNNING) 40 ret = RING_SPACE(chan, 4);
40 return; 41 if (ret)
41 42 return ret;
42 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
43 nouveau_fbcon_gpu_lockup(info);
44 }
45
46 if (info->flags & FBINFO_HWACCEL_DISABLED) {
47 cfb_copyarea(info, region);
48 return;
49 }
50 43
51 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); 44 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
52 OUT_RING(chan, (region->sy << 16) | region->sx); 45 OUT_RING(chan, (region->sy << 16) | region->sx);
53 OUT_RING(chan, (region->dy << 16) | region->dx); 46 OUT_RING(chan, (region->dy << 16) | region->dx);
54 OUT_RING(chan, (region->height << 16) | region->width); 47 OUT_RING(chan, (region->height << 16) | region->width);
55 FIRE_RING(chan); 48 FIRE_RING(chan);
49 return 0;
56} 50}
57 51
58void 52int
59nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 53nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
60{ 54{
61 struct nouveau_fbdev *nfbdev = info->par; 55 struct nouveau_fbdev *nfbdev = info->par;
62 struct drm_device *dev = nfbdev->dev; 56 struct drm_device *dev = nfbdev->dev;
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 57 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan = dev_priv->channel; 58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret;
65 60
66 if (info->state != FBINFO_STATE_RUNNING) 61 ret = RING_SPACE(chan, 7);
67 return; 62 if (ret)
68 63 return ret;
69 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
70 nouveau_fbcon_gpu_lockup(info);
71 }
72
73 if (info->flags & FBINFO_HWACCEL_DISABLED) {
74 cfb_fillrect(info, rect);
75 return;
76 }
77 64
78 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 65 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
79 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); 66 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
87 OUT_RING(chan, (rect->dx << 16) | rect->dy); 74 OUT_RING(chan, (rect->dx << 16) | rect->dy);
88 OUT_RING(chan, (rect->width << 16) | rect->height); 75 OUT_RING(chan, (rect->width << 16) | rect->height);
89 FIRE_RING(chan); 76 FIRE_RING(chan);
77 return 0;
90} 78}
91 79
92void 80int
93nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 81nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
94{ 82{
95 struct nouveau_fbdev *nfbdev = info->par; 83 struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
101 uint32_t dsize; 89 uint32_t dsize;
102 uint32_t width; 90 uint32_t width;
103 uint32_t *data = (uint32_t *)image->data; 91 uint32_t *data = (uint32_t *)image->data;
92 int ret;
104 93
105 if (info->state != FBINFO_STATE_RUNNING) 94 if (image->depth != 1)
106 return; 95 return -ENODEV;
107
108 if (image->depth != 1) {
109 cfb_imageblit(info, image);
110 return;
111 }
112
113 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
114 nouveau_fbcon_gpu_lockup(info);
115 }
116 96
117 if (info->flags & FBINFO_HWACCEL_DISABLED) { 97 ret = RING_SPACE(chan, 8);
118 cfb_imageblit(info, image); 98 if (ret)
119 return; 99 return ret;
120 }
121 100
122 width = ALIGN(image->width, 8); 101 width = ALIGN(image->width, 8);
123 dsize = ALIGN(width * image->height, 32) >> 5; 102 dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
144 while (dsize) { 123 while (dsize) {
145 int iter_len = dsize > 128 ? 128 : dsize; 124 int iter_len = dsize > 128 ? 128 : dsize;
146 125
147 if (RING_SPACE(chan, iter_len + 1)) { 126 ret = RING_SPACE(chan, iter_len + 1);
148 nouveau_fbcon_gpu_lockup(info); 127 if (ret)
149 cfb_imageblit(info, image); 128 return ret;
150 return;
151 }
152 129
153 BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); 130 BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
154 OUT_RINGp(chan, data, iter_len); 131 OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
157 } 134 }
158 135
159 FIRE_RING(chan); 136 FIRE_RING(chan);
160} 137 return 0;
161
162static int
163nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
164{
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 struct nouveau_gpuobj *obj = NULL;
167 int ret;
168
169 ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
170 if (ret)
171 return ret;
172
173 ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
174 nouveau_gpuobj_ref(NULL, &obj);
175 return ret;
176} 138}
177 139
178int 140int
@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
214 return -EINVAL; 176 return -EINVAL;
215 } 177 }
216 178
217 ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? 179 ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
218 0x0062 : 0x0042, NvCtxSurf2D); 180 dev_priv->card_type >= NV_10 ?
181 0x0062 : 0x0042);
219 if (ret) 182 if (ret)
220 return ret; 183 return ret;
221 184
222 ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect); 185 ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
223 if (ret) 186 if (ret)
224 return ret; 187 return ret;
225 188
226 ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop); 189 ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
227 if (ret) 190 if (ret)
228 return ret; 191 return ret;
229 192
230 ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt); 193 ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
231 if (ret) 194 if (ret)
232 return ret; 195 return ret;
233 196
234 ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect); 197 ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
235 if (ret) 198 if (ret)
236 return ret; 199 return ret;
237 200
238 ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? 201 ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
239 0x009f : 0x005f, NvImageBlit); 202 dev_priv->chipset >= 0x11 ?
203 0x009f : 0x005f);
240 if (ret) 204 if (ret)
241 return ret; 205 return ret;
242 206
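A rough sketch of the calling convention the int returns above imply: the shared fbcon layer is expected to check the return value and fall back to the cfb_* software path itself, rather than each hook doing so. The wrapper below is an assumption for illustration and is not shown in this hunk:

/* Illustrative only: how a common wrapper might consume the
 * int-returning accel hook.  Its exact form here is an assumption.
 */
static void
example_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if ((info->flags & FBINFO_HWACCEL_DISABLED) ||
	    nv04_fbcon_fillrect(info, rect))
		cfb_fillrect(info, rect);	/* software fallback */
}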
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b7ddcd..f89d104698df 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_ramht.h"
31#include "nouveau_util.h"
31 32
32#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) 33#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
33#define NV04_RAMFC__SIZE 32 34#define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
128 if (ret) 129 if (ret)
129 return ret; 130 return ret;
130 131
132 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
133 NV03_USER(chan->id), PAGE_SIZE);
134 if (!chan->user)
135 return -ENOMEM;
136
131 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 137 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
132 138
133 /* Setup initial state */ 139 /* Setup initial state */
@@ -151,10 +157,31 @@ void
151nv04_fifo_destroy_context(struct nouveau_channel *chan) 157nv04_fifo_destroy_context(struct nouveau_channel *chan)
152{ 158{
153 struct drm_device *dev = chan->dev; 159 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
162 unsigned long flags;
154 163
155 nv_wr32(dev, NV04_PFIFO_MODE, 164 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
156 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); 165 pfifo->reassign(dev, false);
157 166
167 /* Unload the context if it's the currently active one */
168 if (pfifo->channel_id(dev) == chan->id) {
169 pfifo->disable(dev);
170 pfifo->unload_context(dev);
171 pfifo->enable(dev);
172 }
173
174 /* Keep it from being rescheduled */
175 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
176
177 pfifo->reassign(dev, true);
178 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
179
180 /* Free the channel resources */
181 if (chan->user) {
182 iounmap(chan->user);
183 chan->user = NULL;
184 }
158 nouveau_gpuobj_ref(NULL, &chan->ramfc); 185 nouveau_gpuobj_ref(NULL, &chan->ramfc);
159} 186}
160 187
@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
208 if (chid < 0 || chid >= dev_priv->engine.fifo.channels) 235 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
209 return 0; 236 return 0;
210 237
211 chan = dev_priv->fifos[chid]; 238 chan = dev_priv->channels.ptr[chid];
212 if (!chan) { 239 if (!chan) {
213 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); 240 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
214 return -EINVAL; 241 return -EINVAL;
@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
267static void 294static void
268nv04_fifo_init_intr(struct drm_device *dev) 295nv04_fifo_init_intr(struct drm_device *dev)
269{ 296{
297 nouveau_irq_register(dev, 8, nv04_fifo_isr);
270 nv_wr32(dev, 0x002100, 0xffffffff); 298 nv_wr32(dev, 0x002100, 0xffffffff);
271 nv_wr32(dev, 0x002140, 0xffffffff); 299 nv_wr32(dev, 0x002140, 0xffffffff);
272} 300}
@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev)
289 pfifo->reassign(dev, true); 317 pfifo->reassign(dev, true);
290 318
291 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 319 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
292 if (dev_priv->fifos[i]) { 320 if (dev_priv->channels.ptr[i]) {
293 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 321 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
294 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 322 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
295 } 323 }
@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev)
298 return 0; 326 return 0;
299} 327}
300 328
329void
330nv04_fifo_fini(struct drm_device *dev)
331{
332 nv_wr32(dev, 0x2140, 0x00000000);
333 nouveau_irq_unregister(dev, 8);
334}
335
336static bool
337nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
338{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct nouveau_channel *chan = NULL;
341 struct nouveau_gpuobj *obj;
342 unsigned long flags;
343 const int subc = (addr >> 13) & 0x7;
344 const int mthd = addr & 0x1ffc;
345 bool handled = false;
346 u32 engine;
347
348 spin_lock_irqsave(&dev_priv->channels.lock, flags);
349 if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
350 chan = dev_priv->channels.ptr[chid];
351 if (unlikely(!chan))
352 goto out;
353
354 switch (mthd) {
355 case 0x0000: /* bind object to subchannel */
356 obj = nouveau_ramht_find(chan, data);
357 if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
358 break;
359
360 chan->sw_subchannel[subc] = obj->class;
361 engine = 0x0000000f << (subc * 4);
362
363 nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
364 handled = true;
365 break;
366 default:
367 engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
368 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
369 break;
370
371 if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
372 mthd, data))
373 handled = true;
374 break;
375 }
376
377out:
378 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
379 return handled;
380}
381
382void
383nv04_fifo_isr(struct drm_device *dev)
384{
385 struct drm_nouveau_private *dev_priv = dev->dev_private;
386 struct nouveau_engine *engine = &dev_priv->engine;
387 uint32_t status, reassign;
388 int cnt = 0;
389
390 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
391 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
392 uint32_t chid, get;
393
394 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
395
396 chid = engine->fifo.channel_id(dev);
397 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
398
399 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
400 uint32_t mthd, data;
401 int ptr;
402
403 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
404 * wrapping on my G80 chips, but CACHE1 isn't big
 405 * enough for this much data. Tests show that it
 406 * wraps around to the start at GET=0x800. No clue
 407 * as to why.
408 */
409 ptr = (get & 0x7ff) >> 2;
410
411 if (dev_priv->card_type < NV_40) {
412 mthd = nv_rd32(dev,
413 NV04_PFIFO_CACHE1_METHOD(ptr));
414 data = nv_rd32(dev,
415 NV04_PFIFO_CACHE1_DATA(ptr));
416 } else {
417 mthd = nv_rd32(dev,
418 NV40_PFIFO_CACHE1_METHOD(ptr));
419 data = nv_rd32(dev,
420 NV40_PFIFO_CACHE1_DATA(ptr));
421 }
422
423 if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
424 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
425 "Mthd 0x%04x Data 0x%08x\n",
426 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
427 data);
428 }
429
430 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
431 nv_wr32(dev, NV03_PFIFO_INTR_0,
432 NV_PFIFO_INTR_CACHE_ERROR);
433
434 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
435 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
436 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
437 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
438 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
439 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
440
441 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
442 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
443 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
444
445 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
446 }
447
448 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
449 u32 dma_get = nv_rd32(dev, 0x003244);
450 u32 dma_put = nv_rd32(dev, 0x003240);
451 u32 push = nv_rd32(dev, 0x003220);
452 u32 state = nv_rd32(dev, 0x003228);
453
454 if (dev_priv->card_type == NV_50) {
455 u32 ho_get = nv_rd32(dev, 0x003328);
456 u32 ho_put = nv_rd32(dev, 0x003320);
457 u32 ib_get = nv_rd32(dev, 0x003334);
458 u32 ib_put = nv_rd32(dev, 0x003330);
459
460 if (nouveau_ratelimit())
461 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
462 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
463 "State 0x%08x Push 0x%08x\n",
464 chid, ho_get, dma_get, ho_put,
465 dma_put, ib_get, ib_put, state,
466 push);
467
468 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
469 nv_wr32(dev, 0x003364, 0x00000000);
470 if (dma_get != dma_put || ho_get != ho_put) {
471 nv_wr32(dev, 0x003244, dma_put);
472 nv_wr32(dev, 0x003328, ho_put);
473 } else
474 if (ib_get != ib_put) {
475 nv_wr32(dev, 0x003334, ib_put);
476 }
477 } else {
478 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
479 "Put 0x%08x State 0x%08x Push 0x%08x\n",
480 chid, dma_get, dma_put, state, push);
481
482 if (dma_get != dma_put)
483 nv_wr32(dev, 0x003244, dma_put);
484 }
485
486 nv_wr32(dev, 0x003228, 0x00000000);
487 nv_wr32(dev, 0x003220, 0x00000001);
488 nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
489 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
490 }
491
492 if (status & NV_PFIFO_INTR_SEMAPHORE) {
493 uint32_t sem;
494
495 status &= ~NV_PFIFO_INTR_SEMAPHORE;
496 nv_wr32(dev, NV03_PFIFO_INTR_0,
497 NV_PFIFO_INTR_SEMAPHORE);
498
499 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
500 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
501
502 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
503 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
504 }
505
506 if (dev_priv->card_type == NV_50) {
507 if (status & 0x00000010) {
508 nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
509 status &= ~0x00000010;
510 nv_wr32(dev, 0x002100, 0x00000010);
511 }
512 }
513
514 if (status) {
515 if (nouveau_ratelimit())
516 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
517 status, chid);
518 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
519 status = 0;
520 }
521
522 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
523 }
524
525 if (status) {
526 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
527 nv_wr32(dev, 0x2140, 0);
528 nv_wr32(dev, 0x140, 0);
529 }
530
531 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
532}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c8973421b635..af75015068d6 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
26#include "drm.h" 26#include "drm.h"
27#include "nouveau_drm.h" 27#include "nouveau_drm.h"
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30#include "nouveau_util.h"
31
32static int nv04_graph_register(struct drm_device *dev);
33static void nv04_graph_isr(struct drm_device *dev);
29 34
30static uint32_t nv04_graph_ctx_regs[] = { 35static uint32_t nv04_graph_ctx_regs[] = {
31 0x0040053c, 36 0x0040053c,
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev)
357 if (chid >= dev_priv->engine.fifo.channels) 362 if (chid >= dev_priv->engine.fifo.channels)
358 return NULL; 363 return NULL;
359 364
360 return dev_priv->fifos[chid]; 365 return dev_priv->channels.ptr[chid];
361} 366}
362 367
363void 368static void
364nv04_graph_context_switch(struct drm_device *dev) 369nv04_graph_context_switch(struct drm_device *dev)
365{ 370{
366 struct drm_nouveau_private *dev_priv = dev->dev_private; 371 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev)
368 struct nouveau_channel *chan = NULL; 373 struct nouveau_channel *chan = NULL;
369 int chid; 374 int chid;
370 375
371 pgraph->fifo_access(dev, false);
372 nouveau_wait_for_idle(dev); 376 nouveau_wait_for_idle(dev);
373 377
374 /* If previous context is valid, we need to save it */ 378 /* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev)
376 380
377 /* Load context for next channel */ 381 /* Load context for next channel */
378 chid = dev_priv->engine.fifo.channel_id(dev); 382 chid = dev_priv->engine.fifo.channel_id(dev);
379 chan = dev_priv->fifos[chid]; 383 chan = dev_priv->channels.ptr[chid];
380 if (chan) 384 if (chan)
381 nv04_graph_load_context(chan); 385 nv04_graph_load_context(chan);
382
383 pgraph->fifo_access(dev, true);
384} 386}
385 387
386static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) 388static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
412 414
413void nv04_graph_destroy_context(struct nouveau_channel *chan) 415void nv04_graph_destroy_context(struct nouveau_channel *chan)
414{ 416{
417 struct drm_device *dev = chan->dev;
418 struct drm_nouveau_private *dev_priv = dev->dev_private;
419 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
415 struct graph_state *pgraph_ctx = chan->pgraph_ctx; 420 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
421 unsigned long flags;
422
423 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
424 pgraph->fifo_access(dev, false);
425
426 /* Unload the context if it's the currently active one */
427 if (pgraph->channel(dev) == chan)
428 pgraph->unload_context(dev);
416 429
430 /* Free the context resources */
417 kfree(pgraph_ctx); 431 kfree(pgraph_ctx);
418 chan->pgraph_ctx = NULL; 432 chan->pgraph_ctx = NULL;
433
434 pgraph->fifo_access(dev, true);
435 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
419} 436}
420 437
421int nv04_graph_load_context(struct nouveau_channel *chan) 438int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev)
468{ 485{
469 struct drm_nouveau_private *dev_priv = dev->dev_private; 486 struct drm_nouveau_private *dev_priv = dev->dev_private;
470 uint32_t tmp; 487 uint32_t tmp;
488 int ret;
471 489
472 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 490 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
473 ~NV_PMC_ENABLE_PGRAPH); 491 ~NV_PMC_ENABLE_PGRAPH);
474 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 492 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
475 NV_PMC_ENABLE_PGRAPH); 493 NV_PMC_ENABLE_PGRAPH);
476 494
495 ret = nv04_graph_register(dev);
496 if (ret)
497 return ret;
498
477 /* Enable PGRAPH interrupts */ 499 /* Enable PGRAPH interrupts */
500 nouveau_irq_register(dev, 12, nv04_graph_isr);
478 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); 501 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
479 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 502 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
480 503
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev)
510 533
511void nv04_graph_takedown(struct drm_device *dev) 534void nv04_graph_takedown(struct drm_device *dev)
512{ 535{
536 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
537 nouveau_irq_unregister(dev, 12);
513} 538}
514 539
515void 540void
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
524} 549}
525 550
526static int 551static int
527nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, 552nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
528 int mthd, uint32_t data) 553 u32 class, u32 mthd, u32 data)
529{ 554{
530 atomic_set(&chan->fence.last_sequence_irq, data); 555 atomic_set(&chan->fence.last_sequence_irq, data);
531 return 0; 556 return 0;
532} 557}
533 558
559int
560nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
561 u32 class, u32 mthd, u32 data)
562{
563 struct drm_device *dev = chan->dev;
564 struct nouveau_page_flip_state s;
565
566 if (!nouveau_finish_page_flip(chan, &s))
567 nv_set_crtc_base(dev, s.crtc,
568 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
569
570 return 0;
571}
572
534/* 573/*
535 * Software methods, why they are needed, and how they all work: 574 * Software methods, why they are needed, and how they all work:
536 * 575 *
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
606 */ 645 */
607 646
608static void 647static void
609nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) 648nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
610{ 649{
611 struct drm_device *dev = chan->dev; 650 struct drm_device *dev = chan->dev;
612 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; 651 u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
613 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; 652 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
614 uint32_t tmp; 653 u32 tmp;
615 654
616 tmp = nv_ri32(dev, instance); 655 tmp = nv_ri32(dev, instance);
617 tmp &= ~mask; 656 tmp &= ~mask;
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
623} 662}
624 663
625static void 664static void
626nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) 665nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
627{ 666{
628 struct drm_device *dev = chan->dev; 667 struct drm_device *dev = chan->dev;
629 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; 668 u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
630 uint32_t tmp, ctx1; 669 u32 tmp, ctx1;
631 int class, op, valid = 1; 670 int class, op, valid = 1;
632 671
633 ctx1 = nv_ri32(dev, instance); 672 ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val
672} 711}
673 712
674static int 713static int
675nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, 714nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
676 int mthd, uint32_t data) 715 u32 class, u32 mthd, u32 data)
677{ 716{
678 if (data > 5) 717 if (data > 5)
679 return 1; 718 return 1;
680 /* Old versions of the objects only accept first three operations. */ 719 /* Old versions of the objects only accept first three operations. */
681 if (data > 2 && grclass < 0x40) 720 if (data > 2 && class < 0x40)
682 return 1; 721 return 1;
683 nv04_graph_set_ctx1(chan, 0x00038000, data << 15); 722 nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
684 /* changing operation changes set of objects needed for validation */ 723 /* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
687} 726}
688 727
689static int 728static int
690nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, 729nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
691 int mthd, uint32_t data) 730 u32 class, u32 mthd, u32 data)
692{ 731{
693 uint32_t min = data & 0xffff, max; 732 uint32_t min = data & 0xffff, max;
694 uint32_t w = data >> 16; 733 uint32_t w = data >> 16;
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
706} 745}
707 746
708static int 747static int
709nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, 748nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
710 int mthd, uint32_t data) 749 u32 class, u32 mthd, u32 data)
711{ 750{
712 uint32_t min = data & 0xffff, max; 751 uint32_t min = data & 0xffff, max;
713 uint32_t w = data >> 16; 752 uint32_t w = data >> 16;
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
725} 764}
726 765
727static int 766static int
728nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, 767nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
729 int mthd, uint32_t data) 768 u32 class, u32 mthd, u32 data)
730{ 769{
731 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 770 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
732 case 0x30: 771 case 0x30:
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
742} 781}
743 782
744static int 783static int
745nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, 784nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
746 int mthd, uint32_t data) 785 u32 class, u32 mthd, u32 data)
747{ 786{
748 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 787 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
749 case 0x30: 788 case 0x30:
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
763} 802}
764 803
765static int 804static int
766nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, 805nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
767 int mthd, uint32_t data) 806 u32 class, u32 mthd, u32 data)
768{ 807{
769 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 808 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
770 case 0x30: 809 case 0x30:
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
778} 817}
779 818
780static int 819static int
781nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, 820nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
782 int mthd, uint32_t data) 821 u32 class, u32 mthd, u32 data)
783{ 822{
784 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 823 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
785 case 0x30: 824 case 0x30:
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
793} 832}
794 833
795static int 834static int
796nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, 835nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
797 int mthd, uint32_t data) 836 u32 class, u32 mthd, u32 data)
798{ 837{
799 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 838 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
800 case 0x30: 839 case 0x30:
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
808} 847}
809 848
810static int 849static int
811nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, 850nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
812 int mthd, uint32_t data) 851 u32 class, u32 mthd, u32 data)
813{ 852{
814 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 853 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
815 case 0x30: 854 case 0x30:
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
823} 862}
824 863
825static int 864static int
826nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, 865nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
827 int mthd, uint32_t data) 866 u32 class, u32 mthd, u32 data)
828{ 867{
829 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 868 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
830 case 0x30: 869 case 0x30:
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
838} 877}
839 878
840static int 879static int
841nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, 880nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
842 int mthd, uint32_t data) 881 u32 class, u32 mthd, u32 data)
843{ 882{
844 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 883 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
845 case 0x30: 884 case 0x30:
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
853} 892}
854 893
855static int 894static int
856nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, 895nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
857 int mthd, uint32_t data) 896 u32 class, u32 mthd, u32 data)
858{ 897{
859 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 898 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
860 case 0x30: 899 case 0x30:
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
868} 907}
869 908
870static int 909static int
871nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, 910nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
872 int mthd, uint32_t data) 911 u32 class, u32 mthd, u32 data)
873{ 912{
874 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 913 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
875 case 0x30: 914 case 0x30:
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
883} 922}
884 923
885static int 924static int
886nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, 925nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
887 int mthd, uint32_t data) 926 u32 class, u32 mthd, u32 data)
888{ 927{
889 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 928 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
890 case 0x30: 929 case 0x30:
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
898} 937}
899 938
900static int 939static int
901nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, 940nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
902 int mthd, uint32_t data) 941 u32 class, u32 mthd, u32 data)
903{ 942{
904 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 943 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
905 case 0x30: 944 case 0x30:
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
913} 952}
914 953
915static int 954static int
916nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, 955nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
917 int mthd, uint32_t data) 956 u32 class, u32 mthd, u32 data)
918{ 957{
919 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 958 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
920 case 0x30: 959 case 0x30:
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
930 return 1; 969 return 1;
931} 970}
932 971
933static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { 972static int
934 { 0x0150, nv04_graph_mthd_set_ref }, 973nv04_graph_register(struct drm_device *dev)
935 {} 974{
936}; 975 struct drm_nouveau_private *dev_priv = dev->dev_private;
937
938static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
939 { 0x0184, nv04_graph_mthd_bind_nv01_patt },
940 { 0x0188, nv04_graph_mthd_bind_rop },
941 { 0x018c, nv04_graph_mthd_bind_beta1 },
942 { 0x0190, nv04_graph_mthd_bind_surf_dst },
943 { 0x02fc, nv04_graph_mthd_set_operation },
944 {},
945};
946
947static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
948 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
949 { 0x018c, nv04_graph_mthd_bind_rop },
950 { 0x0190, nv04_graph_mthd_bind_beta1 },
951 { 0x0194, nv04_graph_mthd_bind_beta4 },
952 { 0x0198, nv04_graph_mthd_bind_surf2d },
953 { 0x02fc, nv04_graph_mthd_set_operation },
954 {},
955};
956
957static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
958 { 0x0184, nv04_graph_mthd_bind_chroma },
959 { 0x0188, nv04_graph_mthd_bind_clip },
960 { 0x018c, nv04_graph_mthd_bind_nv01_patt },
961 { 0x0190, nv04_graph_mthd_bind_rop },
962 { 0x0194, nv04_graph_mthd_bind_beta1 },
963 { 0x0198, nv04_graph_mthd_bind_surf_dst },
964 { 0x019c, nv04_graph_mthd_bind_surf_src },
965 { 0x02fc, nv04_graph_mthd_set_operation },
966 {},
967};
968
969static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
970 { 0x0184, nv04_graph_mthd_bind_chroma },
971 { 0x0188, nv04_graph_mthd_bind_clip },
972 { 0x018c, nv04_graph_mthd_bind_nv04_patt },
973 { 0x0190, nv04_graph_mthd_bind_rop },
974 { 0x0194, nv04_graph_mthd_bind_beta1 },
975 { 0x0198, nv04_graph_mthd_bind_beta4 },
976 { 0x019c, nv04_graph_mthd_bind_surf2d },
977 { 0x02fc, nv04_graph_mthd_set_operation },
978 {},
979};
980
981static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
982 { 0x0188, nv04_graph_mthd_bind_chroma },
983 { 0x018c, nv04_graph_mthd_bind_clip },
984 { 0x0190, nv04_graph_mthd_bind_nv04_patt },
985 { 0x0194, nv04_graph_mthd_bind_rop },
986 { 0x0198, nv04_graph_mthd_bind_beta1 },
987 { 0x019c, nv04_graph_mthd_bind_beta4 },
988 { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
989 { 0x03e4, nv04_graph_mthd_set_operation },
990 {},
991};
992
993static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
994 { 0x0184, nv04_graph_mthd_bind_chroma },
995 { 0x0188, nv04_graph_mthd_bind_clip },
996 { 0x018c, nv04_graph_mthd_bind_nv01_patt },
997 { 0x0190, nv04_graph_mthd_bind_rop },
998 { 0x0194, nv04_graph_mthd_bind_beta1 },
999 { 0x0198, nv04_graph_mthd_bind_surf_dst },
1000 { 0x02fc, nv04_graph_mthd_set_operation },
1001 {},
1002};
1003
1004static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
1005 { 0x0184, nv04_graph_mthd_bind_chroma },
1006 { 0x0188, nv04_graph_mthd_bind_nv01_patt },
1007 { 0x018c, nv04_graph_mthd_bind_rop },
1008 { 0x0190, nv04_graph_mthd_bind_beta1 },
1009 { 0x0194, nv04_graph_mthd_bind_surf_dst },
1010 { 0x02fc, nv04_graph_mthd_set_operation },
1011 {},
1012};
1013
1014static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
1015 { 0x0184, nv04_graph_mthd_bind_chroma },
1016 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
1017 { 0x018c, nv04_graph_mthd_bind_rop },
1018 { 0x0190, nv04_graph_mthd_bind_beta1 },
1019 { 0x0194, nv04_graph_mthd_bind_beta4 },
1020 { 0x0198, nv04_graph_mthd_bind_surf2d },
1021 { 0x02fc, nv04_graph_mthd_set_operation },
1022 {},
1023};
1024
1025static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
1026 { 0x0188, nv04_graph_mthd_bind_nv01_patt },
1027 { 0x018c, nv04_graph_mthd_bind_rop },
1028 { 0x0190, nv04_graph_mthd_bind_beta1 },
1029 { 0x0194, nv04_graph_mthd_bind_surf_dst },
1030 { 0x0304, nv04_graph_mthd_set_operation },
1031 {},
1032};
1033
1034static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
1035 { 0x0188, nv04_graph_mthd_bind_nv04_patt },
1036 { 0x018c, nv04_graph_mthd_bind_rop },
1037 { 0x0190, nv04_graph_mthd_bind_beta1 },
1038 { 0x0194, nv04_graph_mthd_bind_beta4 },
1039 { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
1040 { 0x0304, nv04_graph_mthd_set_operation },
1041 {},
1042};
1043 976
1044static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { 977 if (dev_priv->engine.graph.registered)
1045 { 0x0184, nv04_graph_mthd_bind_clip }, 978 return 0;
1046 { 0x0188, nv04_graph_mthd_bind_nv01_patt },
1047 { 0x018c, nv04_graph_mthd_bind_rop },
1048 { 0x0190, nv04_graph_mthd_bind_beta1 },
1049 { 0x0194, nv04_graph_mthd_bind_surf_dst },
1050 { 0x02fc, nv04_graph_mthd_set_operation },
1051 {},
1052};
1053 979
1054static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { 980 /* dvd subpicture */
1055 { 0x0184, nv04_graph_mthd_bind_clip }, 981 NVOBJ_CLASS(dev, 0x0038, GR);
1056 { 0x0188, nv04_graph_mthd_bind_nv04_patt }, 982
1057 { 0x018c, nv04_graph_mthd_bind_rop }, 983 /* m2mf */
1058 { 0x0190, nv04_graph_mthd_bind_beta1 }, 984 NVOBJ_CLASS(dev, 0x0039, GR);
1059 { 0x0194, nv04_graph_mthd_bind_beta4 }, 985
1060 { 0x0198, nv04_graph_mthd_bind_surf2d }, 986 /* nv03 gdirect */
1061 { 0x02fc, nv04_graph_mthd_set_operation }, 987 NVOBJ_CLASS(dev, 0x004b, GR);
1062 {}, 988 NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
989 NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
990 NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
991 NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
992 NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
993
994 /* nv04 gdirect */
995 NVOBJ_CLASS(dev, 0x004a, GR);
996 NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
997 NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
998 NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
999 NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
1000 NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
1001 NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
1002
1003 /* nv01 imageblit */
1004 NVOBJ_CLASS(dev, 0x001f, GR);
1005 NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
1006 NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
1007 NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
1008 NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
1009 NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
1010 NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
1011 NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
1012 NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
1013
1014 /* nv04 imageblit */
1015 NVOBJ_CLASS(dev, 0x005f, GR);
1016 NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
1017 NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
1018 NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
1019 NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
1020 NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
1021 NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
1022 NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
1023 NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
1024
1025 /* nv04 iifc */
1026 NVOBJ_CLASS(dev, 0x0060, GR);
1027 NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
1028 NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
1029 NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
1030 NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
1031 NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
1032 NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
1033 NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
1034 NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
1035
1036 /* nv05 iifc */
1037 NVOBJ_CLASS(dev, 0x0064, GR);
1038
1039 /* nv01 ifc */
1040 NVOBJ_CLASS(dev, 0x0021, GR);
1041 NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
1042 NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
1043 NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
1044 NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
1045 NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
1046 NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
1047 NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
1048
1049 /* nv04 ifc */
1050 NVOBJ_CLASS(dev, 0x0061, GR);
1051 NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
1052 NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
1053 NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
1054 NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
1055 NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
1056 NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
1057 NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
1058 NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
1059
1060 /* nv05 ifc */
1061 NVOBJ_CLASS(dev, 0x0065, GR);
1062
1063 /* nv03 sifc */
1064 NVOBJ_CLASS(dev, 0x0036, GR);
1065 NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
1066 NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1067 NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
1068 NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
1069 NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
1070 NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
1071
1072 /* nv04 sifc */
1073 NVOBJ_CLASS(dev, 0x0076, GR);
1074 NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
1075 NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1076 NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
1077 NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
1078 NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
1079 NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
1080 NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
1081
1082 /* nv05 sifc */
1083 NVOBJ_CLASS(dev, 0x0066, GR);
1084
1085 /* nv03 sifm */
1086 NVOBJ_CLASS(dev, 0x0037, GR);
1087 NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1088 NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
1089 NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
1090 NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
1091 NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
1092
1093 /* nv04 sifm */
1094 NVOBJ_CLASS(dev, 0x0077, GR);
1095 NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1096 NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
1097 NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
1098 NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
1099 NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
1100 NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
1101
1102 /* null */
1103 NVOBJ_CLASS(dev, 0x0030, GR);
1104
1105 /* surf2d */
1106 NVOBJ_CLASS(dev, 0x0042, GR);
1107
1108 /* rop */
1109 NVOBJ_CLASS(dev, 0x0043, GR);
1110
1111 /* beta1 */
1112 NVOBJ_CLASS(dev, 0x0012, GR);
1113
1114 /* beta4 */
1115 NVOBJ_CLASS(dev, 0x0072, GR);
1116
1117 /* cliprect */
1118 NVOBJ_CLASS(dev, 0x0019, GR);
1119
1120 /* nv01 pattern */
1121 NVOBJ_CLASS(dev, 0x0018, GR);
1122
1123 /* nv04 pattern */
1124 NVOBJ_CLASS(dev, 0x0044, GR);
1125
1126 /* swzsurf */
1127 NVOBJ_CLASS(dev, 0x0052, GR);
1128
1129 /* surf3d */
1130 NVOBJ_CLASS(dev, 0x0053, GR);
1131 NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
1132 NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
1133
1134 /* nv03 tex_tri */
1135 NVOBJ_CLASS(dev, 0x0048, GR);
1136 NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
1137 NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
1138 NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
1139
1140 /* tex_tri */
1141 NVOBJ_CLASS(dev, 0x0054, GR);
1142
1143 /* multitex_tri */
1144 NVOBJ_CLASS(dev, 0x0055, GR);
1145
1146 /* nv01 chroma */
1147 NVOBJ_CLASS(dev, 0x0017, GR);
1148
1149 /* nv04 chroma */
1150 NVOBJ_CLASS(dev, 0x0057, GR);
1151
1152 /* surf_dst */
1153 NVOBJ_CLASS(dev, 0x0058, GR);
1154
1155 /* surf_src */
1156 NVOBJ_CLASS(dev, 0x0059, GR);
1157
1158 /* surf_color */
1159 NVOBJ_CLASS(dev, 0x005a, GR);
1160
1161 /* surf_zeta */
1162 NVOBJ_CLASS(dev, 0x005b, GR);
1163
1164 /* nv01 line */
1165 NVOBJ_CLASS(dev, 0x001c, GR);
1166 NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
1167 NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1168 NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
1169 NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
1170 NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
1171 NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
1172
1173 /* nv04 line */
1174 NVOBJ_CLASS(dev, 0x005c, GR);
1175 NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
1176 NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1177 NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
1178 NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
1179 NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
1180 NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
1181 NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
1182
1183 /* nv01 tri */
1184 NVOBJ_CLASS(dev, 0x001d, GR);
1185 NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
1186 NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1187 NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
1188 NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
1189 NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
1190 NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
1191
1192 /* nv04 tri */
1193 NVOBJ_CLASS(dev, 0x005d, GR);
1194 NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
1195 NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1196 NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
1197 NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
1198 NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
1199 NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
1200 NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
1201
1202 /* nv01 rect */
1203 NVOBJ_CLASS(dev, 0x001e, GR);
1204 NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
1205 NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1206 NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
1207 NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
1208 NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
1209 NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
1210
1211 /* nv04 rect */
1212 NVOBJ_CLASS(dev, 0x005e, GR);
1213 NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
1214 NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1215 NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
1216 NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
1217 NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
1218 NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
1219 NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
1220
1221 /* nvsw */
1222 NVOBJ_CLASS(dev, 0x506e, SW);
1223 NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
1224 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
1225
1226 dev_priv->engine.graph.registered = true;
1227 return 0;
1063}; 1228};
1064 1229
1065static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { 1230static struct nouveau_bitfield nv04_graph_intr[] = {
1066 { 0x0188, nv04_graph_mthd_bind_clip }, 1231 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1067 { 0x018c, nv04_graph_mthd_bind_surf_color }, 1232 {}
1068 { 0x0190, nv04_graph_mthd_bind_surf_zeta },
1069 {},
1070}; 1233};
1071 1234
1072static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { 1235static struct nouveau_bitfield nv04_graph_nstatus[] =
1073 { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, 1236{
1074 { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, 1237 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1075 {}, 1238 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1239 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1240 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
1241 {}
1076}; 1242};
1077 1243
1078struct nouveau_pgraph_object_class nv04_graph_grclass[] = { 1244struct nouveau_bitfield nv04_graph_nsource[] =
1079 { 0x0038, false, NULL }, /* dvd subpicture */ 1245{
1080 { 0x0039, false, NULL }, /* m2mf */ 1246 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
1081 { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ 1247 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
1082 { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ 1248 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
1083 { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ 1249 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
1084 { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ 1250 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
1085 { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ 1251 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
1086 { 0x0064, false, NULL }, /* nv05 iifc */ 1252 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
1087 { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ 1253 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
1088 { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ 1254 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
1089 { 0x0065, false, NULL }, /* nv05 ifc */ 1255 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
1090 { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ 1256 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
1091 { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ 1257 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
1092 { 0x0066, false, NULL }, /* nv05 sifc */ 1258 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
1093 { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ 1259 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
1094 { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ 1260 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
1095 { 0x0030, false, NULL }, /* null */ 1261 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
1096 { 0x0042, false, NULL }, /* surf2d */ 1262 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
1097 { 0x0043, false, NULL }, /* rop */ 1263 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
1098 { 0x0012, false, NULL }, /* beta1 */ 1264 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
1099 { 0x0072, false, NULL }, /* beta4 */
1100 { 0x0019, false, NULL }, /* cliprect */
1101 { 0x0018, false, NULL }, /* nv01 pattern */
1102 { 0x0044, false, NULL }, /* nv04 pattern */
1103 { 0x0052, false, NULL }, /* swzsurf */
1104 { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
1105 { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
1106 { 0x0054, false, NULL }, /* tex_tri */
1107 { 0x0055, false, NULL }, /* multitex_tri */
1108 { 0x0017, false, NULL }, /* nv01 chroma */
1109 { 0x0057, false, NULL }, /* nv04 chroma */
1110 { 0x0058, false, NULL }, /* surf_dst */
1111 { 0x0059, false, NULL }, /* surf_src */
1112 { 0x005a, false, NULL }, /* surf_color */
1113 { 0x005b, false, NULL }, /* surf_zeta */
1114 { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
1115 { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
1116 { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
1117 { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
1118 { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
1119 { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
1120 { 0x506e, true, nv04_graph_mthds_sw },
1121 {} 1265 {}
1122}; 1266};
1123 1267
1268static void
1269nv04_graph_isr(struct drm_device *dev)
1270{
1271 u32 stat;
1272
1273 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1274 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1275 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1276 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1277 u32 chid = (addr & 0x0f000000) >> 24;
1278 u32 subc = (addr & 0x0000e000) >> 13;
1279 u32 mthd = (addr & 0x00001ffc);
1280 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1281 u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
1282 u32 show = stat;
1283
1284 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1285 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1286 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1287 show &= ~NV_PGRAPH_INTR_NOTIFY;
1288 }
1289 }
1290
1291 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1292 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1293 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1294 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1295 nv04_graph_context_switch(dev);
1296 }
1297
1298 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1299 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1300
1301 if (show && nouveau_ratelimit()) {
1302 NV_INFO(dev, "PGRAPH -");
1303 nouveau_bitfield_print(nv04_graph_intr, show);
1304 printk(" nsource:");
1305 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1306 printk(" nstatus:");
1307 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1308 printk("\n");
1309 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1310 "mthd 0x%04x data 0x%08x\n",
1311 chid, subc, class, mthd, data);
1312 }
1313 }
1314}
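
Note on the hunk above: the new nv04_graph_isr() decodes the faulting channel, subchannel and method from NV04_PGRAPH_TRAPPED_ADDR before deciding whether a registered software method can consume the NOTIFY interrupt. Below is a minimal stand-alone sketch of that field decode, using only the masks and shifts visible in the hunk (channel in bits 24-27, subchannel in bits 13-15, method in bits 2-12 for NV04; later chips use different field positions). The sample value is made up; this is an illustration, not driver code.

/* Stand-alone model of the NV04 PGRAPH trapped-address decode shown above.
 * Masks/shifts are taken from the hunk; the sample value is hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x0300215c;                 /* hypothetical TRAPPED_ADDR value */
	uint32_t chid = (addr & 0x0f000000) >> 24;  /* FIFO channel id */
	uint32_t subc = (addr & 0x0000e000) >> 13;  /* subchannel */
	uint32_t mthd = (addr & 0x00001ffc);        /* method offset, word aligned */

	printf("ch %u subc %u mthd 0x%04x\n",
	       (unsigned)chid, (unsigned)subc, (unsigned)mthd);
	return 0;
}
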
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae297abde..b8e3edb5c063 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
98} 98}
99 99
100int 100int
101nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, 101nv04_instmem_suspend(struct drm_device *dev)
102 uint32_t *sz)
103{ 102{
104 return 0; 103 return 0;
105} 104}
106 105
107void 106void
108nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 107nv04_instmem_resume(struct drm_device *dev)
109{
110}
111
112int
113nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
114{ 108{
115 return 0;
116} 109}
117 110
118int 111int
119nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 112nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
120{ 113{
114 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
115 struct drm_mm_node *ramin = NULL;
116
117 do {
118 if (drm_mm_pre_get(&dev_priv->ramin_heap))
119 return -ENOMEM;
120
121 spin_lock(&dev_priv->ramin_lock);
122 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
123 if (ramin == NULL) {
124 spin_unlock(&dev_priv->ramin_lock);
125 return -ENOMEM;
126 }
127
128 ramin = drm_mm_get_block_atomic(ramin, size, align);
129 spin_unlock(&dev_priv->ramin_lock);
130 } while (ramin == NULL);
131
132 gpuobj->node = ramin;
133 gpuobj->vinst = ramin->start;
121 return 0; 134 return 0;
122} 135}
123 136
124void 137void
125nv04_instmem_flush(struct drm_device *dev) 138nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
126{ 139{
140 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
141
142 spin_lock(&dev_priv->ramin_lock);
143 drm_mm_put_block(gpuobj->node);
144 gpuobj->node = NULL;
145 spin_unlock(&dev_priv->ramin_lock);
127} 146}
128 147
129int 148int
130nv04_instmem_suspend(struct drm_device *dev) 149nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
131{ 150{
151 gpuobj->pinst = gpuobj->vinst;
132 return 0; 152 return 0;
133} 153}
134 154
135void 155void
136nv04_instmem_resume(struct drm_device *dev) 156nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
137{ 157{
138} 158}
139 159
160void
161nv04_instmem_flush(struct drm_device *dev)
162{
163}
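
The new nv04_instmem_get() above allocates PRAMIN space from a drm_mm heap with a three-step pattern: drm_mm_pre_get() preallocates node memory outside the lock, drm_mm_search_free() finds a fit under the ramin spinlock, and drm_mm_get_block_atomic() claims it, retrying the whole sequence if the block could not be claimed. The fragment below is a stand-alone model of that "prepare outside the lock, claim under the lock, retry on failure" shape; try_reserve() and the toy bump allocator are invented stand-ins for drm_mm, not the driver's API.

/* Stand-alone model of the allocation retry pattern in nv04_instmem_get().
 * try_reserve() is a made-up stub standing in for drm_mm. */
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long heap_cursor;            /* toy bump-allocator state */

static bool try_reserve(unsigned long size, unsigned long *start)
{
	/* Stub for drm_mm_search_free() + drm_mm_get_block_atomic(). */
	*start = heap_cursor;
	heap_cursor += size;
	return true;
}

static int instmem_get_model(unsigned long size, unsigned long *start)
{
	bool claimed;

	do {
		/* A drm_mm_pre_get() equivalent would preallocate here,
		 * outside the lock, and may fail with -ENOMEM. */
		pthread_mutex_lock(&heap_lock);
		claimed = try_reserve(size, start);
		pthread_mutex_unlock(&heap_lock);
	} while (!claimed);

	return 0;
}

int main(void)
{
	unsigned long start;

	instmem_get_model(0x1000, &start);
	printf("reserved PRAMIN-like block at 0x%lx\n", start);
	return 0;
}
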
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda44e501..f78181a59b4a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6static struct drm_mm_node *
7nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
11 struct drm_mm_node *mem;
12 int ret;
13
14 ret = drm_mm_pre_get(&pfb->tag_heap);
15 if (ret)
16 return NULL;
17
18 spin_lock(&dev_priv->tile.lock);
19 mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
20 if (mem)
21 mem = drm_mm_get_block_atomic(mem, size, 0);
22 spin_unlock(&dev_priv->tile.lock);
23
24 return mem;
25}
26
27static void
28nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
29{
30 struct drm_nouveau_private *dev_priv = dev->dev_private;
31
32 spin_lock(&dev_priv->tile.lock);
33 drm_mm_put_block(mem);
34 spin_unlock(&dev_priv->tile.lock);
35}
36
37void
38nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
39 uint32_t size, uint32_t pitch, uint32_t flags)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
43 int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
44
45 tile->addr = addr;
46 tile->limit = max(1u, addr + size) - 1;
47 tile->pitch = pitch;
48
49 if (dev_priv->card_type == NV_20) {
50 if (flags & NOUVEAU_GEM_TILE_ZETA) {
51 /*
52 * Allocate some of the on-die tag memory,
53 * used to store Z compression meta-data (most
54 * likely just a bitmap determining if a given
55 * tile is compressed or not).
56 */
57 tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
58
59 if (tile->tag_mem) {
60 /* Enable Z compression */
61 if (dev_priv->chipset >= 0x25)
62 tile->zcomp = tile->tag_mem->start |
63 (bpp == 16 ?
64 NV25_PFB_ZCOMP_MODE_16 :
65 NV25_PFB_ZCOMP_MODE_32);
66 else
67 tile->zcomp = tile->tag_mem->start |
68 NV20_PFB_ZCOMP_EN |
69 (bpp == 16 ? 0 :
70 NV20_PFB_ZCOMP_MODE_32);
71 }
72
73 tile->addr |= 3;
74 } else {
75 tile->addr |= 1;
76 }
77
78 } else {
79 tile->addr |= 1 << 31;
80 }
81}
82
6void 83void
7nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 84nv10_fb_free_tile_region(struct drm_device *dev, int i)
8 uint32_t size, uint32_t pitch)
9{ 85{
10 struct drm_nouveau_private *dev_priv = dev->dev_private; 86 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1; 87 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
12 88
13 if (pitch) { 89 if (tile->tag_mem) {
14 if (dev_priv->card_type >= NV_20) 90 nv20_fb_free_tag(dev, tile->tag_mem);
15 addr |= 1; 91 tile->tag_mem = NULL;
16 else
17 addr |= 1 << 31;
18 } 92 }
19 93
20 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); 94 tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
21 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); 95}
22 nv_wr32(dev, NV10_PFB_TILE(i), addr); 96
97void
98nv10_fb_set_tile_region(struct drm_device *dev, int i)
99{
100 struct drm_nouveau_private *dev_priv = dev->dev_private;
101 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
102
103 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
104 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
105 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
106
107 if (dev_priv->card_type == NV_20)
108 nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
23} 109}
24 110
25int 111int
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev)
31 117
32 pfb->num_tiles = NV10_PFB_TILE__SIZE; 118 pfb->num_tiles = NV10_PFB_TILE__SIZE;
33 119
120 if (dev_priv->card_type == NV_20)
121 drm_mm_init(&pfb->tag_heap, 0,
122 (dev_priv->chipset >= 0x25 ?
123 64 * 1024 : 32 * 1024));
124
34 /* Turn all the tiling regions off. */ 125 /* Turn all the tiling regions off. */
35 for (i = 0; i < pfb->num_tiles; i++) 126 for (i = 0; i < pfb->num_tiles; i++)
36 pfb->set_region_tiling(dev, i, 0, 0, 0); 127 pfb->set_tile_region(dev, i);
37 128
38 return 0; 129 return 0;
39} 130}
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev)
41void 132void
42nv10_fb_takedown(struct drm_device *dev) 133nv10_fb_takedown(struct drm_device *dev)
43{ 134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
137 int i;
138
139 for (i = 0; i < pfb->num_tiles; i++)
140 pfb->free_tile_region(dev, i);
141
142 if (dev_priv->card_type == NV_20)
143 drm_mm_takedown(&pfb->tag_heap);
44} 144}
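
The interface change in this file splits tiling into two steps: init_tile_region() computes the register values once (limit, pitch, the enable bit folded into addr, plus an optional Z-compression tag on NV20) and caches them in struct nouveau_tile_reg, while set_tile_region() only writes the cached values to the hardware. Below is a minimal stand-alone sketch of the computation step, reusing the formulas visible in the hunk (limit = max(1u, addr + size) - 1, enable bit in bit 0 or bit 31 depending on generation); the struct and enum are simplified stand-ins, not the driver's definitions, and the Z-compression tag handling is omitted.

/* Simplified model of nv10_fb_init_tile_region()'s value computation.
 * struct tile_reg and enum card_type are illustrative stand-ins. */
#include <stdio.h>
#include <stdint.h>

enum card_type { NV_10, NV_20 };

struct tile_reg {
	uint32_t addr, limit, pitch;
};

static void init_tile_region(struct tile_reg *tile, enum card_type type,
			     uint32_t addr, uint32_t size, uint32_t pitch)
{
	tile->addr  = addr;
	tile->limit = (addr + size > 1 ? addr + size : 1) - 1; /* max(1u, addr+size)-1 */
	tile->pitch = pitch;

	if (type == NV_20)
		tile->addr |= 1;        /* NV20-style enable bit (zeta tag not modelled) */
	else
		tile->addr |= 1u << 31; /* NV10-style enable bit */
}

int main(void)
{
	struct tile_reg tile;

	init_tile_region(&tile, NV_10, 0x00100000, 0x00080000, 256);
	printf("addr 0x%08x limit 0x%08x pitch %u\n",
	       (unsigned)tile.addr, (unsigned)tile.limit, (unsigned)tile.pitch);
	return 0;
}
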
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad58fd5..d2ecbff4bee1 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
53 if (ret) 53 if (ret)
54 return ret; 54 return ret;
55 55
56 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
57 NV03_USER(chan->id), PAGE_SIZE);
58 if (!chan->user)
59 return -ENOMEM;
60
56 /* Fill entries that are seen filled in dumps of nvidia driver just 61 /* Fill entries that are seen filled in dumps of nvidia driver just
57 * after channel's is put into DMA mode 62 * after channel's is put into DMA mode
58 */ 63 */
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
73 return 0; 78 return 0;
74} 79}
75 80
76void
77nv10_fifo_destroy_context(struct nouveau_channel *chan)
78{
79 struct drm_device *dev = chan->dev;
80
81 nv_wr32(dev, NV04_PFIFO_MODE,
82 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
83
84 nouveau_gpuobj_ref(NULL, &chan->ramfc);
85}
86
87static void 81static void
88nv10_fifo_do_load_context(struct drm_device *dev, int chid) 82nv10_fifo_do_load_context(struct drm_device *dev, int chid)
89{ 83{
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
219static void 213static void
220nv10_fifo_init_intr(struct drm_device *dev) 214nv10_fifo_init_intr(struct drm_device *dev)
221{ 215{
216 nouveau_irq_register(dev, 8, nv04_fifo_isr);
222 nv_wr32(dev, 0x002100, 0xffffffff); 217 nv_wr32(dev, 0x002100, 0xffffffff);
223 nv_wr32(dev, 0x002140, 0xffffffff); 218 nv_wr32(dev, 0x002140, 0xffffffff);
224} 219}
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev)
241 pfifo->reassign(dev, true); 236 pfifo->reassign(dev, true);
242 237
243 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 238 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
244 if (dev_priv->fifos[i]) { 239 if (dev_priv->channels.ptr[i]) {
245 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 240 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
246 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 241 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
247 } 242 }
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c9731159..8c92edb7bbcd 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
26#include "drm.h" 26#include "drm.h"
27#include "nouveau_drm.h" 27#include "nouveau_drm.h"
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_util.h"
30
31static int nv10_graph_register(struct drm_device *);
32static void nv10_graph_isr(struct drm_device *);
29 33
30#define NV10_FIFO_NUMBER 32 34#define NV10_FIFO_NUMBER 32
31 35
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
786 return 0; 790 return 0;
787} 791}
788 792
789void 793static void
790nv10_graph_context_switch(struct drm_device *dev) 794nv10_graph_context_switch(struct drm_device *dev)
791{ 795{
792 struct drm_nouveau_private *dev_priv = dev->dev_private; 796 struct drm_nouveau_private *dev_priv = dev->dev_private;
793 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
794 struct nouveau_channel *chan = NULL; 797 struct nouveau_channel *chan = NULL;
795 int chid; 798 int chid;
796 799
797 pgraph->fifo_access(dev, false);
798 nouveau_wait_for_idle(dev); 800 nouveau_wait_for_idle(dev);
799 801
800 /* If previous context is valid, we need to save it */ 802 /* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
802 804
803 /* Load context for next channel */ 805 /* Load context for next channel */
804 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; 806 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
805 chan = dev_priv->fifos[chid]; 807 chan = dev_priv->channels.ptr[chid];
806 if (chan && chan->pgraph_ctx) 808 if (chan && chan->pgraph_ctx)
807 nv10_graph_load_context(chan); 809 nv10_graph_load_context(chan);
808
809 pgraph->fifo_access(dev, true);
810} 810}
811 811
812#define NV_WRITE_CTX(reg, val) do { \ 812#define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
833 if (chid >= dev_priv->engine.fifo.channels) 833 if (chid >= dev_priv->engine.fifo.channels)
834 return NULL; 834 return NULL;
835 835
836 return dev_priv->fifos[chid]; 836 return dev_priv->channels.ptr[chid];
837} 837}
838 838
839int nv10_graph_create_context(struct nouveau_channel *chan) 839int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
875 875
876void nv10_graph_destroy_context(struct nouveau_channel *chan) 876void nv10_graph_destroy_context(struct nouveau_channel *chan)
877{ 877{
878 struct drm_device *dev = chan->dev;
879 struct drm_nouveau_private *dev_priv = dev->dev_private;
880 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
878 struct graph_state *pgraph_ctx = chan->pgraph_ctx; 881 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
882 unsigned long flags;
883
884 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
885 pgraph->fifo_access(dev, false);
886
887 /* Unload the context if it's the currently active one */
888 if (pgraph->channel(dev) == chan)
889 pgraph->unload_context(dev);
879 890
891 /* Free the context resources */
880 kfree(pgraph_ctx); 892 kfree(pgraph_ctx);
881 chan->pgraph_ctx = NULL; 893 chan->pgraph_ctx = NULL;
894
895 pgraph->fifo_access(dev, true);
896 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
882} 897}
883 898
884void 899void
885nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 900nv10_graph_set_tile_region(struct drm_device *dev, int i)
886 uint32_t size, uint32_t pitch)
887{ 901{
888 uint32_t limit = max(1u, addr + size) - 1; 902 struct drm_nouveau_private *dev_priv = dev->dev_private;
889 903 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
890 if (pitch)
891 addr |= 1 << 31;
892 904
893 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); 905 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
894 nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); 906 nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
895 nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); 907 nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
896} 908}
897 909
898int nv10_graph_init(struct drm_device *dev) 910int nv10_graph_init(struct drm_device *dev)
899{ 911{
900 struct drm_nouveau_private *dev_priv = dev->dev_private; 912 struct drm_nouveau_private *dev_priv = dev->dev_private;
901 uint32_t tmp; 913 uint32_t tmp;
902 int i; 914 int ret, i;
903 915
904 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 916 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
905 ~NV_PMC_ENABLE_PGRAPH); 917 ~NV_PMC_ENABLE_PGRAPH);
906 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 918 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
907 NV_PMC_ENABLE_PGRAPH); 919 NV_PMC_ENABLE_PGRAPH);
908 920
921 ret = nv10_graph_register(dev);
922 if (ret)
923 return ret;
924
925 nouveau_irq_register(dev, 12, nv10_graph_isr);
909 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 926 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
910 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 927 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
911 928
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
928 945
929 /* Turn all the tiling regions off. */ 946 /* Turn all the tiling regions off. */
930 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 947 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
931 nv10_graph_set_region_tiling(dev, i, 0, 0, 0); 948 nv10_graph_set_tile_region(dev, i);
932 949
933 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); 950 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
934 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); 951 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev)
948 965
949void nv10_graph_takedown(struct drm_device *dev) 966void nv10_graph_takedown(struct drm_device *dev)
950{ 967{
968 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
969 nouveau_irq_unregister(dev, 12);
951} 970}
952 971
953static int 972static int
954nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, 973nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
955 int mthd, uint32_t data) 974 u32 class, u32 mthd, u32 data)
956{ 975{
957 struct drm_device *dev = chan->dev; 976 struct drm_device *dev = chan->dev;
958 struct graph_state *ctx = chan->pgraph_ctx; 977 struct graph_state *ctx = chan->pgraph_ctx;
959 struct pipe_state *pipe = &ctx->pipe_state; 978 struct pipe_state *pipe = &ctx->pipe_state;
960 struct drm_nouveau_private *dev_priv = dev->dev_private;
961 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
962 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; 979 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
963 uint32_t xfmode0, xfmode1; 980 uint32_t xfmode0, xfmode1;
964 int i; 981 int i;
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
1025 1042
1026 nouveau_wait_for_idle(dev); 1043 nouveau_wait_for_idle(dev);
1027 1044
1028 pgraph->fifo_access(dev, true);
1029
1030 return 0; 1045 return 0;
1031} 1046}
1032 1047
1033static int 1048static int
1034nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, 1049nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
1035 int mthd, uint32_t data) 1050 u32 class, u32 mthd, u32 data)
1036{ 1051{
1037 struct drm_device *dev = chan->dev; 1052 struct drm_device *dev = chan->dev;
1038 struct drm_nouveau_private *dev_priv = dev->dev_private;
1039 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
1040 1053
1041 nouveau_wait_for_idle(dev); 1054 nouveau_wait_for_idle(dev);
1042 1055
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
1045 nv_wr32(dev, 0x004006b0, 1058 nv_wr32(dev, 0x004006b0,
1046 nv_rd32(dev, 0x004006b0) | 0x8 << 24); 1059 nv_rd32(dev, 0x004006b0) | 0x8 << 24);
1047 1060
1048 pgraph->fifo_access(dev, true); 1061 return 0;
1062}
1063
1064static int
1065nv10_graph_register(struct drm_device *dev)
1066{
1067 struct drm_nouveau_private *dev_priv = dev->dev_private;
1068
1069 if (dev_priv->engine.graph.registered)
1070 return 0;
1071
1072 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
1073 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1074 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
1075 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
1076 NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
1077 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
1078 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
1079 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
1080 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
1081 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
1082 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
1083 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
1084 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
1085 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
1086 NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
1087 NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
1088 NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
1089 NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
1090
1091 /* celcius */
1092 if (dev_priv->chipset <= 0x10) {
1093 NVOBJ_CLASS(dev, 0x0056, GR);
1094 } else
1095 if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
1096 NVOBJ_CLASS(dev, 0x0096, GR);
1097 } else {
1098 NVOBJ_CLASS(dev, 0x0099, GR);
1099 NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
1100 NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
1101 NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
1102 NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
1103 NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
1104 }
1049 1105
1106 /* nvsw */
1107 NVOBJ_CLASS(dev, 0x506e, SW);
1108 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
1109
1110 dev_priv->engine.graph.registered = true;
1050 return 0; 1111 return 0;
1051} 1112}
1052 1113
1053static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { 1114struct nouveau_bitfield nv10_graph_intr[] = {
1054 { 0x1638, nv17_graph_mthd_lma_window }, 1115 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1055 { 0x163c, nv17_graph_mthd_lma_window }, 1116 { NV_PGRAPH_INTR_ERROR, "ERROR" },
1056 { 0x1640, nv17_graph_mthd_lma_window },
1057 { 0x1644, nv17_graph_mthd_lma_window },
1058 { 0x1658, nv17_graph_mthd_lma_enable },
1059 {} 1117 {}
1060}; 1118};
1061 1119
1062struct nouveau_pgraph_object_class nv10_graph_grclass[] = { 1120struct nouveau_bitfield nv10_graph_nstatus[] =
1063 { 0x0030, false, NULL }, /* null */ 1121{
1064 { 0x0039, false, NULL }, /* m2mf */ 1122 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1065 { 0x004a, false, NULL }, /* gdirect */ 1123 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1066 { 0x005f, false, NULL }, /* imageblit */ 1124 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1067 { 0x009f, false, NULL }, /* imageblit (nv12) */ 1125 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
1068 { 0x008a, false, NULL }, /* ifc */
1069 { 0x0089, false, NULL }, /* sifm */
1070 { 0x0062, false, NULL }, /* surf2d */
1071 { 0x0043, false, NULL }, /* rop */
1072 { 0x0012, false, NULL }, /* beta1 */
1073 { 0x0072, false, NULL }, /* beta4 */
1074 { 0x0019, false, NULL }, /* cliprect */
1075 { 0x0044, false, NULL }, /* pattern */
1076 { 0x0052, false, NULL }, /* swzsurf */
1077 { 0x0093, false, NULL }, /* surf3d */
1078 { 0x0094, false, NULL }, /* tex_tri */
1079 { 0x0095, false, NULL }, /* multitex_tri */
1080 { 0x0056, false, NULL }, /* celcius (nv10) */
1081 { 0x0096, false, NULL }, /* celcius (nv11) */
1082 { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
1083 {} 1126 {}
1084}; 1127};
1128
1129static void
1130nv10_graph_isr(struct drm_device *dev)
1131{
1132 u32 stat;
1133
1134 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1135 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1136 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1137 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1138 u32 chid = (addr & 0x01f00000) >> 20;
1139 u32 subc = (addr & 0x00070000) >> 16;
1140 u32 mthd = (addr & 0x00001ffc);
1141 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1142 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
1143 u32 show = stat;
1144
1145 if (stat & NV_PGRAPH_INTR_ERROR) {
1146 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1147 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1148 show &= ~NV_PGRAPH_INTR_ERROR;
1149 }
1150 }
1151
1152 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1153 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1154 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1155 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1156 nv10_graph_context_switch(dev);
1157 }
1158
1159 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1160 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1161
1162 if (show && nouveau_ratelimit()) {
1163 NV_INFO(dev, "PGRAPH -");
1164 nouveau_bitfield_print(nv10_graph_intr, show);
1165 printk(" nsource:");
1166 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1167 printk(" nstatus:");
1168 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
1169 printk("\n");
1170 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1171 "mthd 0x%04x data 0x%08x\n",
1172 chid, subc, class, mthd, data);
1173 }
1174 }
1175}
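
The hunks above replace the static nv10_graph_grclass[] table with per-device registration: NVOBJ_CLASS() declares each object class once, and NVOBJ_MTHD() attaches software methods (such as the nv17 LMA handlers) by (class, method) pair so the ISR can try nouveau_gpuobj_mthd_call2() when an ILLEGAL_MTHD interrupt arrives. The toy dispatch table below models that lookup in stand-alone C; every name in it is illustrative and none of it is the driver's API.

/* Toy (class, method) dispatch table modelling how registered software
 * methods can consume an ILLEGAL_MTHD interrupt. All names are illustrative. */
#include <stdio.h>
#include <stdint.h>

struct mthd_entry {
	uint32_t class_id;
	uint32_t mthd;
	int (*handler)(uint32_t data);
};

static int lma_window(uint32_t data)
{
	printf("lma_window data 0x%08x\n", (unsigned)data);
	return 0;
}

static const struct mthd_entry mthds[] = {
	{ 0x0099, 0x1638, lma_window },
	{ 0x0099, 0x163c, lma_window },
	{ 0, 0, NULL },
};

/* Returns 0 if a handler consumed the method, -1 otherwise
 * (mirroring how the ISRs above clear NV_PGRAPH_INTR_ERROR on success). */
static int mthd_call(uint32_t class_id, uint32_t mthd, uint32_t data)
{
	const struct mthd_entry *e;

	for (e = mthds; e->handler; e++) {
		if (e->class_id == class_id && e->mthd == mthd)
			return e->handler(data);
	}
	return -1;
}

int main(void)
{
	if (mthd_call(0x0099, 0x1638, 0x12345678) == 0)
		printf("interrupt consumed\n");
	return 0;
}
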
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd56eca..8464b76798d5 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
32#define NV34_GRCTX_SIZE (18140) 32#define NV34_GRCTX_SIZE (18140)
33#define NV35_36_GRCTX_SIZE (22396) 33#define NV35_36_GRCTX_SIZE (22396)
34 34
35static int nv20_graph_register(struct drm_device *);
36static int nv30_graph_register(struct drm_device *);
37static void nv20_graph_isr(struct drm_device *);
38
35static void 39static void
36nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 40nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
37{ 41{
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
425 struct drm_device *dev = chan->dev; 429 struct drm_device *dev = chan->dev;
426 struct drm_nouveau_private *dev_priv = dev->dev_private; 430 struct drm_nouveau_private *dev_priv = dev->dev_private;
427 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 431 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
432 unsigned long flags;
428 433
429 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 434 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
435 pgraph->fifo_access(dev, false);
436
437 /* Unload the context if it's the currently active one */
438 if (pgraph->channel(dev) == chan)
439 pgraph->unload_context(dev);
440
441 pgraph->fifo_access(dev, true);
442 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
443
444 /* Free the context resources */
430 nv_wo32(pgraph->ctx_table, chan->id * 4, 0); 445 nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
446 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
431} 447}
432 448
433int 449int
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
496} 512}
497 513
498void 514void
499nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 515nv20_graph_set_tile_region(struct drm_device *dev, int i)
500 uint32_t size, uint32_t pitch)
501{ 516{
502 uint32_t limit = max(1u, addr + size) - 1; 517 struct drm_nouveau_private *dev_priv = dev->dev_private;
503 518 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
504 if (pitch)
505 addr |= 1;
506 519
507 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); 520 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
508 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); 521 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
509 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); 522 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
510 523
511 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); 524 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
512 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit); 525 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
513 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); 526 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
514 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch); 527 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
515 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); 528 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
516 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr); 529 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
530
531 if (dev_priv->card_type == NV_20) {
532 nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
533 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
534 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
535 }
517} 536}
518 537
519int 538int
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)
560 579
561 nv20_graph_rdi(dev); 580 nv20_graph_rdi(dev);
562 581
582 ret = nv20_graph_register(dev);
583 if (ret) {
584 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
585 return ret;
586 }
587
588 nouveau_irq_register(dev, 12, nv20_graph_isr);
563 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 589 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
564 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 590 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
565 591
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
571 nv_wr32(dev, 0x40009C , 0x00000040); 597 nv_wr32(dev, 0x40009C , 0x00000040);
572 598
573 if (dev_priv->chipset >= 0x25) { 599 if (dev_priv->chipset >= 0x25) {
574 nv_wr32(dev, 0x400890, 0x00080000); 600 nv_wr32(dev, 0x400890, 0x00a8cfff);
575 nv_wr32(dev, 0x400610, 0x304B1FB6); 601 nv_wr32(dev, 0x400610, 0x304B1FB6);
576 nv_wr32(dev, 0x400B80, 0x18B82880); 602 nv_wr32(dev, 0x400B80, 0x1cbd3883);
577 nv_wr32(dev, 0x400B84, 0x44000000); 603 nv_wr32(dev, 0x400B84, 0x44000000);
578 nv_wr32(dev, 0x400098, 0x40000080); 604 nv_wr32(dev, 0x400098, 0x40000080);
579 nv_wr32(dev, 0x400B88, 0x000000ff); 605 nv_wr32(dev, 0x400B88, 0x000000ff);
606
580 } else { 607 } else {
581 nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ 608 nv_wr32(dev, 0x400880, 0x0008c7df);
582 nv_wr32(dev, 0x400094, 0x00000005); 609 nv_wr32(dev, 0x400094, 0x00000005);
583 nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ 610 nv_wr32(dev, 0x400B80, 0x45eae20e);
584 nv_wr32(dev, 0x400B84, 0x24000000); 611 nv_wr32(dev, 0x400B84, 0x24000000);
585 nv_wr32(dev, 0x400098, 0x00000040); 612 nv_wr32(dev, 0x400098, 0x00000040);
586 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); 613 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)
591 618
592 /* Turn all the tiling regions off. */ 619 /* Turn all the tiling regions off. */
593 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 620 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
594 nv20_graph_set_region_tiling(dev, i, 0, 0, 0); 621 nv20_graph_set_tile_region(dev, i);
595 622
596 for (i = 0; i < 8; i++) {
597 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
598 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
599 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
600 nv_rd32(dev, 0x100300 + i * 4));
601 }
602 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); 623 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
603 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); 624 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
604 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); 625 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 663 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 664 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
644 665
666 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
667 nouveau_irq_unregister(dev, 12);
668
645 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); 669 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
646} 670}
647 671
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
684 return ret; 708 return ret;
685 } 709 }
686 710
711 ret = nv30_graph_register(dev);
712 if (ret) {
713 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
714 return ret;
715 }
716
687 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 717 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
688 pgraph->ctx_table->pinst >> 4); 718 pgraph->ctx_table->pinst >> 4);
689 719
720 nouveau_irq_register(dev, 12, nv20_graph_isr);
690 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 721 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
691 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 722 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
692 723
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)
724 755
725 /* Turn all the tiling regions off. */ 756 /* Turn all the tiling regions off. */
726 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 757 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
727 nv20_graph_set_region_tiling(dev, i, 0, 0, 0); 758 nv20_graph_set_tile_region(dev, i);
728 759
729 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 760 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
730 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); 761 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
744 return 0; 775 return 0;
745} 776}
746 777
747struct nouveau_pgraph_object_class nv20_graph_grclass[] = { 778static int
748 { 0x0030, false, NULL }, /* null */ 779nv20_graph_register(struct drm_device *dev)
749 { 0x0039, false, NULL }, /* m2mf */ 780{
750 { 0x004a, false, NULL }, /* gdirect */ 781 struct drm_nouveau_private *dev_priv = dev->dev_private;
751 { 0x009f, false, NULL }, /* imageblit (nv12) */ 782
752 { 0x008a, false, NULL }, /* ifc */ 783 if (dev_priv->engine.graph.registered)
753 { 0x0089, false, NULL }, /* sifm */ 784 return 0;
754 { 0x0062, false, NULL }, /* surf2d */ 785
755 { 0x0043, false, NULL }, /* rop */ 786 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
756 { 0x0012, false, NULL }, /* beta1 */ 787 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
757 { 0x0072, false, NULL }, /* beta4 */ 788 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
758 { 0x0019, false, NULL }, /* cliprect */ 789 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
759 { 0x0044, false, NULL }, /* pattern */ 790 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
760 { 0x009e, false, NULL }, /* swzsurf */ 791 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
761 { 0x0096, false, NULL }, /* celcius */ 792 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
762 { 0x0097, false, NULL }, /* kelvin (nv20) */ 793 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
763 { 0x0597, false, NULL }, /* kelvin (nv25) */ 794 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
764 {} 795 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
765}; 796 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
766 797 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
767struct nouveau_pgraph_object_class nv30_graph_grclass[] = { 798 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
768 { 0x0030, false, NULL }, /* null */ 799 NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
769 { 0x0039, false, NULL }, /* m2mf */ 800 NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
770 { 0x004a, false, NULL }, /* gdirect */ 801
771 { 0x009f, false, NULL }, /* imageblit (nv12) */ 802 /* kelvin */
772 { 0x008a, false, NULL }, /* ifc */ 803 if (dev_priv->chipset < 0x25)
773 { 0x038a, false, NULL }, /* ifc (nv30) */ 804 NVOBJ_CLASS(dev, 0x0097, GR);
774 { 0x0089, false, NULL }, /* sifm */ 805 else
775 { 0x0389, false, NULL }, /* sifm (nv30) */ 806 NVOBJ_CLASS(dev, 0x0597, GR);
776 { 0x0062, false, NULL }, /* surf2d */ 807
777 { 0x0362, false, NULL }, /* surf2d (nv30) */ 808 /* nvsw */
778 { 0x0043, false, NULL }, /* rop */ 809 NVOBJ_CLASS(dev, 0x506e, SW);
779 { 0x0012, false, NULL }, /* beta1 */ 810 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
780 { 0x0072, false, NULL }, /* beta4 */ 811
781 { 0x0019, false, NULL }, /* cliprect */ 812 dev_priv->engine.graph.registered = true;
782 { 0x0044, false, NULL }, /* pattern */ 813 return 0;
783 { 0x039e, false, NULL }, /* swzsurf */ 814}
784 { 0x0397, false, NULL }, /* rankine (nv30) */ 815
785 { 0x0497, false, NULL }, /* rankine (nv35) */ 816static int
786 { 0x0697, false, NULL }, /* rankine (nv34) */ 817nv30_graph_register(struct drm_device *dev)
787 {} 818{
788}; 819 struct drm_nouveau_private *dev_priv = dev->dev_private;
789 820
821 if (dev_priv->engine.graph.registered)
822 return 0;
823
824 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
825 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
826 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
827 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
828 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
829 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
830 NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
831 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
832 NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
833 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
834 NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
835 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
836 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
837 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
838 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
839 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
840 NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
841
842 /* rankine */
843 if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
844 NVOBJ_CLASS(dev, 0x0397, GR);
845 else
846 if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
847 NVOBJ_CLASS(dev, 0x0697, GR);
848 else
849 if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
850 NVOBJ_CLASS(dev, 0x0497, GR);
851
852 /* nvsw */
853 NVOBJ_CLASS(dev, 0x506e, SW);
854 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
855
856 dev_priv->engine.graph.registered = true;
857 return 0;
858}
859
860static void
861nv20_graph_isr(struct drm_device *dev)
862{
863 u32 stat;
864
865 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
866 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
867 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
868 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
869 u32 chid = (addr & 0x01f00000) >> 20;
870 u32 subc = (addr & 0x00070000) >> 16;
871 u32 mthd = (addr & 0x00001ffc);
872 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
873 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
874 u32 show = stat;
875
876 if (stat & NV_PGRAPH_INTR_ERROR) {
877 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
878 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
879 show &= ~NV_PGRAPH_INTR_ERROR;
880 }
881 }
882
883 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
884 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
885
886 if (show && nouveau_ratelimit()) {
887 NV_INFO(dev, "PGRAPH -");
888 nouveau_bitfield_print(nv10_graph_intr, show);
889 printk(" nsource:");
890 nouveau_bitfield_print(nv04_graph_nsource, nsource);
891 printk(" nstatus:");
892 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
893 printk("\n");
894 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
895 "mthd 0x%04x data 0x%08x\n",
896 chid, subc, class, mthd, data);
897 }
898 }
899}
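
nv30_graph_register() above selects the rankine class with chipset bit masks of the form mask & (1 << (chipset & 0x0f)): 0x00000003 covers low nibbles 0 and 1, 0x00000010 covers nibble 4, and 0x000001e0 covers nibbles 5 through 8. The short program below just enumerates that arithmetic so the mapping is easy to read; it is a sanity check of the bit math, not driver code.

/* Enumerate the chipset bit-mask selection used by nv30_graph_register(). */
#include <stdio.h>

int main(void)
{
	int chipset;

	for (chipset = 0x30; chipset <= 0x3f; chipset++) {
		unsigned int bit = 1u << (chipset & 0x0f);
		const char *cls = "none";

		if (0x00000003 & bit)
			cls = "0x0397";
		else if (0x00000010 & bit)
			cls = "0x0697";
		else if (0x000001e0 & bit)
			cls = "0x0497";

		printf("chipset low nibble 0x%x -> rankine class %s\n",
		       chipset & 0x0f, cls);
	}
	return 0;
}
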
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f095128..e0135f0e2144 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_drm.h" 30#include "nouveau_drm.h"
31 31
32void
33nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
34 uint32_t size, uint32_t pitch, uint32_t flags)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
38
39 tile->addr = addr | 1;
40 tile->limit = max(1u, addr + size) - 1;
41 tile->pitch = pitch;
42}
43
44void
45nv30_fb_free_tile_region(struct drm_device *dev, int i)
46{
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
49
50 tile->addr = tile->limit = tile->pitch = 0;
51}
52
32static int 53static int
33calc_bias(struct drm_device *dev, int k, int i, int j) 54calc_bias(struct drm_device *dev, int k, int i, int j)
34{ 55{
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
65 86
66 /* Turn all the tiling regions off. */ 87 /* Turn all the tiling regions off. */
67 for (i = 0; i < pfb->num_tiles; i++) 88 for (i = 0; i < pfb->num_tiles; i++)
68 pfb->set_region_tiling(dev, i, 0, 0, 0); 89 pfb->set_tile_region(dev, i);
69 90
70 /* Init the memory timing regs at 0x10037c/0x1003ac */ 91 /* Init the memory timing regs at 0x10037c/0x1003ac */
71 if (dev_priv->chipset == 0x30 || 92 if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8d5bd7..f3d9c0505f7b 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6void 6void
7nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 7nv40_fb_set_tile_region(struct drm_device *dev, int i)
8 uint32_t size, uint32_t pitch)
9{ 8{
10 struct drm_nouveau_private *dev_priv = dev->dev_private; 9 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1; 10 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
12
13 if (pitch)
14 addr |= 1;
15 11
16 switch (dev_priv->chipset) { 12 switch (dev_priv->chipset) {
17 case 0x40: 13 case 0x40:
18 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); 14 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
19 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); 15 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
20 nv_wr32(dev, NV10_PFB_TILE(i), addr); 16 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
21 break; 17 break;
22 18
23 default: 19 default:
24 nv_wr32(dev, NV40_PFB_TLIMIT(i), limit); 20 nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
25 nv_wr32(dev, NV40_PFB_TSIZE(i), pitch); 21 nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
26 nv_wr32(dev, NV40_PFB_TILE(i), addr); 22 nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
27 break; 23 break;
28 } 24 }
29} 25}
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
64 60
65 /* Turn all the tiling regions off. */ 61 /* Turn all the tiling regions off. */
66 for (i = 0; i < pfb->num_tiles; i++) 62 for (i = 0; i < pfb->num_tiles; i++)
67 pfb->set_region_tiling(dev, i, 0, 0, 0); 63 pfb->set_tile_region(dev, i);
68 64
69 return 0; 65 return 0;
70} 66}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b28cdd..c86e4d4e9b96 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
47 if (ret) 47 if (ret)
48 return ret; 48 return ret;
49 49
50 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
51 NV40_USER(chan->id), PAGE_SIZE);
52 if (!chan->user)
53 return -ENOMEM;
54
50 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 55 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
51 56
52 nv_wi32(dev, fc + 0, chan->pushbuf_base); 57 nv_wi32(dev, fc + 0, chan->pushbuf_base);
@@ -70,17 +75,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
70 return 0; 75 return 0;
71} 76}
72 77
73void
74nv40_fifo_destroy_context(struct nouveau_channel *chan)
75{
76 struct drm_device *dev = chan->dev;
77
78 nv_wr32(dev, NV04_PFIFO_MODE,
79 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
80
81 nouveau_gpuobj_ref(NULL, &chan->ramfc);
82}
83
84static void 78static void
85nv40_fifo_do_load_context(struct drm_device *dev, int chid) 79nv40_fifo_do_load_context(struct drm_device *dev, int chid)
86{ 80{
@@ -279,6 +273,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
279static void 273static void
280nv40_fifo_init_intr(struct drm_device *dev) 274nv40_fifo_init_intr(struct drm_device *dev)
281{ 275{
276 nouveau_irq_register(dev, 8, nv04_fifo_isr);
282 nv_wr32(dev, 0x002100, 0xffffffff); 277 nv_wr32(dev, 0x002100, 0xffffffff);
283 nv_wr32(dev, 0x002140, 0xffffffff); 278 nv_wr32(dev, 0x002140, 0xffffffff);
284} 279}
@@ -301,7 +296,7 @@ nv40_fifo_init(struct drm_device *dev)
301 pfifo->reassign(dev, true); 296 pfifo->reassign(dev, true);
302 297
303 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 298 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
304 if (dev_priv->fifos[i]) { 299 if (dev_priv->channels.ptr[i]) {
305 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 300 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
306 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 301 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
307 } 302 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91569b8..0618846a97ce 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_grctx.h" 30#include "nouveau_grctx.h"
31 31
32static int nv40_graph_register(struct drm_device *);
33static void nv40_graph_isr(struct drm_device *);
34
32struct nouveau_channel * 35struct nouveau_channel *
33nv40_graph_channel(struct drm_device *dev) 36nv40_graph_channel(struct drm_device *dev)
34{ 37{
@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
42 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; 45 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
43 46
44 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 47 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
45 struct nouveau_channel *chan = dev_priv->fifos[i]; 48 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
46 49
47 if (chan && chan->ramin_grctx && 50 if (chan && chan->ramin_grctx &&
48 chan->ramin_grctx->pinst == inst) 51 chan->ramin_grctx->pinst == inst)
@@ -79,6 +82,22 @@ nv40_graph_create_context(struct nouveau_channel *chan)
79void 82void
80nv40_graph_destroy_context(struct nouveau_channel *chan) 83nv40_graph_destroy_context(struct nouveau_channel *chan)
81{ 84{
85 struct drm_device *dev = chan->dev;
86 struct drm_nouveau_private *dev_priv = dev->dev_private;
87 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
88 unsigned long flags;
89
90 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
91 pgraph->fifo_access(dev, false);
92
93 /* Unload the context if it's the currently active one */
94 if (pgraph->channel(dev) == chan)
95 pgraph->unload_context(dev);
96
97 pgraph->fifo_access(dev, true);
98 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
99
100 /* Free the context resources */
82 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 101 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
83} 102}
84 103
@@ -174,43 +193,39 @@ nv40_graph_unload_context(struct drm_device *dev)
174} 193}
175 194
176void 195void
177nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 196nv40_graph_set_tile_region(struct drm_device *dev, int i)
178 uint32_t size, uint32_t pitch)
179{ 197{
180 struct drm_nouveau_private *dev_priv = dev->dev_private; 198 struct drm_nouveau_private *dev_priv = dev->dev_private;
181 uint32_t limit = max(1u, addr + size) - 1; 199 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
182
183 if (pitch)
184 addr |= 1;
185 200
186 switch (dev_priv->chipset) { 201 switch (dev_priv->chipset) {
187 case 0x44: 202 case 0x44:
188 case 0x4a: 203 case 0x4a:
189 case 0x4e: 204 case 0x4e:
190 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); 205 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
191 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); 206 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
192 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); 207 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
193 break; 208 break;
194 209
195 case 0x46: 210 case 0x46:
196 case 0x47: 211 case 0x47:
197 case 0x49: 212 case 0x49:
198 case 0x4b: 213 case 0x4b:
199 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch); 214 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
200 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit); 215 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
201 nv_wr32(dev, NV47_PGRAPH_TILE(i), addr); 216 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
202 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); 217 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
203 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); 218 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
204 nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); 219 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
205 break; 220 break;
206 221
207 default: 222 default:
208 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); 223 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
209 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); 224 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
210 nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); 225 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
211 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); 226 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
212 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); 227 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
213 nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); 228 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
214 break; 229 break;
215 } 230 }
216} 231}
@@ -232,7 +247,7 @@ nv40_graph_init(struct drm_device *dev)
232 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 247 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
233 struct nouveau_grctx ctx = {}; 248 struct nouveau_grctx ctx = {};
234 uint32_t vramsz, *cp; 249 uint32_t vramsz, *cp;
235 int i, j; 250 int ret, i, j;
236 251
237 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 252 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
238 ~NV_PMC_ENABLE_PGRAPH); 253 ~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +271,14 @@ nv40_graph_init(struct drm_device *dev)
256 271
257 kfree(cp); 272 kfree(cp);
258 273
274 ret = nv40_graph_register(dev);
275 if (ret)
276 return ret;
277
259 /* No context present currently */ 278 /* No context present currently */
260 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 279 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
261 280
281 nouveau_irq_register(dev, 12, nv40_graph_isr);
262 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 282 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
263 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); 283 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
264 284
@@ -347,7 +367,7 @@ nv40_graph_init(struct drm_device *dev)
347 367
348 /* Turn all the tiling regions off. */ 368 /* Turn all the tiling regions off. */
349 for (i = 0; i < pfb->num_tiles; i++) 369 for (i = 0; i < pfb->num_tiles; i++)
350 nv40_graph_set_region_tiling(dev, i, 0, 0, 0); 370 nv40_graph_set_tile_region(dev, i);
351 371
352 /* begin RAM config */ 372 /* begin RAM config */
353 vramsz = pci_resource_len(dev->pdev, 0) - 1; 373 vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +410,111 @@ nv40_graph_init(struct drm_device *dev)
390 410
391void nv40_graph_takedown(struct drm_device *dev) 411void nv40_graph_takedown(struct drm_device *dev)
392{ 412{
413 nouveau_irq_unregister(dev, 12);
393} 414}
394 415
395struct nouveau_pgraph_object_class nv40_graph_grclass[] = { 416static int
396 { 0x0030, false, NULL }, /* null */ 417nv40_graph_register(struct drm_device *dev)
397 { 0x0039, false, NULL }, /* m2mf */ 418{
398 { 0x004a, false, NULL }, /* gdirect */ 419 struct drm_nouveau_private *dev_priv = dev->dev_private;
399 { 0x009f, false, NULL }, /* imageblit (nv12) */
400 { 0x008a, false, NULL }, /* ifc */
401 { 0x0089, false, NULL }, /* sifm */
402 { 0x3089, false, NULL }, /* sifm (nv40) */
403 { 0x0062, false, NULL }, /* surf2d */
404 { 0x3062, false, NULL }, /* surf2d (nv40) */
405 { 0x0043, false, NULL }, /* rop */
406 { 0x0012, false, NULL }, /* beta1 */
407 { 0x0072, false, NULL }, /* beta4 */
408 { 0x0019, false, NULL }, /* cliprect */
409 { 0x0044, false, NULL }, /* pattern */
410 { 0x309e, false, NULL }, /* swzsurf */
411 { 0x4097, false, NULL }, /* curie (nv40) */
412 { 0x4497, false, NULL }, /* curie (nv44) */
413 {}
414};
415 420
421 if (dev_priv->engine.graph.registered)
422 return 0;
423
424 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
425 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
426 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
427 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
428 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
429 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
430 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
431 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
432 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
433 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
434 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
435 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
436 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
437 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
438 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
439 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
440
441 /* curie */
442 if (dev_priv->chipset >= 0x60 ||
443 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
444 NVOBJ_CLASS(dev, 0x4497, GR);
445 else
446 NVOBJ_CLASS(dev, 0x4097, GR);
447
448 /* nvsw */
449 NVOBJ_CLASS(dev, 0x506e, SW);
450 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
451
452 dev_priv->engine.graph.registered = true;
453 return 0;
454}
455
456static int
457nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
458{
459 struct drm_nouveau_private *dev_priv = dev->dev_private;
460 struct nouveau_channel *chan;
461 unsigned long flags;
462 int i;
463
464 spin_lock_irqsave(&dev_priv->channels.lock, flags);
465 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
466 chan = dev_priv->channels.ptr[i];
467 if (!chan || !chan->ramin_grctx)
468 continue;
469
470 if (inst == chan->ramin_grctx->pinst)
471 break;
472 }
473 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
474 return i;
475}
476
477static void
478nv40_graph_isr(struct drm_device *dev)
479{
480 u32 stat;
481
482 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
483 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
484 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
485 u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
486 u32 chid = nv40_graph_isr_chid(dev, inst);
487 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
488 u32 subc = (addr & 0x00070000) >> 16;
489 u32 mthd = (addr & 0x00001ffc);
490 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
491 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
492 u32 show = stat;
493
494 if (stat & NV_PGRAPH_INTR_ERROR) {
495 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
496 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
497 show &= ~NV_PGRAPH_INTR_ERROR;
498 } else
499 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
500 nv_mask(dev, 0x402000, 0, 0);
501 }
502 }
503
504 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
505 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
506
507 if (show && nouveau_ratelimit()) {
508 NV_INFO(dev, "PGRAPH -");
509 nouveau_bitfield_print(nv10_graph_intr, show);
510 printk(" nsource:");
511 nouveau_bitfield_print(nv04_graph_nsource, nsource);
512 printk(" nstatus:");
513 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
514 printk("\n");
515 NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
516 "class 0x%04x mthd 0x%04x data 0x%08x\n",
517 chid, inst, subc, class, mthd, data);
518 }
519 }
520}
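
Unlike the earlier chips, the NV40 ISR above cannot read the channel id directly from TRAPPED_ADDR; nv40_graph_isr_chid() recovers it by matching the faulting context instance address against each channel's grctx under the channels lock, falling through to the channel count when nothing matches. A minimal stand-alone model of that lookup follows; the struct and field names are stand-ins for the driver's types, and locking is omitted.

/* Stand-alone model of nv40_graph_isr_chid(): find the channel whose
 * context instance matches the faulting one. Types are stand-ins. */
#include <stdio.h>
#include <stdint.h>

#define NUM_CHANNELS 32

struct channel {
	int valid;
	uint32_t grctx_inst;    /* stand-in for chan->ramin_grctx->pinst */
};

static struct channel channels[NUM_CHANNELS];

static int isr_chid(uint32_t inst)
{
	int i;

	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!channels[i].valid)
			continue;
		if (channels[i].grctx_inst == inst)
			break;
	}
	return i;    /* == NUM_CHANNELS when no channel matched */
}

int main(void)
{
	channels[3].valid = 1;
	channels[3].grctx_inst = 0x00123450;

	printf("chid %d\n", isr_chid(0x00123450));
	return 0;
}
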
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0c6de8..2c346f797285 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -345,7 +345,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
345 uint32_t buffer_handle, uint32_t width, uint32_t height) 345 uint32_t buffer_handle, uint32_t width, uint32_t height)
346{ 346{
347 struct drm_device *dev = crtc->dev; 347 struct drm_device *dev = crtc->dev;
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
349 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 348 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
350 struct nouveau_bo *cursor = NULL; 349 struct nouveau_bo *cursor = NULL;
351 struct drm_gem_object *gem; 350 struct drm_gem_object *gem;
@@ -374,8 +373,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
374 373
375 nouveau_bo_unmap(cursor); 374 nouveau_bo_unmap(cursor);
376 375
377 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset - 376 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
378 dev_priv->vm_vram_base);
379 nv_crtc->cursor.show(nv_crtc, true); 377 nv_crtc->cursor.show(nv_crtc, true);
380 378
381out: 379out:
@@ -437,6 +435,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = {
437 .cursor_move = nv50_crtc_cursor_move, 435 .cursor_move = nv50_crtc_cursor_move,
438 .gamma_set = nv50_crtc_gamma_set, 436 .gamma_set = nv50_crtc_gamma_set,
439 .set_config = drm_crtc_helper_set_config, 437 .set_config = drm_crtc_helper_set_config,
438 .page_flip = nouveau_crtc_page_flip,
440 .destroy = nv50_crtc_destroy, 439 .destroy = nv50_crtc_destroy,
441}; 440};
442 441
@@ -453,6 +452,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
453 452
454 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 453 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
455 454
455 drm_vblank_pre_modeset(dev, nv_crtc->index);
456 nv50_crtc_blank(nv_crtc, true); 456 nv50_crtc_blank(nv_crtc, true);
457} 457}
458 458
@@ -468,6 +468,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
468 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 468 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
469 469
470 nv50_crtc_blank(nv_crtc, false); 470 nv50_crtc_blank(nv_crtc, false);
471 drm_vblank_post_modeset(dev, nv_crtc->index);
471 472
472 ret = RING_SPACE(evo, 2); 473 ret = RING_SPACE(evo, 2);
473 if (ret) { 474 if (ret) {
@@ -545,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
545 return -EINVAL; 546 return -EINVAL;
546 } 547 }
547 548
548 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; 549 nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
549 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 550 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
550 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 551 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
551 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 552 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
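
The cursor and framebuffer offsets above now come straight from the buffer's TTM memory node rather than from the old virtual-address-minus-vm_vram_base calculation. A sketch of the conversion, assuming only what the hunk itself relies on: the nouveau_bo embeds a ttm_buffer_object whose mem.start is a page index into VRAM.

/* bo->mem.start is the VRAM placement in pages; shifting by PAGE_SHIFT
 * yields the byte offset the display engine expects.  The helper name
 * is illustrative, not a driver symbol. */
static inline u64
nv50_scanout_offset(struct ttm_buffer_object *bo)
{
	return (u64)bo->mem.start << PAGE_SHIFT;
}
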
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c611ddea..7cc94ed9ed95 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
33#include "nouveau_ramht.h" 33#include "nouveau_ramht.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35 35
36static void nv50_display_isr(struct drm_device *);
37
36static inline int 38static inline int
37nv50_sor_nr(struct drm_device *dev) 39nv50_sor_nr(struct drm_device *dev)
38{ 40{
@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev)
46 return 4; 48 return 4;
47} 49}
48 50
49static void
50nv50_evo_channel_del(struct nouveau_channel **pchan)
51{
52 struct nouveau_channel *chan = *pchan;
53
54 if (!chan)
55 return;
56 *pchan = NULL;
57
58 nouveau_gpuobj_channel_takedown(chan);
59 nouveau_bo_unmap(chan->pushbuf_bo);
60 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
61
62 if (chan->user)
63 iounmap(chan->user);
64
65 kfree(chan);
66}
67
68static int
69nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
70 uint32_t tile_flags, uint32_t magic_flags,
71 uint32_t offset, uint32_t limit)
72{
73 struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
74 struct drm_device *dev = evo->dev;
75 struct nouveau_gpuobj *obj = NULL;
76 int ret;
77
78 ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
79 if (ret)
80 return ret;
81 obj->engine = NVOBJ_ENGINE_DISPLAY;
82
83 nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
84 nv_wo32(obj, 4, limit);
85 nv_wo32(obj, 8, offset);
86 nv_wo32(obj, 12, 0x00000000);
87 nv_wo32(obj, 16, 0x00000000);
88 if (dev_priv->card_type < NV_C0)
89 nv_wo32(obj, 20, 0x00010000);
90 else
91 nv_wo32(obj, 20, 0x00020000);
92 dev_priv->engine.instmem.flush(dev);
93
94 ret = nouveau_ramht_insert(evo, name, obj);
95 nouveau_gpuobj_ref(NULL, &obj);
96 if (ret) {
97 return ret;
98 }
99
100 return 0;
101}
102
103static int
104nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
105{
106 struct drm_nouveau_private *dev_priv = dev->dev_private;
107 struct nouveau_gpuobj *ramht = NULL;
108 struct nouveau_channel *chan;
109 int ret;
110
111 chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
112 if (!chan)
113 return -ENOMEM;
114 *pchan = chan;
115
116 chan->id = -1;
117 chan->dev = dev;
118 chan->user_get = 4;
119 chan->user_put = 0;
120
121 ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
122 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
123 if (ret) {
124 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
125 nv50_evo_channel_del(pchan);
126 return ret;
127 }
128
129 ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
130 if (ret) {
131 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
132 nv50_evo_channel_del(pchan);
133 return ret;
134 }
135
136 ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
137 if (ret) {
138 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
139 nv50_evo_channel_del(pchan);
140 return ret;
141 }
142
143 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
144 nouveau_gpuobj_ref(NULL, &ramht);
145 if (ret) {
146 nv50_evo_channel_del(pchan);
147 return ret;
148 }
149
150 if (dev_priv->chipset != 0x50) {
151 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
152 0, 0xffffffff);
153 if (ret) {
154 nv50_evo_channel_del(pchan);
155 return ret;
156 }
157
158
159 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
160 0, 0xffffffff);
161 if (ret) {
162 nv50_evo_channel_del(pchan);
163 return ret;
164 }
165 }
166
167 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
168 0, dev_priv->vram_size);
169 if (ret) {
170 nv50_evo_channel_del(pchan);
171 return ret;
172 }
173
174 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
175 false, true, &chan->pushbuf_bo);
176 if (ret == 0)
177 ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
178 if (ret) {
179 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
180 nv50_evo_channel_del(pchan);
181 return ret;
182 }
183
184 ret = nouveau_bo_map(chan->pushbuf_bo);
185 if (ret) {
186 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
187 nv50_evo_channel_del(pchan);
188 return ret;
189 }
190
191 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
192 NV50_PDISPLAY_USER(0), PAGE_SIZE);
193 if (!chan->user) {
194 NV_ERROR(dev, "Error mapping EVO control regs.\n");
195 nv50_evo_channel_del(pchan);
196 return -ENOMEM;
197 }
198
199 return 0;
200}
201
202int 51int
203nv50_display_early_init(struct drm_device *dev) 52nv50_display_early_init(struct drm_device *dev)
204{ 53{
@@ -214,17 +63,16 @@ int
214nv50_display_init(struct drm_device *dev) 63nv50_display_init(struct drm_device *dev)
215{ 64{
216 struct drm_nouveau_private *dev_priv = dev->dev_private; 65 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
218 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; 66 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
219 struct nouveau_channel *evo = dev_priv->evo;
220 struct drm_connector *connector; 67 struct drm_connector *connector;
221 uint32_t val, ram_amount; 68 struct nouveau_channel *evo;
222 uint64_t start;
223 int ret, i; 69 int ret, i;
70 u32 val;
224 71
225 NV_DEBUG_KMS(dev, "\n"); 72 NV_DEBUG_KMS(dev, "\n");
226 73
227 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); 74 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
75
228 /* 76 /*
229 * I think the 0x006101XX range is some kind of main control area 77 * I think the 0x006101XX range is some kind of main control area
230 * that enables things. 78 * that enables things.
@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev)
240 val = nv_rd32(dev, 0x0061610c + (i * 0x800)); 88 val = nv_rd32(dev, 0x0061610c + (i * 0x800));
241 nv_wr32(dev, 0x0061019c + (i * 0x10), val); 89 nv_wr32(dev, 0x0061019c + (i * 0x10), val);
242 } 90 }
91
243 /* DAC */ 92 /* DAC */
244 for (i = 0; i < 3; i++) { 93 for (i = 0; i < 3; i++) {
245 val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); 94 val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
246 nv_wr32(dev, 0x006101d0 + (i * 0x04), val); 95 nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
247 } 96 }
97
248 /* SOR */ 98 /* SOR */
249 for (i = 0; i < nv50_sor_nr(dev); i++) { 99 for (i = 0; i < nv50_sor_nr(dev); i++) {
250 val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); 100 val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
251 nv_wr32(dev, 0x006101e0 + (i * 0x04), val); 101 nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
252 } 102 }
103
253 /* EXT */ 104 /* EXT */
254 for (i = 0; i < 3; i++) { 105 for (i = 0; i < 3; i++) {
255 val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); 106 val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev)
262 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); 113 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
263 } 114 }
264 115
265 /* This used to be in crtc unblank, but seems out of place there. */
266 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
267 /* RAM is clamped to 256 MiB. */
268 ram_amount = dev_priv->vram_size;
269 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
270 if (ram_amount > 256*1024*1024)
271 ram_amount = 256*1024*1024;
272 nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
273 nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
274 nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
275
 276 /* The precise purpose is unknown, I suspect it has something to do 116 /* The precise purpose is unknown, I suspect it has something to do
277 * with text mode. 117 * with text mode.
278 */ 118 */
@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev)
287 } 127 }
288 } 128 }
289 129
290 /* taken from nv bug #12637, attempts to un-wedge the hw if it's
291 * stuck in some unspecified state
292 */
293 start = ptimer->read(dev);
294 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
295 while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
296 if ((val & 0x9f0000) == 0x20000)
297 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
298 val | 0x800000);
299
300 if ((val & 0x3f0000) == 0x30000)
301 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
302 val | 0x200000);
303
304 if (ptimer->read(dev) - start > 1000000000ULL) {
305 NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
306 NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
307 return -EBUSY;
308 }
309 }
310
311 nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
312 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
313 if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
314 0x40000000, 0x40000000)) {
315 NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
316 NV_ERROR(dev, "0x610200 = 0x%08x\n",
317 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
318 return -EBUSY;
319 }
320
321 for (i = 0; i < 2; i++) { 130 for (i = 0; i < 2; i++) {
322 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); 131 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
323 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 132 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev)
341 } 150 }
342 } 151 }
343 152
344 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); 153 nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
154 nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
155 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
156 nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
157 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
158 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
159 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
160 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
161
162 /* enable hotplug interrupts */
163 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
164 struct nouveau_connector *conn = nouveau_connector(connector);
345 165
346 /* initialise fifo */ 166 if (conn->dcb->gpio_tag == 0xff)
347 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), 167 continue;
348 ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | 168
349 NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | 169 pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
350 NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
351 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
352 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
353 if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
354 NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
355 NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
356 return -EBUSY;
357 } 170 }
358 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 171
359 (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | 172 ret = nv50_evo_init(dev);
360 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
361 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
362 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
363 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
364 nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
365
366 evo->dma.max = (4096/4) - 2;
367 evo->dma.put = 0;
368 evo->dma.cur = evo->dma.put;
369 evo->dma.free = evo->dma.max - evo->dma.cur;
370
371 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
372 if (ret) 173 if (ret)
373 return ret; 174 return ret;
175 evo = dev_priv->evo;
374 176
375 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) 177 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
376 OUT_RING(evo, 0);
377 178
378 ret = RING_SPACE(evo, 11); 179 ret = RING_SPACE(evo, 11);
379 if (ret) 180 if (ret)
@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev)
393 if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) 194 if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
394 NV_ERROR(dev, "evo pushbuf stalled\n"); 195 NV_ERROR(dev, "evo pushbuf stalled\n");
395 196
396 /* enable clock change interrupts. */
397 nv_wr32(dev, 0x610028, 0x00010001);
398 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
399 NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
400 NV50_PDISPLAY_INTR_EN_CLK_UNK40));
401
402 /* enable hotplug interrupts */
403 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
404 struct nouveau_connector *conn = nouveau_connector(connector);
405
406 if (conn->dcb->gpio_tag == 0xff)
407 continue;
408
409 pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
410 }
411 197
412 return 0; 198 return 0;
413} 199}
@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev)
452 } 238 }
453 } 239 }
454 240
455 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); 241 nv50_evo_fini(dev);
456 nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
457 if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
458 NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
459 NV_ERROR(dev, "0x610200 = 0x%08x\n",
460 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
461 }
462 242
463 for (i = 0; i < 3; i++) { 243 for (i = 0; i < 3; i++) {
464 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), 244 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev)
470 } 250 }
471 251
472 /* disable interrupts. */ 252 /* disable interrupts. */
473 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); 253 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
474 254
475 /* disable hotplug interrupts */ 255 /* disable hotplug interrupts */
476 nv_wr32(dev, 0xe054, 0xffffffff); 256 nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev)
508 288
509 dev->mode_config.fb_base = dev_priv->fb_phys; 289 dev->mode_config.fb_base = dev_priv->fb_phys;
510 290
511 /* Create EVO channel */
512 ret = nv50_evo_channel_new(dev, &dev_priv->evo);
513 if (ret) {
514 NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
515 return ret;
516 }
517
518 /* Create CRTC objects */ 291 /* Create CRTC objects */
519 for (i = 0; i < 2; i++) 292 for (i = 0; i < 2; i++)
520 nv50_crtc_create(dev, i); 293 nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev)
557 } 330 }
558 } 331 }
559 332
333 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
334 nouveau_irq_register(dev, 26, nv50_display_isr);
335
560 ret = nv50_display_init(dev); 336 ret = nv50_display_init(dev);
561 if (ret) { 337 if (ret) {
562 nv50_display_destroy(dev); 338 nv50_display_destroy(dev);
@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev)
569void 345void
570nv50_display_destroy(struct drm_device *dev) 346nv50_display_destroy(struct drm_device *dev)
571{ 347{
572 struct drm_nouveau_private *dev_priv = dev->dev_private;
573
574 NV_DEBUG_KMS(dev, "\n"); 348 NV_DEBUG_KMS(dev, "\n");
575 349
576 drm_mode_config_cleanup(dev); 350 drm_mode_config_cleanup(dev);
577 351
578 nv50_display_disable(dev); 352 nv50_display_disable(dev);
579 nv50_evo_channel_del(&dev_priv->evo); 353 nouveau_irq_unregister(dev, 26);
580} 354}
581 355
582static u16 356static u16
@@ -660,32 +434,32 @@ static void
660nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) 434nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
661{ 435{
662 struct drm_nouveau_private *dev_priv = dev->dev_private; 436 struct drm_nouveau_private *dev_priv = dev->dev_private;
663 struct nouveau_channel *chan; 437 struct nouveau_channel *chan, *tmp;
664 struct list_head *entry, *tmp;
665 438
666 list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { 439 list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
667 chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); 440 nvsw.vbl_wait) {
441 if (chan->nvsw.vblsem_head != crtc)
442 continue;
668 443
669 nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, 444 nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
670 chan->nvsw.vblsem_rval); 445 chan->nvsw.vblsem_rval);
671 list_del(&chan->nvsw.vbl_wait); 446 list_del(&chan->nvsw.vbl_wait);
447 drm_vblank_put(dev, crtc);
672 } 448 }
449
450 drm_handle_vblank(dev, crtc);
673} 451}
674 452
675static void 453static void
676nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) 454nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
677{ 455{
678 intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
679
680 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) 456 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
681 nv50_display_vblank_crtc_handler(dev, 0); 457 nv50_display_vblank_crtc_handler(dev, 0);
682 458
683 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) 459 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
684 nv50_display_vblank_crtc_handler(dev, 1); 460 nv50_display_vblank_crtc_handler(dev, 1);
685 461
686 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, 462 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
687 NV50_PDISPLAY_INTR_EN) & ~intr);
688 nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
689} 463}
690 464
691static void 465static void
@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work)
1011static void 785static void
1012nv50_display_error_handler(struct drm_device *dev) 786nv50_display_error_handler(struct drm_device *dev)
1013{ 787{
1014 uint32_t addr, data; 788 u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
1015 789 u32 addr, data;
1016 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); 790 int chid;
1017 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
1018 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
1019
1020 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
1021 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1022
1023 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
1024}
1025
1026void
1027nv50_display_irq_hotplug_bh(struct work_struct *work)
1028{
1029 struct drm_nouveau_private *dev_priv =
1030 container_of(work, struct drm_nouveau_private, hpd_work);
1031 struct drm_device *dev = dev_priv->dev;
1032 struct drm_connector *connector;
1033 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
1034 uint32_t unplug_mask, plug_mask, change_mask;
1035 uint32_t hpd0, hpd1;
1036
1037 spin_lock_irq(&dev_priv->hpd_state.lock);
1038 hpd0 = dev_priv->hpd_state.hpd0_bits;
1039 dev_priv->hpd_state.hpd0_bits = 0;
1040 hpd1 = dev_priv->hpd_state.hpd1_bits;
1041 dev_priv->hpd_state.hpd1_bits = 0;
1042 spin_unlock_irq(&dev_priv->hpd_state.lock);
1043
1044 hpd0 &= nv_rd32(dev, 0xe050);
1045 if (dev_priv->chipset >= 0x90)
1046 hpd1 &= nv_rd32(dev, 0xe070);
1047
1048 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
1049 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
1050 change_mask = plug_mask | unplug_mask;
1051
1052 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1053 struct drm_encoder_helper_funcs *helper;
1054 struct nouveau_connector *nv_connector =
1055 nouveau_connector(connector);
1056 struct nouveau_encoder *nv_encoder;
1057 struct dcb_gpio_entry *gpio;
1058 uint32_t reg;
1059 bool plugged;
1060
1061 if (!nv_connector->dcb)
1062 continue;
1063 791
1064 gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); 792 for (chid = 0; chid < 5; chid++) {
1065 if (!gpio || !(change_mask & (1 << gpio->line))) 793 if (!(channels & (1 << chid)))
1066 continue; 794 continue;
1067 795
1068 reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); 796 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
1069 plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); 797 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
1070 NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", 798 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
1071 drm_get_connector_name(connector)) ; 799 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
1072 800 "(0x%04x 0x%02x)\n", chid,
1073 if (!connector->encoder || !connector->encoder->crtc || 801 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1074 !connector->encoder->crtc->enabled)
1075 continue;
1076 nv_encoder = nouveau_encoder(connector->encoder);
1077 helper = connector->encoder->helper_private;
1078
1079 if (nv_encoder->dcb->type != OUTPUT_DP)
1080 continue;
1081 802
1082 if (plugged) 803 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
1083 helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
1084 else
1085 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
1086 } 804 }
1087
1088 drm_helper_hpd_irq_event(dev);
1089} 805}
1090 806
1091void 807static void
1092nv50_display_irq_handler(struct drm_device *dev) 808nv50_display_isr(struct drm_device *dev)
1093{ 809{
1094 struct drm_nouveau_private *dev_priv = dev->dev_private; 810 struct drm_nouveau_private *dev_priv = dev->dev_private;
1095 uint32_t delayed = 0; 811 uint32_t delayed = 0;
1096 812
1097 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
1098 uint32_t hpd0_bits, hpd1_bits = 0;
1099
1100 hpd0_bits = nv_rd32(dev, 0xe054);
1101 nv_wr32(dev, 0xe054, hpd0_bits);
1102
1103 if (dev_priv->chipset >= 0x90) {
1104 hpd1_bits = nv_rd32(dev, 0xe074);
1105 nv_wr32(dev, 0xe074, hpd1_bits);
1106 }
1107
1108 spin_lock(&dev_priv->hpd_state.lock);
1109 dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
1110 dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
1111 spin_unlock(&dev_priv->hpd_state.lock);
1112
1113 queue_work(dev_priv->wq, &dev_priv->hpd_work);
1114 }
1115
1116 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 813 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
1117 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 814 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
1118 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 815 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev)
1123 if (!intr0 && !(intr1 & ~delayed)) 820 if (!intr0 && !(intr1 & ~delayed))
1124 break; 821 break;
1125 822
1126 if (intr0 & 0x00010000) { 823 if (intr0 & 0x001f0000) {
1127 nv50_display_error_handler(dev); 824 nv50_display_error_handler(dev);
1128 intr0 &= ~0x00010000; 825 intr0 &= ~0x001f0000;
1129 } 826 }
1130 827
1131 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { 828 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev)
1156 } 853 }
1157 } 854 }
1158} 855}
1159
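
The reworked vblank path above walks dev_priv->vbl_waiting with the _safe iterator because each satisfied waiter is unlinked inside the loop; it then drops the vblank reference and forwards the event to DRM. A condensed sketch of that loop — the wrapper function is illustrative, but the calls and field names inside it mirror the hunk:

/* Release every channel waiting on this CRTC's vblank semaphore.
 * _safe iteration is required because list_del() runs mid-walk. */
static void
release_vblank_waiters(struct drm_device *dev, struct list_head *waiting,
		       int crtc)
{
	struct nouveau_channel *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, waiting, nvsw.vbl_wait) {
		if (chan->nvsw.vblsem_head != crtc)
			continue;

		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
				chan->nvsw.vblsem_rval);
		list_del(&chan->nvsw.vbl_wait);
		drm_vblank_put(dev, crtc);
	}

	drm_handle_vblank(dev, crtc);
}
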
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b85ee0..f0e30b78ef6b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
35#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
36#include "nv50_evo.h" 36#include "nv50_evo.h"
37 37
38void nv50_display_irq_handler(struct drm_device *dev);
39void nv50_display_irq_handler_bh(struct work_struct *work); 38void nv50_display_irq_handler_bh(struct work_struct *work);
40void nv50_display_irq_hotplug_bh(struct work_struct *work);
41int nv50_display_early_init(struct drm_device *dev); 39int nv50_display_early_init(struct drm_device *dev);
42void nv50_display_late_takedown(struct drm_device *dev); 40void nv50_display_late_takedown(struct drm_device *dev);
43int nv50_display_create(struct drm_device *dev); 41int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 000000000000..887b2a97e2a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,318 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_dma.h"
29#include "nouveau_ramht.h"
30
31static void
32nv50_evo_channel_del(struct nouveau_channel **pevo)
33{
34 struct drm_nouveau_private *dev_priv;
35 struct nouveau_channel *evo = *pevo;
36
37 if (!evo)
38 return;
39 *pevo = NULL;
40
41 dev_priv = evo->dev->dev_private;
42 dev_priv->evo_alloc &= ~(1 << evo->id);
43
44 nouveau_gpuobj_channel_takedown(evo);
45 nouveau_bo_unmap(evo->pushbuf_bo);
46 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
47
48 if (evo->user)
49 iounmap(evo->user);
50
51 kfree(evo);
52}
53
54int
55nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
56 u32 tile_flags, u32 magic_flags, u32 offset, u32 limit)
57{
58 struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
59 struct drm_device *dev = evo->dev;
60 struct nouveau_gpuobj *obj = NULL;
61 int ret;
62
63 ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
64 if (ret)
65 return ret;
66 obj->engine = NVOBJ_ENGINE_DISPLAY;
67
68 nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
69 nv_wo32(obj, 4, limit);
70 nv_wo32(obj, 8, offset);
71 nv_wo32(obj, 12, 0x00000000);
72 nv_wo32(obj, 16, 0x00000000);
73 if (dev_priv->card_type < NV_C0)
74 nv_wo32(obj, 20, 0x00010000);
75 else
76 nv_wo32(obj, 20, 0x00020000);
77 dev_priv->engine.instmem.flush(dev);
78
79 ret = nouveau_ramht_insert(evo, name, obj);
80 nouveau_gpuobj_ref(NULL, &obj);
81 if (ret) {
82 return ret;
83 }
84
85 return 0;
86}
87
88static int
89nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
90{
91 struct drm_nouveau_private *dev_priv = dev->dev_private;
92 struct nouveau_channel *evo;
93 int ret;
94
95 evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
96 if (!evo)
97 return -ENOMEM;
98 *pevo = evo;
99
100 for (evo->id = 0; evo->id < 5; evo->id++) {
101 if (dev_priv->evo_alloc & (1 << evo->id))
102 continue;
103
104 dev_priv->evo_alloc |= (1 << evo->id);
105 break;
106 }
107
108 if (evo->id == 5) {
109 kfree(evo);
110 return -ENODEV;
111 }
112
113 evo->dev = dev;
114 evo->user_get = 4;
115 evo->user_put = 0;
116
117 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
118 false, true, &evo->pushbuf_bo);
119 if (ret == 0)
120 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
121 if (ret) {
122 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
123 nv50_evo_channel_del(pevo);
124 return ret;
125 }
126
127 ret = nouveau_bo_map(evo->pushbuf_bo);
128 if (ret) {
129 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
130 nv50_evo_channel_del(pevo);
131 return ret;
132 }
133
134 evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
135 NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
136 if (!evo->user) {
137 NV_ERROR(dev, "Error mapping EVO control regs.\n");
138 nv50_evo_channel_del(pevo);
139 return -ENOMEM;
140 }
141
142 /* bind primary evo channel's ramht to the channel */
143 if (dev_priv->evo && evo != dev_priv->evo)
144 nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
145
146 return 0;
147}
148
149static int
150nv50_evo_channel_init(struct nouveau_channel *evo)
151{
152 struct drm_device *dev = evo->dev;
153 int id = evo->id, ret, i;
154 u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
155 u32 tmp;
156
157 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
158 if ((tmp & 0x009f0000) == 0x00020000)
159 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
160
161 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
162 if ((tmp & 0x003f0000) == 0x00030000)
163 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
164
165 /* initialise fifo */
166 nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
167 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
168 NV50_PDISPLAY_EVO_DMA_CB_VALID);
169 nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
170 nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
171 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
172 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
173
174 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
175 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
176 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
177 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
178 NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
179 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
180 return -EBUSY;
181 }
182
183 /* enable error reporting on the channel */
184 nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
185
186 evo->dma.max = (4096/4) - 2;
187 evo->dma.put = 0;
188 evo->dma.cur = evo->dma.put;
189 evo->dma.free = evo->dma.max - evo->dma.cur;
190
191 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
192 if (ret)
193 return ret;
194
195 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
196 OUT_RING(evo, 0);
197
198 return 0;
199}
200
201static void
202nv50_evo_channel_fini(struct nouveau_channel *evo)
203{
204 struct drm_device *dev = evo->dev;
205 int id = evo->id;
206
207 nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
208 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
209 nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
210 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
211 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
212 NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
213 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
214 }
215}
216
217static int
218nv50_evo_create(struct drm_device *dev)
219{
220 struct drm_nouveau_private *dev_priv = dev->dev_private;
221 struct nouveau_gpuobj *ramht = NULL;
222 struct nouveau_channel *evo;
223 int ret;
224
225 /* create primary evo channel, the one we use for modesetting
 226	 * purposes
227 */
228 ret = nv50_evo_channel_new(dev, &dev_priv->evo);
229 if (ret)
230 return ret;
231 evo = dev_priv->evo;
232
233 /* setup object management on it, any other evo channel will
234 * use this also as there's no per-channel support on the
235 * hardware
236 */
237 ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
238 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
239 if (ret) {
240 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
241 nv50_evo_channel_del(&dev_priv->evo);
242 return ret;
243 }
244
245 ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
246 if (ret) {
247 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
248 nv50_evo_channel_del(&dev_priv->evo);
249 return ret;
250 }
251
252 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
253 if (ret) {
254 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
255 nv50_evo_channel_del(&dev_priv->evo);
256 return ret;
257 }
258
259 ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
260 nouveau_gpuobj_ref(NULL, &ramht);
261 if (ret) {
262 nv50_evo_channel_del(&dev_priv->evo);
263 return ret;
264 }
265
266 /* create some default objects for the scanout memtypes we support */
267 if (dev_priv->chipset != 0x50) {
268 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
269 0, 0xffffffff);
270 if (ret) {
271 nv50_evo_channel_del(&dev_priv->evo);
272 return ret;
273 }
274
275
276 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
277 0, 0xffffffff);
278 if (ret) {
279 nv50_evo_channel_del(&dev_priv->evo);
280 return ret;
281 }
282 }
283
284 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
285 0, dev_priv->vram_size);
286 if (ret) {
287 nv50_evo_channel_del(&dev_priv->evo);
288 return ret;
289 }
290
291 return 0;
292}
293
294int
295nv50_evo_init(struct drm_device *dev)
296{
297 struct drm_nouveau_private *dev_priv = dev->dev_private;
298 int ret;
299
300 if (!dev_priv->evo) {
301 ret = nv50_evo_create(dev);
302 if (ret)
303 return ret;
304 }
305
306 return nv50_evo_channel_init(dev_priv->evo);
307}
308
309void
310nv50_evo_fini(struct drm_device *dev)
311{
312 struct drm_nouveau_private *dev_priv = dev->dev_private;
313
314 if (dev_priv->evo) {
315 nv50_evo_channel_fini(dev_priv->evo);
316 nv50_evo_channel_del(&dev_priv->evo);
317 }
318}
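
nv50_evo_channel_new() above hands out EVO channel ids from a small bitmask (dev_priv->evo_alloc), failing once all five hardware channels are taken. The allocation boiled down to a helper, for illustration only:

/* Pick the first clear bit in the allocation mask; -ENODEV once all
 * five EVO channels are in use.  'evo_alloc' mirrors the dev_priv
 * field the hunk relies on. */
static int
evo_id_alloc(u32 *evo_alloc)
{
	int id;

	for (id = 0; id < 5; id++) {
		if (*evo_alloc & (1 << id))
			continue;
		*evo_alloc |= (1 << id);
		return id;
	}
	return -ENODEV;
}
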
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae13343bcec..aa4f0d3cea8e 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
24 * 24 *
25 */ 25 */
26 26
27#ifndef __NV50_EVO_H__
28#define __NV50_EVO_H__
29
30int nv50_evo_init(struct drm_device *dev);
31void nv50_evo_fini(struct drm_device *dev);
32int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
33 u32 tile_flags, u32 magic_flags,
34 u32 offset, u32 limit);
35
27#define NV50_EVO_UPDATE 0x00000080 36#define NV50_EVO_UPDATE 0x00000080
28#define NV50_EVO_UNK84 0x00000084 37#define NV50_EVO_UNK84 0x00000084
29#define NV50_EVO_UNK84_NOTIFY 0x40000000 38#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -111,3 +120,4 @@
111#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 120#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
112#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc 121#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
113 122
123#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b15d2c..50290dea0ac4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6struct nv50_fb_priv {
7 struct page *r100c08_page;
8 dma_addr_t r100c08;
9};
10
11static int
12nv50_fb_create(struct drm_device *dev)
13{
14 struct drm_nouveau_private *dev_priv = dev->dev_private;
15 struct nv50_fb_priv *priv;
16
17 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
18 if (!priv)
19 return -ENOMEM;
20
21 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
22 if (!priv->r100c08_page) {
23 kfree(priv);
24 return -ENOMEM;
25 }
26
27 priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
28 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
29 if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
30 __free_page(priv->r100c08_page);
31 kfree(priv);
32 return -EFAULT;
33 }
34
35 dev_priv->engine.fb.priv = priv;
36 return 0;
37}
38
6int 39int
7nv50_fb_init(struct drm_device *dev) 40nv50_fb_init(struct drm_device *dev)
8{ 41{
9 struct drm_nouveau_private *dev_priv = dev->dev_private; 42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 struct nv50_fb_priv *priv;
44 int ret;
45
46 if (!dev_priv->engine.fb.priv) {
47 ret = nv50_fb_create(dev);
48 if (ret)
49 return ret;
50 }
51 priv = dev_priv->engine.fb.priv;
10 52
11 /* Not a clue what this is exactly. Without pointing it at a 53 /* Not a clue what this is exactly. Without pointing it at a
12 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) 54 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
13 * cause IOMMU "read from address 0" errors (rh#561267) 55 * cause IOMMU "read from address 0" errors (rh#561267)
14 */ 56 */
15 nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); 57 nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
16 58
17 /* This is needed to get meaningful information from 100c90 59 /* This is needed to get meaningful information from 100c90
18 * on traps. No idea what these values mean exactly. */ 60 * on traps. No idea what these values mean exactly. */
19 switch (dev_priv->chipset) { 61 switch (dev_priv->chipset) {
20 case 0x50: 62 case 0x50:
21 nv_wr32(dev, 0x100c90, 0x0707ff); 63 nv_wr32(dev, 0x100c90, 0x000707ff);
22 break; 64 break;
23 case 0xa3: 65 case 0xa3:
24 case 0xa5: 66 case 0xa5:
25 case 0xa8: 67 case 0xa8:
26 nv_wr32(dev, 0x100c90, 0x0d0fff); 68 nv_wr32(dev, 0x100c90, 0x000d0fff);
69 break;
70 case 0xaf:
71 nv_wr32(dev, 0x100c90, 0x089d1fff);
27 break; 72 break;
28 default: 73 default:
29 nv_wr32(dev, 0x100c90, 0x1d07ff); 74 nv_wr32(dev, 0x100c90, 0x001d07ff);
30 break; 75 break;
31 } 76 }
32 77
@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev)
36void 81void
37nv50_fb_takedown(struct drm_device *dev) 82nv50_fb_takedown(struct drm_device *dev)
38{ 83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 struct nv50_fb_priv *priv;
86
87 priv = dev_priv->engine.fb.priv;
88 if (!priv)
89 return;
90 dev_priv->engine.fb.priv = NULL;
91
92 pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
93 PCI_DMA_BIDIRECTIONAL);
94 __free_page(priv->r100c08_page);
95 kfree(priv);
39} 96}
40 97
41void 98void
42nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) 99nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
43{ 100{
44 struct drm_nouveau_private *dev_priv = dev->dev_private; 101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 unsigned long flags;
45 u32 trap[6], idx, chinst; 103 u32 trap[6], idx, chinst;
46 int i, ch; 104 int i, ch;
47 105
@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
60 return; 118 return;
61 119
62 chinst = (trap[2] << 16) | trap[1]; 120 chinst = (trap[2] << 16) | trap[1];
121
122 spin_lock_irqsave(&dev_priv->channels.lock, flags);
63 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { 123 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
64 struct nouveau_channel *chan = dev_priv->fifos[ch]; 124 struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
65 125
66 if (!chan || !chan->ramin) 126 if (!chan || !chan->ramin)
67 continue; 127 continue;
@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
69 if (chinst == chan->ramin->vinst >> 12) 129 if (chinst == chan->ramin->vinst >> 12)
70 break; 130 break;
71 } 131 }
132 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
72 133
73 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " 134 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
74 "channel %d (0x%08x)\n", 135 "channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048eddbc..6d38cb1488ae 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,27 +3,20 @@
3#include "nouveau_dma.h" 3#include "nouveau_dma.h"
4#include "nouveau_ramht.h" 4#include "nouveau_ramht.h"
5#include "nouveau_fbcon.h" 5#include "nouveau_fbcon.h"
6#include "nouveau_mm.h"
6 7
7void 8int
8nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 9nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
9{ 10{
10 struct nouveau_fbdev *nfbdev = info->par; 11 struct nouveau_fbdev *nfbdev = info->par;
11 struct drm_device *dev = nfbdev->dev; 12 struct drm_device *dev = nfbdev->dev;
12 struct drm_nouveau_private *dev_priv = dev->dev_private; 13 struct drm_nouveau_private *dev_priv = dev->dev_private;
13 struct nouveau_channel *chan = dev_priv->channel; 14 struct nouveau_channel *chan = dev_priv->channel;
15 int ret;
14 16
15 if (info->state != FBINFO_STATE_RUNNING) 17 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
16 return; 18 if (ret)
17 19 return ret;
18 if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
19 RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
20 nouveau_fbcon_gpu_lockup(info);
21 }
22
23 if (info->flags & FBINFO_HWACCEL_DISABLED) {
24 cfb_fillrect(info, rect);
25 return;
26 }
27 20
28 if (rect->rop != ROP_COPY) { 21 if (rect->rop != ROP_COPY) {
29 BEGIN_RING(chan, NvSub2D, 0x02ac, 1); 22 BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +38,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
45 OUT_RING(chan, 3); 38 OUT_RING(chan, 3);
46 } 39 }
47 FIRE_RING(chan); 40 FIRE_RING(chan);
41 return 0;
48} 42}
49 43
50void 44int
51nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 45nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
52{ 46{
53 struct nouveau_fbdev *nfbdev = info->par; 47 struct nouveau_fbdev *nfbdev = info->par;
54 struct drm_device *dev = nfbdev->dev; 48 struct drm_device *dev = nfbdev->dev;
55 struct drm_nouveau_private *dev_priv = dev->dev_private; 49 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 struct nouveau_channel *chan = dev_priv->channel; 50 struct nouveau_channel *chan = dev_priv->channel;
51 int ret;
57 52
58 if (info->state != FBINFO_STATE_RUNNING) 53 ret = RING_SPACE(chan, 12);
59 return; 54 if (ret)
60 55 return ret;
61 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
62 nouveau_fbcon_gpu_lockup(info);
63 }
64
65 if (info->flags & FBINFO_HWACCEL_DISABLED) {
66 cfb_copyarea(info, region);
67 return;
68 }
69 56
70 BEGIN_RING(chan, NvSub2D, 0x0110, 1); 57 BEGIN_RING(chan, NvSub2D, 0x0110, 1);
71 OUT_RING(chan, 0); 58 OUT_RING(chan, 0);
@@ -80,9 +67,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
80 OUT_RING(chan, 0); 67 OUT_RING(chan, 0);
81 OUT_RING(chan, region->sy); 68 OUT_RING(chan, region->sy);
82 FIRE_RING(chan); 69 FIRE_RING(chan);
70 return 0;
83} 71}
84 72
85void 73int
86nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 74nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
87{ 75{
88 struct nouveau_fbdev *nfbdev = info->par; 76 struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +80,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
92 uint32_t width, dwords, *data = (uint32_t *)image->data; 80 uint32_t width, dwords, *data = (uint32_t *)image->data;
93 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); 81 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
94 uint32_t *palette = info->pseudo_palette; 82 uint32_t *palette = info->pseudo_palette;
83 int ret;
95 84
96 if (info->state != FBINFO_STATE_RUNNING) 85 if (image->depth != 1)
97 return; 86 return -ENODEV;
98
99 if (image->depth != 1) {
100 cfb_imageblit(info, image);
101 return;
102 }
103
104 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
105 nouveau_fbcon_gpu_lockup(info);
106 }
107 87
108 if (info->flags & FBINFO_HWACCEL_DISABLED) { 88 ret = RING_SPACE(chan, 11);
109 cfb_imageblit(info, image); 89 if (ret)
110 return; 90 return ret;
111 }
112 91
113 width = ALIGN(image->width, 32); 92 width = ALIGN(image->width, 32);
114 dwords = (width * image->height) >> 5; 93 dwords = (width * image->height) >> 5;
@@ -134,11 +113,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
134 while (dwords) { 113 while (dwords) {
135 int push = dwords > 2047 ? 2047 : dwords; 114 int push = dwords > 2047 ? 2047 : dwords;
136 115
137 if (RING_SPACE(chan, push + 1)) { 116 ret = RING_SPACE(chan, push + 1);
138 nouveau_fbcon_gpu_lockup(info); 117 if (ret)
139 cfb_imageblit(info, image); 118 return ret;
140 return;
141 }
142 119
143 dwords -= push; 120 dwords -= push;
144 121
@@ -148,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
148 } 125 }
149 126
150 FIRE_RING(chan); 127 FIRE_RING(chan);
128 return 0;
151} 129}
152 130
153int 131int
@@ -157,12 +135,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
157 struct drm_device *dev = nfbdev->dev; 135 struct drm_device *dev = nfbdev->dev;
158 struct drm_nouveau_private *dev_priv = dev->dev_private; 136 struct drm_nouveau_private *dev_priv = dev->dev_private;
159 struct nouveau_channel *chan = dev_priv->channel; 137 struct nouveau_channel *chan = dev_priv->channel;
160 struct nouveau_gpuobj *eng2d = NULL; 138 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
161 uint64_t fb;
162 int ret, format; 139 int ret, format;
163 140
164 fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
165
166 switch (info->var.bits_per_pixel) { 141 switch (info->var.bits_per_pixel) {
167 case 8: 142 case 8:
168 format = 0xf3; 143 format = 0xf3;
@@ -190,12 +165,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
190 return -EINVAL; 165 return -EINVAL;
191 } 166 }
192 167
193 ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d); 168 ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
194 if (ret)
195 return ret;
196
197 ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
198 nouveau_gpuobj_ref(NULL, &eng2d);
199 if (ret) 169 if (ret)
200 return ret; 170 return ret;
201 171
@@ -253,8 +223,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
253 OUT_RING(chan, info->fix.line_length); 223 OUT_RING(chan, info->fix.line_length);
254 OUT_RING(chan, info->var.xres_virtual); 224 OUT_RING(chan, info->var.xres_virtual);
255 OUT_RING(chan, info->var.yres_virtual); 225 OUT_RING(chan, info->var.yres_virtual);
256 OUT_RING(chan, upper_32_bits(fb)); 226 OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
257 OUT_RING(chan, lower_32_bits(fb)); 227 OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
258 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 228 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
259 OUT_RING(chan, format); 229 OUT_RING(chan, format);
260 OUT_RING(chan, 1); 230 OUT_RING(chan, 1);
@@ -262,8 +232,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
262 OUT_RING(chan, info->fix.line_length); 232 OUT_RING(chan, info->fix.line_length);
263 OUT_RING(chan, info->var.xres_virtual); 233 OUT_RING(chan, info->var.xres_virtual);
264 OUT_RING(chan, info->var.yres_virtual); 234 OUT_RING(chan, info->var.yres_virtual);
265 OUT_RING(chan, upper_32_bits(fb)); 235 OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
266 OUT_RING(chan, lower_32_bits(fb)); 236 OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
267 237
268 return 0; 238 return 0;
269} 239}
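
With this change the nv50 fbcon hooks return errors instead of deciding on the cfb_* software fallback themselves; that decision moves to the common nouveau_fbcon.c wrappers, which are not part of this hunk. A hypothetical caller-side wrapper, purely to illustrate where the fallback ends up — the real wrapper's name and exact behaviour are assumptions, not taken from the patch:

/* Assumed shape of the common wrapper: try the accelerated path when
 * allowed, fall back to the software blitter otherwise. */
static void
fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	int ret = -ENODEV;

	if (info->state == FBINFO_STATE_RUNNING &&
	    !(info->flags & FBINFO_HWACCEL_DISABLED))
		ret = nv50_fbcon_fillrect(info, rect);

	if (ret)
		cfb_fillrect(info, rect);
}
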
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd60c10..8dd04c5dac67 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_ramht.h"
31#include "nouveau_vm.h"
31 32
32static void 33static void
33nv50_fifo_playlist_update(struct drm_device *dev) 34nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
44 45
45 /* We never schedule channel 0 or 127 */ 46 /* We never schedule channel 0 or 127 */
46 for (i = 1, nr = 0; i < 127; i++) { 47 for (i = 1, nr = 0; i < 127; i++) {
47 if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { 48 if (dev_priv->channels.ptr[i] &&
49 dev_priv->channels.ptr[i]->ramfc) {
48 nv_wo32(cur, (nr * 4), i); 50 nv_wo32(cur, (nr * 4), i);
49 nr++; 51 nr++;
50 } 52 }
@@ -60,7 +62,7 @@ static void
60nv50_fifo_channel_enable(struct drm_device *dev, int channel) 62nv50_fifo_channel_enable(struct drm_device *dev, int channel)
61{ 63{
62 struct drm_nouveau_private *dev_priv = dev->dev_private; 64 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_channel *chan = dev_priv->fifos[channel]; 65 struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
64 uint32_t inst; 66 uint32_t inst;
65 67
66 NV_DEBUG(dev, "ch%d\n", channel); 68 NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev)
105{ 107{
106 NV_DEBUG(dev, "\n"); 108 NV_DEBUG(dev, "\n");
107 109
110 nouveau_irq_register(dev, 8, nv04_fifo_isr);
108 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); 111 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
109 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); 112 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
110} 113}
@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
118 NV_DEBUG(dev, "\n"); 121 NV_DEBUG(dev, "\n");
119 122
120 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { 123 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
121 if (dev_priv->fifos[i]) 124 if (dev_priv->channels.ptr[i])
122 nv50_fifo_channel_enable(dev, i); 125 nv50_fifo_channel_enable(dev, i);
123 else 126 else
124 nv50_fifo_channel_disable(dev, i); 127 nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev)
206 if (!pfifo->playlist[0]) 209 if (!pfifo->playlist[0])
207 return; 210 return;
208 211
212 nv_wr32(dev, 0x2140, 0x00000000);
213 nouveau_irq_unregister(dev, 8);
214
209 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); 215 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
210 nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); 216 nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
211} 217}
@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
256 } 262 }
257 ramfc = chan->ramfc; 263 ramfc = chan->ramfc;
258 264
265 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
266 NV50_USER(chan->id), PAGE_SIZE);
267 if (!chan->user)
268 return -ENOMEM;
269
259 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 270 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
260 271
261 nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); 272 nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@ void
291nv50_fifo_destroy_context(struct nouveau_channel *chan) 302nv50_fifo_destroy_context(struct nouveau_channel *chan)
292{ 303{
293 struct drm_device *dev = chan->dev; 304 struct drm_device *dev = chan->dev;
305 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
294 struct nouveau_gpuobj *ramfc = NULL; 307 struct nouveau_gpuobj *ramfc = NULL;
308 unsigned long flags;
295 309
296 NV_DEBUG(dev, "ch%d\n", chan->id); 310 NV_DEBUG(dev, "ch%d\n", chan->id);
297 311
312 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
313 pfifo->reassign(dev, false);
314
315 /* Unload the context if it's the currently active one */
316 if (pfifo->channel_id(dev) == chan->id) {
317 pfifo->disable(dev);
318 pfifo->unload_context(dev);
319 pfifo->enable(dev);
320 }
321
298 /* This will ensure the channel is seen as disabled. */ 322 /* This will ensure the channel is seen as disabled. */
299 nouveau_gpuobj_ref(chan->ramfc, &ramfc); 323 nouveau_gpuobj_ref(chan->ramfc, &ramfc);
300 nouveau_gpuobj_ref(NULL, &chan->ramfc); 324 nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
305 nv50_fifo_channel_disable(dev, 127); 329 nv50_fifo_channel_disable(dev, 127);
306 nv50_fifo_playlist_update(dev); 330 nv50_fifo_playlist_update(dev);
307 331
332 pfifo->reassign(dev, true);
333 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
334
335 /* Free the channel resources */
336 if (chan->user) {
337 iounmap(chan->user);
338 chan->user = NULL;
339 }
308 nouveau_gpuobj_ref(NULL, &ramfc); 340 nouveau_gpuobj_ref(NULL, &ramfc);
309 nouveau_gpuobj_ref(NULL, &chan->cache); 341 nouveau_gpuobj_ref(NULL, &chan->cache);
310} 342}
@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
392 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) 424 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
393 return 0; 425 return 0;
394 426
395 chan = dev_priv->fifos[chid]; 427 chan = dev_priv->channels.ptr[chid];
396 if (!chan) { 428 if (!chan) {
397 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); 429 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
398 return -EINVAL; 430 return -EINVAL;
@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
467void 499void
468nv50_fifo_tlb_flush(struct drm_device *dev) 500nv50_fifo_tlb_flush(struct drm_device *dev)
469{ 501{
470 nv50_vm_flush(dev, 5); 502 nv50_vm_flush_engine(dev, 5);
471} 503}
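
nv50_fifo_destroy_context() above now evicts the channel's context before freeing it: PFIFO reassignment is paused under context_switch_lock, the context is unloaded if it is the currently active one, and only then are the RAMFC and USER mapping released. The ordering, pulled out into an illustrative helper (every call is one the hunk itself uses):

/* Kick the channel's context out of PFIFO before its resources go
 * away.  Context switching is held off for the duration. */
static void
kick_active_context(struct drm_device *dev, struct nouveau_fifo_engine *pfifo,
		    struct nouveau_channel *chan, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	pfifo->reassign(dev, false);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}

	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(lock, flags);
}
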
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2bf3d61..6b149c0cc06d 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_hw.h" 27#include "nouveau_hw.h"
28 28
29#include "nv50_display.h"
30
31static void nv50_gpio_isr(struct drm_device *dev);
32static void nv50_gpio_isr_bh(struct work_struct *work);
33
34struct nv50_gpio_priv {
35 struct list_head handlers;
36 spinlock_t lock;
37};
38
39struct nv50_gpio_handler {
40 struct drm_device *dev;
41 struct list_head head;
42 struct work_struct work;
43 bool inhibit;
44
45 struct dcb_gpio_entry *gpio;
46
47 void (*handler)(void *data, int state);
48 void *data;
49};
50
29static int 51static int
30nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) 52nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
31{ 53{
@@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
75 return 0; 97 return 0;
76} 98}
77 99
100int
101nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
102 void (*handler)(void *, int), void *data)
103{
104 struct drm_nouveau_private *dev_priv = dev->dev_private;
105 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
106 struct nv50_gpio_priv *priv = pgpio->priv;
107 struct nv50_gpio_handler *gpioh;
108 struct dcb_gpio_entry *gpio;
109 unsigned long flags;
110
111 gpio = nouveau_bios_gpio_entry(dev, tag);
112 if (!gpio)
113 return -ENOENT;
114
115 gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
116 if (!gpioh)
117 return -ENOMEM;
118
119 INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
120 gpioh->dev = dev;
121 gpioh->gpio = gpio;
122 gpioh->handler = handler;
123 gpioh->data = data;
124
125 spin_lock_irqsave(&priv->lock, flags);
126 list_add(&gpioh->head, &priv->handlers);
127 spin_unlock_irqrestore(&priv->lock, flags);
128 return 0;
129}
130
78void 131void
79nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) 132nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
133 void (*handler)(void *, int), void *data)
80{ 134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
137 struct nv50_gpio_priv *priv = pgpio->priv;
138 struct nv50_gpio_handler *gpioh, *tmp;
81 struct dcb_gpio_entry *gpio; 139 struct dcb_gpio_entry *gpio;
82 u32 reg, mask; 140 unsigned long flags;
83 141
84 gpio = nouveau_bios_gpio_entry(dev, tag); 142 gpio = nouveau_bios_gpio_entry(dev, tag);
85 if (!gpio) { 143 if (!gpio)
86 NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
87 return; 144 return;
145
146 spin_lock_irqsave(&priv->lock, flags);
147 list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
148 if (gpioh->gpio != gpio ||
149 gpioh->handler != handler ||
150 gpioh->data != data)
151 continue;
152 list_del(&gpioh->head);
153 kfree(gpioh);
88 } 154 }
155 spin_unlock_irqrestore(&priv->lock, flags);
156}
157
158bool
159nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
160{
161 struct dcb_gpio_entry *gpio;
162 u32 reg, mask;
163
164 gpio = nouveau_bios_gpio_entry(dev, tag);
165 if (!gpio)
166 return false;
89 167
90 reg = gpio->line < 16 ? 0xe050 : 0xe070; 168 reg = gpio->line < 16 ? 0xe050 : 0xe070;
91 mask = 0x00010001 << (gpio->line & 0xf); 169 mask = 0x00010001 << (gpio->line & 0xf);
92 170
93 nv_wr32(dev, reg + 4, mask); 171 nv_wr32(dev, reg + 4, mask);
94 nv_mask(dev, reg + 0, mask, on ? mask : 0); 172 reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
173 return (reg & mask) == mask;
174}
175
176static int
177nv50_gpio_create(struct drm_device *dev)
178{
179 struct drm_nouveau_private *dev_priv = dev->dev_private;
180 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
181 struct nv50_gpio_priv *priv;
182
183 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
184 if (!priv)
185 return -ENOMEM;
186
187 INIT_LIST_HEAD(&priv->handlers);
188 spin_lock_init(&priv->lock);
189 pgpio->priv = priv;
190 return 0;
191}
192
193static void
194nv50_gpio_destroy(struct drm_device *dev)
195{
196 struct drm_nouveau_private *dev_priv = dev->dev_private;
197 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
198
199 kfree(pgpio->priv);
200 pgpio->priv = NULL;
95} 201}
96 202
97int 203int
98nv50_gpio_init(struct drm_device *dev) 204nv50_gpio_init(struct drm_device *dev)
99{ 205{
100 struct drm_nouveau_private *dev_priv = dev->dev_private; 206 struct drm_nouveau_private *dev_priv = dev->dev_private;
207 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
208 struct nv50_gpio_priv *priv;
209 int ret;
210
211 if (!pgpio->priv) {
212 ret = nv50_gpio_create(dev);
213 if (ret)
214 return ret;
215 }
216 priv = pgpio->priv;
101 217
102 /* disable, and ack any pending gpio interrupts */ 218 /* disable, and ack any pending gpio interrupts */
103 nv_wr32(dev, 0xe050, 0x00000000); 219 nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev)
107 nv_wr32(dev, 0xe074, 0xffffffff); 223 nv_wr32(dev, 0xe074, 0xffffffff);
108 } 224 }
109 225
226 nouveau_irq_register(dev, 21, nv50_gpio_isr);
110 return 0; 227 return 0;
111} 228}
229
230void
231nv50_gpio_fini(struct drm_device *dev)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234
235 nv_wr32(dev, 0xe050, 0x00000000);
236 if (dev_priv->chipset >= 0x90)
237 nv_wr32(dev, 0xe070, 0x00000000);
238 nouveau_irq_unregister(dev, 21);
239
240 nv50_gpio_destroy(dev);
241}
242
243static void
244nv50_gpio_isr_bh(struct work_struct *work)
245{
246 struct nv50_gpio_handler *gpioh =
247 container_of(work, struct nv50_gpio_handler, work);
248 struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
249 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
250 struct nv50_gpio_priv *priv = pgpio->priv;
251 unsigned long flags;
252 int state;
253
254 state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
255 if (state < 0)
256 return;
257
258 gpioh->handler(gpioh->data, state);
259
260 spin_lock_irqsave(&priv->lock, flags);
261 gpioh->inhibit = false;
262 spin_unlock_irqrestore(&priv->lock, flags);
263}
264
265static void
266nv50_gpio_isr(struct drm_device *dev)
267{
268 struct drm_nouveau_private *dev_priv = dev->dev_private;
269 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
270 struct nv50_gpio_priv *priv = pgpio->priv;
271 struct nv50_gpio_handler *gpioh;
272 u32 intr0, intr1 = 0;
273 u32 hi, lo, ch;
274
275 intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
276 if (dev_priv->chipset >= 0x90)
277 intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
278
279 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
280 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
281 ch = hi | lo;
282
283 nv_wr32(dev, 0xe054, intr0);
284 if (dev_priv->chipset >= 0x90)
285 nv_wr32(dev, 0xe074, intr1);
286
287 spin_lock(&priv->lock);
288 list_for_each_entry(gpioh, &priv->handlers, head) {
289 if (!(ch & (1 << gpioh->gpio->line)))
290 continue;
291
292 if (gpioh->inhibit)
293 continue;
294 gpioh->inhibit = true;
295
296 queue_work(dev_priv->wq, &gpioh->work);
297 }
298 spin_unlock(&priv->lock);
299}
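Editor's note: the new nv50_gpio_isr() folds the two GPIO interrupt status words (0xe054, plus 0xe074 on chipsets >= 0x90) into a single 32-bit mask with one bit per GPIO line before walking the handler list and queueing work items. The snippet below is a minimal, self-contained restatement of that fold in plain C, not driver code; the sample input values are invented for the demonstration.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: combine the two PGPIO status words the way
 * nv50_gpio_isr() does.  intr0 carries lines 0-15 (one edge direction in
 * bits 0-15, the other in bits 16-31); intr1 carries lines 16-31 the same
 * way on >= 0x90 chipsets.  Bit N of the result means line N changed.
 */
static uint32_t gpio_changed_lines(uint32_t intr0, uint32_t intr1)
{
	uint32_t hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	uint32_t lo = (intr0 >> 16) | (intr1 & 0xffff0000);
	return hi | lo;
}

int main(void)
{
	/* example: an edge on line 3 (intr0) and on line 20 (intr1) */
	printf("changed mask: 0x%08x\n",
	       gpio_changed_lines(0x00000008, 0x00100000));
	return 0;
}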
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0af610..c510e74acf4d 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_ramht.h"
31#include "nouveau_grctx.h" 31#include "nouveau_grctx.h"
32#include "nouveau_dma.h"
33#include "nouveau_vm.h"
34#include "nv50_evo.h"
35
36static int nv50_graph_register(struct drm_device *);
37static void nv50_graph_isr(struct drm_device *);
32 38
33static void 39static void
34nv50_graph_init_reset(struct drm_device *dev) 40nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev)
46{ 52{
47 NV_DEBUG(dev, "\n"); 53 NV_DEBUG(dev, "\n");
48 54
55 nouveau_irq_register(dev, 12, nv50_graph_isr);
49 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); 56 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
50 nv_wr32(dev, 0x400138, 0xffffffff); 57 nv_wr32(dev, 0x400138, 0xffffffff);
51 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); 58 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev)
145 nv50_graph_init_reset(dev); 152 nv50_graph_init_reset(dev);
146 nv50_graph_init_regs__nv(dev); 153 nv50_graph_init_regs__nv(dev);
147 nv50_graph_init_regs(dev); 154 nv50_graph_init_regs(dev);
148 nv50_graph_init_intr(dev);
149 155
150 ret = nv50_graph_init_ctxctl(dev); 156 ret = nv50_graph_init_ctxctl(dev);
151 if (ret) 157 if (ret)
152 return ret; 158 return ret;
153 159
160 ret = nv50_graph_register(dev);
161 if (ret)
162 return ret;
163 nv50_graph_init_intr(dev);
154 return 0; 164 return 0;
155} 165}
156 166
@@ -158,6 +168,8 @@ void
158nv50_graph_takedown(struct drm_device *dev) 168nv50_graph_takedown(struct drm_device *dev)
159{ 169{
160 NV_DEBUG(dev, "\n"); 170 NV_DEBUG(dev, "\n");
171 nv_wr32(dev, 0x40013c, 0x00000000);
172 nouveau_irq_unregister(dev, 12);
161} 173}
162 174
163void 175void
@@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev)
190 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; 202 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
191 203
192 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 204 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
193 struct nouveau_channel *chan = dev_priv->fifos[i]; 205 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
194 206
195 if (chan && chan->ramin && chan->ramin->vinst == inst) 207 if (chan && chan->ramin && chan->ramin->vinst == inst)
196 return chan; 208 return chan;
@@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
211 223
212 NV_DEBUG(dev, "ch%d\n", chan->id); 224 NV_DEBUG(dev, "ch%d\n", chan->id);
213 225
214 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, 226 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
215 NVOBJ_FLAG_ZERO_ALLOC | 227 NVOBJ_FLAG_ZERO_ALLOC |
216 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 228 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
217 if (ret) 229 if (ret)
@@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
234 nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); 246 nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
235 247
236 dev_priv->engine.instmem.flush(dev); 248 dev_priv->engine.instmem.flush(dev);
249 atomic_inc(&chan->vm->pgraph_refs);
237 return 0; 250 return 0;
238} 251}
239 252
@@ -242,18 +255,31 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
242{ 255{
243 struct drm_device *dev = chan->dev; 256 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private; 257 struct drm_nouveau_private *dev_priv = dev->dev_private;
258 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
245 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 259 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
260 unsigned long flags;
246 261
247 NV_DEBUG(dev, "ch%d\n", chan->id); 262 NV_DEBUG(dev, "ch%d\n", chan->id);
248 263
249 if (!chan->ramin) 264 if (!chan->ramin)
250 return; 265 return;
251 266
267 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
268 pgraph->fifo_access(dev, false);
269
270 if (pgraph->channel(dev) == chan)
271 pgraph->unload_context(dev);
272
252 for (i = hdr; i < hdr + 24; i += 4) 273 for (i = hdr; i < hdr + 24; i += 4)
253 nv_wo32(chan->ramin, i, 0); 274 nv_wo32(chan->ramin, i, 0);
254 dev_priv->engine.instmem.flush(dev); 275 dev_priv->engine.instmem.flush(dev);
255 276
277 pgraph->fifo_access(dev, true);
278 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
279
256 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 280 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
281
282 atomic_dec(&chan->vm->pgraph_refs);
257} 283}
258 284
259static int 285static int
@@ -306,7 +332,7 @@ nv50_graph_unload_context(struct drm_device *dev)
306 return 0; 332 return 0;
307} 333}
308 334
309void 335static void
310nv50_graph_context_switch(struct drm_device *dev) 336nv50_graph_context_switch(struct drm_device *dev)
311{ 337{
312 uint32_t inst; 338 uint32_t inst;
@@ -322,8 +348,8 @@ nv50_graph_context_switch(struct drm_device *dev)
322} 348}
323 349
324static int 350static int
325nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, 351nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
326 int mthd, uint32_t data) 352 u32 class, u32 mthd, u32 data)
327{ 353{
328 struct nouveau_gpuobj *gpuobj; 354 struct nouveau_gpuobj *gpuobj;
329 355
@@ -340,8 +366,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
340} 366}
341 367
342static int 368static int
343nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, 369nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
344 int mthd, uint32_t data) 370 u32 class, u32 mthd, u32 data)
345{ 371{
346 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) 372 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
347 return -ERANGE; 373 return -ERANGE;
@@ -351,16 +377,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
351} 377}
352 378
353static int 379static int
354nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, 380nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
355 int mthd, uint32_t data) 381 u32 class, u32 mthd, u32 data)
356{ 382{
357 chan->nvsw.vblsem_rval = data; 383 chan->nvsw.vblsem_rval = data;
358 return 0; 384 return 0;
359} 385}
360 386
361static int 387static int
362nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, 388nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
363 int mthd, uint32_t data) 389 u32 class, u32 mthd, u32 data)
364{ 390{
365 struct drm_device *dev = chan->dev; 391 struct drm_device *dev = chan->dev;
366 struct drm_nouveau_private *dev_priv = dev->dev_private; 392 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
368 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) 394 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
369 return -EINVAL; 395 return -EINVAL;
370 396
371 if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & 397 drm_vblank_get(dev, data);
372 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
373 nv_wr32(dev, NV50_PDISPLAY_INTR_1,
374 NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
375 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
376 NV50_PDISPLAY_INTR_EN) |
377 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
378 }
379 398
399 chan->nvsw.vblsem_head = data;
380 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); 400 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
401
381 return 0; 402 return 0;
382} 403}
383 404
384static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { 405static int
385 { 0x018c, nv50_graph_nvsw_dma_vblsem }, 406nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
386 { 0x0400, nv50_graph_nvsw_vblsem_offset }, 407 u32 class, u32 mthd, u32 data)
387 { 0x0404, nv50_graph_nvsw_vblsem_release_val }, 408{
388 { 0x0408, nv50_graph_nvsw_vblsem_release }, 409 struct nouveau_page_flip_state s;
389 {}
390};
391 410
392struct nouveau_pgraph_object_class nv50_graph_grclass[] = { 411 if (!nouveau_finish_page_flip(chan, &s)) {
393 { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ 412 /* XXX - Do something here */
394 { 0x0030, false, NULL }, /* null */ 413 }
395 { 0x5039, false, NULL }, /* m2mf */ 414
396 { 0x502d, false, NULL }, /* 2d */ 415 return 0;
397 { 0x50c0, false, NULL }, /* compute */ 416}
398 { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ 417
399 { 0x5097, false, NULL }, /* tesla (nv50) */ 418static int
400 { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ 419nv50_graph_register(struct drm_device *dev)
401 { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ 420{
402 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ 421 struct drm_nouveau_private *dev_priv = dev->dev_private;
403 {} 422
404}; 423 if (dev_priv->engine.graph.registered)
424 return 0;
425
426 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
427 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
428 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
429 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
430 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
431 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
432
433 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
434 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
435 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
436
437 /* tesla */
438 if (dev_priv->chipset == 0x50)
439 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
440 else
441 if (dev_priv->chipset < 0xa0)
442 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
443 else {
444 switch (dev_priv->chipset) {
445 case 0xa0:
446 case 0xaa:
447 case 0xac:
448 NVOBJ_CLASS(dev, 0x8397, GR);
449 break;
450 case 0xa3:
451 case 0xa5:
452 case 0xa8:
453 NVOBJ_CLASS(dev, 0x8597, GR);
454 break;
455 case 0xaf:
456 NVOBJ_CLASS(dev, 0x8697, GR);
457 break;
458 }
459 }
460
461 /* compute */
462 NVOBJ_CLASS(dev, 0x50c0, GR);
463 if (dev_priv->chipset > 0xa0 &&
464 dev_priv->chipset != 0xaa &&
465 dev_priv->chipset != 0xac)
466 NVOBJ_CLASS(dev, 0x85c0, GR);
467
468 dev_priv->engine.graph.registered = true;
469 return 0;
470}
405 471
406void 472void
407nv50_graph_tlb_flush(struct drm_device *dev) 473nv50_graph_tlb_flush(struct drm_device *dev)
408{ 474{
409 nv50_vm_flush(dev, 0); 475 nv50_vm_flush_engine(dev, 0);
410} 476}
411 477
412void 478void
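Editor's note: nv50_graph_register() (added later in this hunk's replacement code) drops the static grclass table in favour of NVOBJ_CLASS()/NVOBJ_MTHD() calls and picks the tesla 3D class from the chipset id. The helper below restates just that chipset-to-class mapping as standalone C for illustration; it is not part of the patch, and a return of 0 stands for chipsets its switch does not name.

#include <stdint.h>

/* Illustration: tesla (3D) object class registered by nv50_graph_register()
 * for a given NV50-family chipset id.
 */
uint32_t nv50_tesla_class(uint32_t chipset)
{
	if (chipset == 0x50)
		return 0x5097;		/* tesla (nv50) */
	if (chipset < 0xa0)
		return 0x8297;		/* tesla (nv8x/nv9x) */

	switch (chipset) {
	case 0xa0:
	case 0xaa:
	case 0xac:
		return 0x8397;		/* nva0, nvaa, nvac */
	case 0xa3:
	case 0xa5:
	case 0xa8:
		return 0x8597;		/* nva3, nva5, nva8 */
	case 0xaf:
		return 0x8697;
	default:
		return 0;		/* not covered by the patch */
	}
}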
@@ -449,8 +515,500 @@ nv86_graph_tlb_flush(struct drm_device *dev)
449 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); 515 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
450 } 516 }
451 517
452 nv50_vm_flush(dev, 0); 518 nv50_vm_flush_engine(dev, 0);
453 519
454 nv_mask(dev, 0x400500, 0x00000001, 0x00000001); 520 nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
455 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 521 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
456} 522}
523
524static struct nouveau_enum nv50_mp_exec_error_names[] =
525{
526 { 3, "STACK_UNDERFLOW" },
527 { 4, "QUADON_ACTIVE" },
528 { 8, "TIMEOUT" },
529 { 0x10, "INVALID_OPCODE" },
530 { 0x40, "BREAKPOINT" },
531 {}
532};
533
534static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
535 { 0x00000001, "NOTIFY" },
536 { 0x00000002, "IN" },
537 { 0x00000004, "OUT" },
538 {}
539};
540
541static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
542 { 0x00000001, "FAULT" },
543 {}
544};
545
546static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
547 { 0x00000001, "FAULT" },
548 {}
549};
550
551static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
552 { 0x00000001, "FAULT" },
553 {}
554};
555
556/* There must be a *lot* of these. Will take some time to gather them up. */
557static struct nouveau_enum nv50_data_error_names[] = {
558 { 4, "INVALID_VALUE" },
559 { 5, "INVALID_ENUM" },
560 { 8, "INVALID_OBJECT" },
561 { 0xc, "INVALID_BITFIELD" },
562 { 0x28, "MP_NO_REG_SPACE" },
563 { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
564 {}
565};
566
567static struct nouveau_bitfield nv50_graph_intr[] = {
568 { 0x00000001, "NOTIFY" },
569 { 0x00000002, "COMPUTE_QUERY" },
570 { 0x00000010, "ILLEGAL_MTHD" },
571 { 0x00000020, "ILLEGAL_CLASS" },
572 { 0x00000040, "DOUBLE_NOTIFY" },
573 { 0x00001000, "CONTEXT_SWITCH" },
574 { 0x00010000, "BUFFER_NOTIFY" },
575 { 0x00100000, "DATA_ERROR" },
576 { 0x00200000, "TRAP" },
577 { 0x01000000, "SINGLE_STEP" },
578 {}
579};
580
581static void
582nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
583{
584 struct drm_nouveau_private *dev_priv = dev->dev_private;
585 uint32_t units = nv_rd32(dev, 0x1540);
586 uint32_t addr, mp10, status, pc, oplow, ophigh;
587 int i;
588 int mps = 0;
589 for (i = 0; i < 4; i++) {
590 if (!(units & 1 << (i+24)))
591 continue;
592 if (dev_priv->chipset < 0xa0)
593 addr = 0x408200 + (tpid << 12) + (i << 7);
594 else
595 addr = 0x408100 + (tpid << 11) + (i << 7);
596 mp10 = nv_rd32(dev, addr + 0x10);
597 status = nv_rd32(dev, addr + 0x14);
598 if (!status)
599 continue;
600 if (display) {
601 nv_rd32(dev, addr + 0x20);
602 pc = nv_rd32(dev, addr + 0x24);
603 oplow = nv_rd32(dev, addr + 0x70);
604 ophigh= nv_rd32(dev, addr + 0x74);
605 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
606 "TP %d MP %d: ", tpid, i);
607 nouveau_enum_print(nv50_mp_exec_error_names, status);
608 printk(" at %06x warp %d, opcode %08x %08x\n",
609 pc&0xffffff, pc >> 24,
610 oplow, ophigh);
611 }
612 nv_wr32(dev, addr + 0x10, mp10);
613 nv_wr32(dev, addr + 0x14, 0);
614 mps++;
615 }
616 if (!mps && display)
617 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
618 "No MPs claiming errors?\n", tpid);
619}
620
621static void
622nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
623 uint32_t ustatus_new, int display, const char *name)
624{
625 struct drm_nouveau_private *dev_priv = dev->dev_private;
626 int tps = 0;
627 uint32_t units = nv_rd32(dev, 0x1540);
628 int i, r;
629 uint32_t ustatus_addr, ustatus;
630 for (i = 0; i < 16; i++) {
631 if (!(units & (1 << i)))
632 continue;
633 if (dev_priv->chipset < 0xa0)
634 ustatus_addr = ustatus_old + (i << 12);
635 else
636 ustatus_addr = ustatus_new + (i << 11);
637 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
638 if (!ustatus)
639 continue;
640 tps++;
641 switch (type) {
642 case 6: /* texture error... unknown for now */
643 nv50_fb_vm_trap(dev, display, name);
644 if (display) {
645 NV_ERROR(dev, "magic set %d:\n", i);
646 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
647 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
648 nv_rd32(dev, r));
649 }
650 break;
651 case 7: /* MP error */
652 if (ustatus & 0x00010000) {
653 nv50_pgraph_mp_trap(dev, i, display);
654 ustatus &= ~0x00010000;
655 }
656 break;
657 case 8: /* TPDMA error */
658 {
659 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
660 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
661 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
662 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
663 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
664 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
665 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
666 nv50_fb_vm_trap(dev, display, name);
667 /* 2d engine destination */
668 if (ustatus & 0x00000010) {
669 if (display) {
670 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
671 i, e14, e10);
672 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
673 i, e0c, e18, e1c, e20, e24);
674 }
675 ustatus &= ~0x00000010;
676 }
677 /* Render target */
678 if (ustatus & 0x00000040) {
679 if (display) {
680 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
681 i, e14, e10);
682 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
683 i, e0c, e18, e1c, e20, e24);
684 }
685 ustatus &= ~0x00000040;
686 }
687 /* CUDA memory: l[], g[] or stack. */
688 if (ustatus & 0x00000080) {
689 if (display) {
690 if (e18 & 0x80000000) {
691 /* g[] read fault? */
692 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
693 i, e14, e10 | ((e18 >> 24) & 0x1f));
694 e18 &= ~0x1f000000;
695 } else if (e18 & 0xc) {
696 /* g[] write fault? */
697 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
698 i, e14, e10 | ((e18 >> 7) & 0x1f));
699 e18 &= ~0x00000f80;
700 } else {
701 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
702 i, e14, e10);
703 }
704 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
705 i, e0c, e18, e1c, e20, e24);
706 }
707 ustatus &= ~0x00000080;
708 }
709 }
710 break;
711 }
712 if (ustatus) {
713 if (display)
714 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
715 }
716 nv_wr32(dev, ustatus_addr, 0xc0000000);
717 }
718
719 if (!tps && display)
720 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
721}
722
723static int
724nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
725{
726 u32 status = nv_rd32(dev, 0x400108);
727 u32 ustatus;
728
729 if (!status && display) {
730 NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
731 return 1;
732 }
733
734 /* DISPATCH: Relays commands to other units and handles NOTIFY,
735 * COND, QUERY. If you get a trap from it, the command is still stuck
736 * in DISPATCH and you need to do something about it. */
737 if (status & 0x001) {
738 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
739 if (!ustatus && display) {
740 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
741 }
742
743 nv_wr32(dev, 0x400500, 0x00000000);
744
745 /* Known to be triggered by screwed up NOTIFY and COND... */
746 if (ustatus & 0x00000001) {
747 u32 addr = nv_rd32(dev, 0x400808);
748 u32 subc = (addr & 0x00070000) >> 16;
749 u32 mthd = (addr & 0x00001ffc);
750 u32 datal = nv_rd32(dev, 0x40080c);
751 u32 datah = nv_rd32(dev, 0x400810);
752 u32 class = nv_rd32(dev, 0x400814);
753 u32 r848 = nv_rd32(dev, 0x400848);
754
755 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
756 if (display && (addr & 0x80000000)) {
757 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
758 "subc %d class 0x%04x mthd 0x%04x "
759 "data 0x%08x%08x "
760 "400808 0x%08x 400848 0x%08x\n",
761 chid, inst, subc, class, mthd, datah,
762 datal, addr, r848);
763 } else
764 if (display) {
765 NV_INFO(dev, "PGRAPH - no stuck command?\n");
766 }
767
768 nv_wr32(dev, 0x400808, 0);
769 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
770 nv_wr32(dev, 0x400848, 0);
771 ustatus &= ~0x00000001;
772 }
773
774 if (ustatus & 0x00000002) {
775 u32 addr = nv_rd32(dev, 0x40084c);
776 u32 subc = (addr & 0x00070000) >> 16;
777 u32 mthd = (addr & 0x00001ffc);
778 u32 data = nv_rd32(dev, 0x40085c);
779 u32 class = nv_rd32(dev, 0x400814);
780
781 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
782 if (display && (addr & 0x80000000)) {
783 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
784 "subc %d class 0x%04x mthd 0x%04x "
785 "data 0x%08x 40084c 0x%08x\n",
786 chid, inst, subc, class, mthd,
787 data, addr);
788 } else
789 if (display) {
790 NV_INFO(dev, "PGRAPH - no stuck command?\n");
791 }
792
793 nv_wr32(dev, 0x40084c, 0);
794 ustatus &= ~0x00000002;
795 }
796
797 if (ustatus && display) {
798 NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
799 "0x%08x)\n", ustatus);
800 }
801
802 nv_wr32(dev, 0x400804, 0xc0000000);
803 nv_wr32(dev, 0x400108, 0x001);
804 status &= ~0x001;
805 if (!status)
806 return 0;
807 }
808
809 /* M2MF: Memory to memory copy engine. */
810 if (status & 0x002) {
811 u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
812 if (display) {
813 NV_INFO(dev, "PGRAPH - TRAP_M2MF");
814 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
815 printk("\n");
816 NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
817 nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
818 nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
819
820 }
821
822 /* No sane way found yet -- just reset the bugger. */
823 nv_wr32(dev, 0x400040, 2);
824 nv_wr32(dev, 0x400040, 0);
825 nv_wr32(dev, 0x406800, 0xc0000000);
826 nv_wr32(dev, 0x400108, 0x002);
827 status &= ~0x002;
828 }
829
830 /* VFETCH: Fetches data from vertex buffers. */
831 if (status & 0x004) {
832 u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
833 if (display) {
834 NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
835 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
836 printk("\n");
837 NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
838 nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
839 nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
840 }
841
842 nv_wr32(dev, 0x400c04, 0xc0000000);
843 nv_wr32(dev, 0x400108, 0x004);
844 status &= ~0x004;
845 }
846
847 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
848 if (status & 0x008) {
849 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
850 if (display) {
851 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
852 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
853 printk("\n");
854 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
855 nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
856 nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
857
858 }
859
860 /* No sane way found yet -- just reset the bugger. */
861 nv_wr32(dev, 0x400040, 0x80);
862 nv_wr32(dev, 0x400040, 0);
863 nv_wr32(dev, 0x401800, 0xc0000000);
864 nv_wr32(dev, 0x400108, 0x008);
865 status &= ~0x008;
866 }
867
868 /* CCACHE: Handles code and c[] caches and fills them. */
869 if (status & 0x010) {
870 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
871 if (display) {
872 NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
873 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
874 printk("\n");
875 NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
876 " %08x %08x %08x\n",
877 nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
878 nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
879 nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
880 nv_rd32(dev, 0x40581c));
881
882 }
883
884 nv_wr32(dev, 0x405018, 0xc0000000);
885 nv_wr32(dev, 0x400108, 0x010);
886 status &= ~0x010;
887 }
888
889 /* Unknown, not seen yet... 0x402000 is the only trap status reg
890 * remaining, so try to handle it anyway. Perhaps related to that
891 * unknown DMA slot on tesla? */
892 if (status & 0x20) {
893 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
894 if (display)
895 NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
896 nv_wr32(dev, 0x402000, 0xc0000000);
 897 /* no status modification on purpose */
898 }
899
900 /* TEXTURE: CUDA texturing units */
901 if (status & 0x040) {
902 nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
903 "PGRAPH - TRAP_TEXTURE");
904 nv_wr32(dev, 0x400108, 0x040);
905 status &= ~0x040;
906 }
907
908 /* MP: CUDA execution engines. */
909 if (status & 0x080) {
910 nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
911 "PGRAPH - TRAP_MP");
912 nv_wr32(dev, 0x400108, 0x080);
913 status &= ~0x080;
914 }
915
916 /* TPDMA: Handles TP-initiated uncached memory accesses:
917 * l[], g[], stack, 2d surfaces, render targets. */
918 if (status & 0x100) {
919 nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
920 "PGRAPH - TRAP_TPDMA");
921 nv_wr32(dev, 0x400108, 0x100);
922 status &= ~0x100;
923 }
924
925 if (status) {
926 if (display)
927 NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
928 nv_wr32(dev, 0x400108, status);
929 }
930
931 return 1;
932}
933
934static int
935nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
936{
937 struct drm_nouveau_private *dev_priv = dev->dev_private;
938 struct nouveau_channel *chan;
939 unsigned long flags;
940 int i;
941
942 spin_lock_irqsave(&dev_priv->channels.lock, flags);
943 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
944 chan = dev_priv->channels.ptr[i];
945 if (!chan || !chan->ramin)
946 continue;
947
948 if (inst == chan->ramin->vinst)
949 break;
950 }
951 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
952 return i;
953}
954
955static void
956nv50_graph_isr(struct drm_device *dev)
957{
958 u32 stat;
959
960 while ((stat = nv_rd32(dev, 0x400100))) {
961 u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
962 u32 chid = nv50_graph_isr_chid(dev, inst);
963 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
964 u32 subc = (addr & 0x00070000) >> 16;
965 u32 mthd = (addr & 0x00001ffc);
966 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
967 u32 class = nv_rd32(dev, 0x400814);
968 u32 show = stat;
969
970 if (stat & 0x00000010) {
971 if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
972 mthd, data))
973 show &= ~0x00000010;
974 }
975
976 if (stat & 0x00001000) {
977 nv_wr32(dev, 0x400500, 0x00000000);
978 nv_wr32(dev, 0x400100, 0x00001000);
979 nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
980 nv50_graph_context_switch(dev);
981 stat &= ~0x00001000;
982 show &= ~0x00001000;
983 }
984
985 show = (show && nouveau_ratelimit()) ? show : 0;
986
987 if (show & 0x00100000) {
988 u32 ecode = nv_rd32(dev, 0x400110);
989 NV_INFO(dev, "PGRAPH - DATA_ERROR ");
990 nouveau_enum_print(nv50_data_error_names, ecode);
991 printk("\n");
992 }
993
994 if (stat & 0x00200000) {
995 if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
996 show &= ~0x00200000;
997 }
998
999 nv_wr32(dev, 0x400100, stat);
1000 nv_wr32(dev, 0x400500, 0x00010001);
1001
1002 if (show) {
1003 NV_INFO(dev, "PGRAPH -");
1004 nouveau_bitfield_print(nv50_graph_intr, show);
1005 printk("\n");
1006 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
1007 "class 0x%04x mthd 0x%04x data 0x%08x\n",
1008 chid, inst, subc, class, mthd, data);
1009 }
1010 }
1011
1012 if (nv_rd32(dev, 0x400824) & (1 << 31))
1013 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
1014}
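Editor's note: both nv50_pgraph_trap_handler() and the main nv50_graph_isr() loop decode the trapped-address word with the same masks: bits 16-18 hold the subchannel, bits 2-12 the method offset, and bit 31 indicates whether a command is actually stuck. Below is a small standalone decoder for illustration only; the sample value is invented.

#include <stdint.h>
#include <stdio.h>

/* Illustration: decode a PGRAPH trapped-address word using the masks from
 * nv50_graph_isr() / nv50_pgraph_trap_handler().
 */
static void decode_trapped_addr(uint32_t addr, uint32_t *subc,
				uint32_t *mthd, int *stuck)
{
	*subc  = (addr & 0x00070000) >> 16;
	*mthd  = (addr & 0x00001ffc);
	*stuck = (addr & 0x80000000) != 0;
}

int main(void)
{
	uint32_t subc, mthd;
	int stuck;

	decode_trapped_addr(0x80030184, &subc, &mthd, &stuck);
	printf("subc %u mthd 0x%04x stuck %d\n", subc, mthd, stuck);
	return 0;
}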
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229b7647..adac4da98f7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
27 27
28#include "drmP.h" 28#include "drmP.h"
29#include "drm.h" 29#include "drm.h"
30
30#include "nouveau_drv.h" 31#include "nouveau_drv.h"
32#include "nouveau_vm.h"
33
34#define BAR1_VM_BASE 0x0020000000ULL
35#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
36#define BAR3_VM_BASE 0x0000000000ULL
37#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
31 38
32struct nv50_instmem_priv { 39struct nv50_instmem_priv {
33 uint32_t save1700[5]; /* 0x1700->0x1710 */ 40 uint32_t save1700[5]; /* 0x1700->0x1710 */
34 41
35 struct nouveau_gpuobj *pramin_pt; 42 struct nouveau_gpuobj *bar1_dmaobj;
36 struct nouveau_gpuobj *pramin_bar; 43 struct nouveau_gpuobj *bar3_dmaobj;
37 struct nouveau_gpuobj *fb_bar;
38}; 44};
39 45
40static void 46static void
@@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
48 return; 54 return;
49 55
50 nouveau_gpuobj_ref(NULL, &chan->ramfc); 56 nouveau_gpuobj_ref(NULL, &chan->ramfc);
57 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
51 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 58 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
52 if (chan->ramin_heap.free_stack.next) 59 if (chan->ramin_heap.free_stack.next)
53 drm_mm_takedown(&chan->ramin_heap); 60 drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan)
56} 63}
57 64
58static int 65static int
59nv50_channel_new(struct drm_device *dev, u32 size, 66nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
60 struct nouveau_channel **pchan) 67 struct nouveau_channel **pchan)
61{ 68{
62 struct drm_nouveau_private *dev_priv = dev->dev_private; 69 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 70 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
64 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; 71 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
65 struct nouveau_channel *chan; 72 struct nouveau_channel *chan;
66 int ret; 73 int ret, i;
67 74
68 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 75 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
69 if (!chan) 76 if (!chan)
@@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size,
92 return ret; 99 return ret;
93 } 100 }
94 101
102 for (i = 0; i < 0x4000; i += 8) {
103 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
104 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
105 }
106
107 ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
108 if (ret) {
109 nv50_channel_del(&chan);
110 return ret;
111 }
112
95 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : 113 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
96 chan->ramin->pinst + fc, 114 chan->ramin->pinst + fc,
97 chan->ramin->vinst + fc, 0x100, 115 chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev)
111 struct drm_nouveau_private *dev_priv = dev->dev_private; 129 struct drm_nouveau_private *dev_priv = dev->dev_private;
112 struct nv50_instmem_priv *priv; 130 struct nv50_instmem_priv *priv;
113 struct nouveau_channel *chan; 131 struct nouveau_channel *chan;
132 struct nouveau_vm *vm;
114 int ret, i; 133 int ret, i;
115 u32 tmp; 134 u32 tmp;
116 135
@@ -127,112 +146,89 @@ nv50_instmem_init(struct drm_device *dev)
127 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); 146 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
128 if (ret) { 147 if (ret) {
129 NV_ERROR(dev, "Failed to init RAMIN heap\n"); 148 NV_ERROR(dev, "Failed to init RAMIN heap\n");
130 return -ENOMEM; 149 goto error;
131 } 150 }
132 151
133 /* we need a channel to plug into the hw to control the BARs */ 152 /* BAR3 */
134 ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); 153 ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
154 29, 12, 16, &dev_priv->bar3_vm);
135 if (ret) 155 if (ret)
136 return ret; 156 goto error;
137 chan = dev_priv->fifos[127] = dev_priv->fifos[0];
138 157
139 /* allocate page table for PRAMIN BAR */ 158 ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
140 ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, 159 0x1000, NVOBJ_FLAG_DONT_MAP |
141 0x1000, NVOBJ_FLAG_ZERO_ALLOC, 160 NVOBJ_FLAG_ZERO_ALLOC,
142 &priv->pramin_pt); 161 &dev_priv->bar3_vm->pgt[0].obj);
143 if (ret) 162 if (ret)
144 return ret; 163 goto error;
164 dev_priv->bar3_vm->pgt[0].page_shift = 12;
165 dev_priv->bar3_vm->pgt[0].refcount = 1;
145 166
146 nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); 167 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj);
147 nv_wo32(chan->vm_pd, 0x0004, 0);
148 168
149 /* DMA object for PRAMIN BAR */ 169 ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
150 ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
151 if (ret) 170 if (ret)
152 return ret; 171 goto error;
153 nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); 172 dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
154 nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); 173
155 nv_wo32(priv->pramin_bar, 0x08, 0x00000000); 174 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
156 nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); 175 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
157 nv_wo32(priv->pramin_bar, 0x10, 0x00000000); 176 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
158 nv_wo32(priv->pramin_bar, 0x14, 0x00000000); 177 &priv->bar3_dmaobj);
159
160 /* map channel into PRAMIN, gpuobj didn't do it for us */
161 ret = nv50_instmem_bind(dev, chan->ramin);
162 if (ret) 178 if (ret)
163 return ret; 179 goto error;
164 180
165 /* poke regs... */
166 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); 181 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
167 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); 182 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
168 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); 183 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
169
170 tmp = nv_ri32(dev, 0);
171 nv_wi32(dev, 0, ~tmp);
172 if (nv_ri32(dev, 0) != ~tmp) {
173 NV_ERROR(dev, "PRAMIN readback failed\n");
174 return -EIO;
175 }
176 nv_wi32(dev, 0, tmp);
177 184
185 dev_priv->engine.instmem.flush(dev);
178 dev_priv->ramin_available = true; 186 dev_priv->ramin_available = true;
179 187
180 /* Determine VM layout */ 188 tmp = nv_ro32(chan->ramin, 0);
181 dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); 189 nv_wo32(chan->ramin, 0, ~tmp);
182 dev_priv->vm_gart_size = NV50_VM_BLOCK; 190 if (nv_ro32(chan->ramin, 0) != ~tmp) {
183 191 NV_ERROR(dev, "PRAMIN readback failed\n");
184 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; 192 ret = -EIO;
185 dev_priv->vm_vram_size = dev_priv->vram_size; 193 goto error;
186 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
187 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
188 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
189 dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
190
191 dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
192
193 NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
194 dev_priv->vm_gart_base,
195 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
196 NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
197 dev_priv->vm_vram_base,
198 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
199
200 /* VRAM page table(s), mapped into VM at +1GiB */
201 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
202 ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
203 0, NVOBJ_FLAG_ZERO_ALLOC,
204 &chan->vm_vram_pt[i]);
205 if (ret) {
206 NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
207 dev_priv->vm_vram_pt_nr = i;
208 return ret;
209 }
210 dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
211
212 nv_wo32(chan->vm_pd, 0x10 + (i*8),
213 chan->vm_vram_pt[i]->vinst | 0x61);
214 nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
215 } 194 }
195 nv_wo32(chan->ramin, 0, tmp);
216 196
217 /* DMA object for FB BAR */ 197 /* BAR1 */
218 ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); 198 ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE,
199 29, 12, 16, &vm);
219 if (ret) 200 if (ret)
220 return ret; 201 goto error;
221 nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
222 nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
223 pci_resource_len(dev->pdev, 1) - 1);
224 nv_wo32(priv->fb_bar, 0x08, 0x40000000);
225 nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
226 nv_wo32(priv->fb_bar, 0x10, 0x00000000);
227 nv_wo32(priv->fb_bar, 0x14, 0x00000000);
228 202
229 dev_priv->engine.instmem.flush(dev); 203 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
204 if (ret)
205 goto error;
206 nouveau_vm_ref(NULL, &vm, NULL);
207
208 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
209 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
210 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
211 &priv->bar1_dmaobj);
212 if (ret)
213 goto error;
230 214
231 nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); 215 nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
232 for (i = 0; i < 8; i++) 216 for (i = 0; i < 8; i++)
233 nv_wr32(dev, 0x1900 + (i*4), 0); 217 nv_wr32(dev, 0x1900 + (i*4), 0);
234 218
219 /* Create shared channel VM, space is reserved at the beginning
220 * to catch "NULL pointer" references
221 */
222 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
223 29, 12, 16, &dev_priv->chan_vm);
224 if (ret)
225 return ret;
226
235 return 0; 227 return 0;
228
229error:
230 nv50_instmem_takedown(dev);
231 return ret;
236} 232}
237 233
238void 234void
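Editor's note: the BAR3 page table allocated in the hunk above is sized as (BAR3_VM_SIZE >> 12) * 8, i.e. one 8-byte PTE per 4 KiB page of the PRAMIN aperture. A worked example follows; the 64 MiB aperture size is hypothetical, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustration: BAR3 page-table size as computed in nv50_instmem_init(),
 * one 8-byte PTE per 4 KiB page of the aperture.  The aperture size is an
 * example value.
 */
int main(void)
{
	uint64_t bar3_size = 64ULL << 20;		/* hypothetical 64 MiB */
	uint64_t pgt_bytes = (bar3_size >> 12) * 8;

	printf("BAR3 %llu MiB -> page table %llu KiB\n",
	       (unsigned long long)(bar3_size >> 20),
	       (unsigned long long)(pgt_bytes >> 10));
	return 0;
}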
@@ -240,7 +236,7 @@ nv50_instmem_takedown(struct drm_device *dev)
240{ 236{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 237 struct drm_nouveau_private *dev_priv = dev->dev_private;
242 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 238 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
243 struct nouveau_channel *chan = dev_priv->fifos[0]; 239 struct nouveau_channel *chan = dev_priv->channels.ptr[0];
244 int i; 240 int i;
245 241
246 NV_DEBUG(dev, "\n"); 242 NV_DEBUG(dev, "\n");
@@ -250,23 +246,23 @@ nv50_instmem_takedown(struct drm_device *dev)
250 246
251 dev_priv->ramin_available = false; 247 dev_priv->ramin_available = false;
252 248
253 /* Restore state from before init */ 249 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
250
254 for (i = 0x1700; i <= 0x1710; i += 4) 251 for (i = 0x1700; i <= 0x1710; i += 4)
255 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); 252 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
256 253
257 nouveau_gpuobj_ref(NULL, &priv->fb_bar); 254 nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
258 nouveau_gpuobj_ref(NULL, &priv->pramin_bar); 255 nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
259 nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
260 256
261 /* Destroy dummy channel */ 257 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
262 if (chan) { 258 dev_priv->channels.ptr[127] = 0;
263 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) 259 nv50_channel_del(&dev_priv->channels.ptr[0]);
264 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
265 dev_priv->vm_vram_pt_nr = 0;
266 260
267 nv50_channel_del(&dev_priv->fifos[0]); 261 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj);
268 dev_priv->fifos[127] = NULL; 262 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
269 } 263
264 if (dev_priv->ramin_heap.free_stack.next)
265 drm_mm_takedown(&dev_priv->ramin_heap);
270 266
271 dev_priv->engine.instmem.priv = NULL; 267 dev_priv->engine.instmem.priv = NULL;
272 kfree(priv); 268 kfree(priv);
@@ -276,16 +272,8 @@ int
276nv50_instmem_suspend(struct drm_device *dev) 272nv50_instmem_suspend(struct drm_device *dev)
277{ 273{
278 struct drm_nouveau_private *dev_priv = dev->dev_private; 274 struct drm_nouveau_private *dev_priv = dev->dev_private;
279 struct nouveau_channel *chan = dev_priv->fifos[0];
280 struct nouveau_gpuobj *ramin = chan->ramin;
281 int i;
282 275
283 ramin->im_backing_suspend = vmalloc(ramin->size); 276 dev_priv->ramin_available = false;
284 if (!ramin->im_backing_suspend)
285 return -ENOMEM;
286
287 for (i = 0; i < ramin->size; i += 4)
288 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
289 return 0; 277 return 0;
290} 278}
291 279
@@ -294,146 +282,121 @@ nv50_instmem_resume(struct drm_device *dev)
294{ 282{
295 struct drm_nouveau_private *dev_priv = dev->dev_private; 283 struct drm_nouveau_private *dev_priv = dev->dev_private;
296 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 284 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
297 struct nouveau_channel *chan = dev_priv->fifos[0]; 285 struct nouveau_channel *chan = dev_priv->channels.ptr[0];
298 struct nouveau_gpuobj *ramin = chan->ramin;
299 int i; 286 int i;
300 287
301 dev_priv->ramin_available = false;
302 dev_priv->ramin_base = ~0;
303 for (i = 0; i < ramin->size; i += 4)
304 nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
305 dev_priv->ramin_available = true;
306 vfree(ramin->im_backing_suspend);
307 ramin->im_backing_suspend = NULL;
308
309 /* Poke the relevant regs, and pray it works :) */ 288 /* Poke the relevant regs, and pray it works :) */
310 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); 289 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
311 nv_wr32(dev, NV50_PUNK_UNK1710, 0); 290 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
312 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | 291 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
313 NV50_PUNK_BAR_CFG_BASE_VALID); 292 NV50_PUNK_BAR_CFG_BASE_VALID);
314 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | 293 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
315 NV50_PUNK_BAR1_CTXDMA_VALID); 294 NV50_PUNK_BAR1_CTXDMA_VALID);
316 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | 295 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
317 NV50_PUNK_BAR3_CTXDMA_VALID); 296 NV50_PUNK_BAR3_CTXDMA_VALID);
318 297
319 for (i = 0; i < 8; i++) 298 for (i = 0; i < 8; i++)
320 nv_wr32(dev, 0x1900 + (i*4), 0); 299 nv_wr32(dev, 0x1900 + (i*4), 0);
300
301 dev_priv->ramin_available = true;
321} 302}
322 303
304struct nv50_gpuobj_node {
305 struct nouveau_vram *vram;
306 struct nouveau_vma chan_vma;
307 u32 align;
308};
309
310
323int 311int
324nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, 312nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
325 uint32_t *sz)
326{ 313{
314 struct drm_device *dev = gpuobj->dev;
315 struct drm_nouveau_private *dev_priv = dev->dev_private;
316 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
317 struct nv50_gpuobj_node *node = NULL;
327 int ret; 318 int ret;
328 319
329 if (gpuobj->im_backing) 320 node = kzalloc(sizeof(*node), GFP_KERNEL);
330 return -EINVAL; 321 if (!node)
322 return -ENOMEM;
323 node->align = align;
331 324
332 *sz = ALIGN(*sz, 4096); 325 size = (size + 4095) & ~4095;
333 if (*sz == 0) 326 align = max(align, (u32)4096);
334 return -EINVAL;
335 327
336 ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, 328 ret = vram->get(dev, size, align, 0, 0, &node->vram);
337 true, false, &gpuobj->im_backing);
338 if (ret) { 329 if (ret) {
339 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); 330 kfree(node);
340 return ret; 331 return ret;
341 } 332 }
342 333
343 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); 334 gpuobj->vinst = node->vram->offset;
344 if (ret) { 335
345 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); 336 if (gpuobj->flags & NVOBJ_FLAG_VM) {
346 nouveau_bo_ref(NULL, &gpuobj->im_backing); 337 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
347 return ret; 338 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
339 &node->chan_vma);
340 if (ret) {
341 vram->put(dev, &node->vram);
342 kfree(node);
343 return ret;
344 }
345
346 nouveau_vm_map(&node->chan_vma, node->vram);
347 gpuobj->vinst = node->chan_vma.offset;
348 } 348 }
349 349
350 gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; 350 gpuobj->size = size;
351 gpuobj->node = node;
351 return 0; 352 return 0;
352} 353}
353 354
354void 355void
355nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 356nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
356{ 357{
358 struct drm_device *dev = gpuobj->dev;
357 struct drm_nouveau_private *dev_priv = dev->dev_private; 359 struct drm_nouveau_private *dev_priv = dev->dev_private;
360 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
361 struct nv50_gpuobj_node *node;
362
363 node = gpuobj->node;
364 gpuobj->node = NULL;
358 365
359 if (gpuobj && gpuobj->im_backing) { 366 if (node->chan_vma.node) {
360 if (gpuobj->im_bound) 367 nouveau_vm_unmap(&node->chan_vma);
361 dev_priv->engine.instmem.unbind(dev, gpuobj); 368 nouveau_vm_put(&node->chan_vma);
362 nouveau_bo_unpin(gpuobj->im_backing);
363 nouveau_bo_ref(NULL, &gpuobj->im_backing);
364 gpuobj->im_backing = NULL;
365 } 369 }
370 vram->put(dev, &node->vram);
371 kfree(node);
366} 372}
367 373
368int 374int
369nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 375nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
370{ 376{
371 struct drm_nouveau_private *dev_priv = dev->dev_private; 377 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
372 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 378 struct nv50_gpuobj_node *node = gpuobj->node;
373 struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; 379 int ret;
374 uint32_t pte, pte_end;
375 uint64_t vram;
376
377 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
378 return -EINVAL;
379
380 NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
381 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
382
383 pte = (gpuobj->im_pramin->start >> 12) << 1;
384 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
385 vram = gpuobj->vinst;
386
387 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
388 gpuobj->im_pramin->start, pte, pte_end);
389 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
390
391 vram |= 1;
392 if (dev_priv->vram_sys_base) {
393 vram += dev_priv->vram_sys_base;
394 vram |= 0x30;
395 }
396
397 while (pte < pte_end) {
398 nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
399 nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
400 vram += 0x1000;
401 pte += 2;
402 }
403 dev_priv->engine.instmem.flush(dev);
404 380
405 nv50_vm_flush(dev, 6); 381 ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
382 NV_MEM_ACCESS_RW, &node->vram->bar_vma);
383 if (ret)
384 return ret;
406 385
407 gpuobj->im_bound = 1; 386 nouveau_vm_map(&node->vram->bar_vma, node->vram);
387 gpuobj->pinst = node->vram->bar_vma.offset;
408 return 0; 388 return 0;
409} 389}
410 390
411int 391void
412nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 392nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
413{ 393{
414 struct drm_nouveau_private *dev_priv = dev->dev_private; 394 struct nv50_gpuobj_node *node = gpuobj->node;
415 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
416 uint32_t pte, pte_end;
417
418 if (gpuobj->im_bound == 0)
419 return -EINVAL;
420
421 /* can happen during late takedown */
422 if (unlikely(!dev_priv->ramin_available))
423 return 0;
424 395
425 pte = (gpuobj->im_pramin->start >> 12) << 1; 396 if (node->vram->bar_vma.node) {
426 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; 397 nouveau_vm_unmap(&node->vram->bar_vma);
427 398 nouveau_vm_put(&node->vram->bar_vma);
428 while (pte < pte_end) {
429 nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
430 nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
431 pte += 2;
432 } 399 }
433 dev_priv->engine.instmem.flush(dev);
434
435 gpuobj->im_bound = 0;
436 return 0;
437} 400}
438 401
439void 402void
@@ -452,11 +415,3 @@ nv84_instmem_flush(struct drm_device *dev)
452 NV_ERROR(dev, "PRAMIN flush timeout\n"); 415 NV_ERROR(dev, "PRAMIN flush timeout\n");
453} 416}
454 417
455void
456nv50_vm_flush(struct drm_device *dev, int engine)
457{
458 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
459 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
460 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
461}
462
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 000000000000..eebab95f59b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_vm.h"
29
30void
31nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
32 struct nouveau_gpuobj *pgt)
33{
34 struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
35 u32 coverage = (pgt->size >> 3) << type;
36 u64 phys;
37
38 phys = pgt->vinst;
39 phys |= 0x01; /* present */
40 phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */
41 if (dev_priv->vram_sys_base) {
42 phys += dev_priv->vram_sys_base;
43 phys |= 0x30;
44 }
45
46 if (coverage <= 32 * 1024 * 1024)
47 phys |= 0x60;
48 else if (coverage <= 64 * 1024 * 1024)
49 phys |= 0x40;
50 else if (coverage < 128 * 1024 * 1024)
51 phys |= 0x20;
52
53 nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
54 nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
55}
56
57void
58nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
59{
60 nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
61 nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
62}
63
64static inline u64
65nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
66 u64 phys, u32 memtype, u32 target)
67{
68 struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
69
70 phys |= 1; /* present */
71 phys |= (u64)memtype << 40;
72
73 /* IGPs don't have real VRAM, re-target to stolen system memory */
74 if (target == 0 && dev_priv->vram_sys_base) {
75 phys += dev_priv->vram_sys_base;
76 target = 3;
77 }
78
79 phys |= target << 4;
80
81 if (vma->access & NV_MEM_ACCESS_SYS)
82 phys |= (1 << 6);
83
84 if (!(vma->access & NV_MEM_ACCESS_WO))
85 phys |= (1 << 3);
86
87 return phys;
88}
89
90void
91nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
92 struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
93{
94 u32 block, i;
95
96 phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
97 pte <<= 3;
98 cnt <<= 3;
99
100 while (cnt) {
101 u32 offset_h = upper_32_bits(phys);
102 u32 offset_l = lower_32_bits(phys);
103
104 for (i = 7; i >= 0; i--) {
105 block = 1 << (i + 3);
106 if (cnt >= block && !(pte & (block - 1)))
107 break;
108 }
109 offset_l |= (i << 7);
110
111 phys += block << (vma->node->type - 3);
112 cnt -= block;
113
114 while (block) {
115 nv_wo32(pgt, pte + 0, offset_l);
116 nv_wo32(pgt, pte + 4, offset_h);
117 pte += 8;
118 block -= 8;
119 }
120 }
121}
122
123void
124nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
125 u32 pte, dma_addr_t *list, u32 cnt)
126{
127 pte <<= 3;
128 while (cnt--) {
129 u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
130 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
131 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
132 pte += 8;
133 }
134}
135
136void
137nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
138{
139 pte <<= 3;
140 while (cnt--) {
141 nv_wo32(pgt, pte + 0, 0x00000000);
142 nv_wo32(pgt, pte + 4, 0x00000000);
143 pte += 8;
144 }
145}
146
147void
148nv50_vm_flush(struct nouveau_vm *vm)
149{
150 struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
151 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
152 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
153 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
154 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
155
156 pinstmem->flush(vm->dev);
157
158 /* BAR */
159 if (vm != dev_priv->chan_vm) {
160 nv50_vm_flush_engine(vm->dev, 6);
161 return;
162 }
163
164 pfifo->tlb_flush(vm->dev);
165
166 if (atomic_read(&vm->pgraph_refs))
167 pgraph->tlb_flush(vm->dev);
168 if (atomic_read(&vm->pcrypt_refs))
169 pcrypt->tlb_flush(vm->dev);
170}
171
172void
173nv50_vm_flush_engine(struct drm_device *dev, int engine)
174{
175 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
176 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
177 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
178}
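Editor's note: nv50_vm_map_pgt() packs the page-directory entry from the page table's instance address plus flag bits: bit 0 marks it present, bit 1 is set for 4 KiB (type 12) pages, and bits 5-6 are derived from how much address space the table covers. The helper below restates that encoding as standalone C for illustration; it deliberately leaves out the vram_sys_base retargeting the driver applies on IGPs.

#include <stdint.h>

/* Illustration: rebuild the 64-bit PDE written by nv50_vm_map_pgt().
 *   pgt_vinst - VRAM instance address of the page table
 *   type      - page shift (12 for 4 KiB pages, 16 for 64 KiB pages)
 *   pgt_size  - page table size in bytes (8 bytes per PTE)
 */
uint64_t nv50_pde(uint64_t pgt_vinst, unsigned int type, uint32_t pgt_size)
{
	uint32_t coverage = (pgt_size >> 3) << type;	/* bytes mapped */
	uint64_t pde = pgt_vinst;

	pde |= 0x01;				/* present */
	pde |= (type == 12) ? 0x02 : 0x00;	/* 4 KiB pages */

	if (coverage <= 32 * 1024 * 1024)
		pde |= 0x60;
	else if (coverage <= 64 * 1024 * 1024)
		pde |= 0x40;
	else if (coverage < 128 * 1024 * 1024)
		pde |= 0x20;

	return pde;
}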
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 000000000000..47489ed0a5a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29static int types[0x80] = {
30 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
31 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
32 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
33 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
34 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
35 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
36 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
37 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
38};
39
40bool
41nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
42{
43 int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
44
45 if (likely(type < sizeof(types) && types[type]))
46 return true;
47 return false;
48}
49
50void
51nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
55 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
56 struct nouveau_mm *mm = man->priv;
57 struct nouveau_mm_node *this;
58 struct nouveau_vram *vram;
59
60 vram = *pvram;
61 *pvram = NULL;
62 if (unlikely(vram == NULL))
63 return;
64
65 mutex_lock(&mm->mutex);
66 while (!list_empty(&vram->regions)) {
67 this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
68
69 list_del(&this->rl_entry);
70 nouveau_mm_put(mm, this);
71 }
72 mutex_unlock(&mm->mutex);
73
74 kfree(vram);
75}
76
77int
78nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
79 u32 type, struct nouveau_vram **pvram)
80{
81 struct drm_nouveau_private *dev_priv = dev->dev_private;
82 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
83 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
84 struct nouveau_mm *mm = man->priv;
85 struct nouveau_mm_node *r;
86 struct nouveau_vram *vram;
87 int ret;
88
89 if (!types[type])
90 return -EINVAL;
91 size >>= 12;
92 align >>= 12;
93 size_nc >>= 12;
94
95 vram = kzalloc(sizeof(*vram), GFP_KERNEL);
96 if (!vram)
97 return -ENOMEM;
98
99 INIT_LIST_HEAD(&vram->regions);
100 vram->dev = dev_priv->dev;
101 vram->memtype = type;
102 vram->size = size;
103
104 mutex_lock(&mm->mutex);
105 do {
106 ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
107 if (ret) {
108 mutex_unlock(&mm->mutex);
109 nv50_vram_del(dev, &vram);
110 return ret;
111 }
112
113 list_add_tail(&r->rl_entry, &vram->regions);
114 size -= r->length;
115 } while (size);
116 mutex_unlock(&mm->mutex);
117
118 r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
119 vram->offset = (u64)r->offset << 12;
120 *pvram = vram;
121 return 0;
122}
123
124static u32
125nv50_vram_rblock(struct drm_device *dev)
126{
127 struct drm_nouveau_private *dev_priv = dev->dev_private;
128 int i, parts, colbits, rowbitsa, rowbitsb, banks;
129 u64 rowsize, predicted;
130 u32 r0, r4, rt, ru, rblock_size;
131
132 r0 = nv_rd32(dev, 0x100200);
133 r4 = nv_rd32(dev, 0x100204);
134 rt = nv_rd32(dev, 0x100250);
135 ru = nv_rd32(dev, 0x001540);
136 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
137
138 for (i = 0, parts = 0; i < 8; i++) {
139 if (ru & (0x00010000 << i))
140 parts++;
141 }
142
143 colbits = (r4 & 0x0000f000) >> 12;
144 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
145 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
146 banks = ((r4 & 0x01000000) ? 8 : 4);
147
148 rowsize = parts * banks * (1 << colbits) * 8;
149 predicted = rowsize << rowbitsa;
150 if (r0 & 0x00000004)
151 predicted += rowsize << rowbitsb;
152
153 if (predicted != dev_priv->vram_size) {
154 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
155 (u32)(dev_priv->vram_size >> 20));
156 NV_WARN(dev, "we calculated %dMiB VRAM\n",
157 (u32)(predicted >> 20));
158 }
159
160 rblock_size = rowsize;
161 if (rt & 1)
162 rblock_size *= 3;
163
164 NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
165 return rblock_size;
166}
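
For a concrete feel for the arithmetic in nv50_vram_rblock(), a hypothetical configuration works out as follows:

/* Hypothetical example: 8 partitions enabled, 8 banks, colbits = 9,
 * rowbitsa = 14, a single rank (r0 bit 2 clear) and rt bit 0 clear:
 *
 *   rowsize   = 8 * 8 * (1 << 9) * 8 = 262144 bytes (256 KiB)
 *   predicted = 262144 << 14         = 4 GiB
 *   rblock    = rowsize              = 262144 bytes
 */
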
167
168int
169nv50_vram_init(struct drm_device *dev)
170{
171 struct drm_nouveau_private *dev_priv = dev->dev_private;
172
173 dev_priv->vram_size = nv_rd32(dev, 0x10020c);
174 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
175 dev_priv->vram_size &= 0xffffffff00ULL;
176
177 switch (dev_priv->chipset) {
178 case 0xaa:
179 case 0xac:
180 case 0xaf:
181 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
182 dev_priv->vram_rblock_size = 4096;
183 break;
184 default:
185 dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
186 break;
187 }
188
189 return 0;
190}
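
A hypothetical caller of the allocator above, for illustration only: size and alignment are passed in bytes and converted to 4 KiB pages internally, and memtype 0 is a valid entry in the types[] table:

/* Grab 1 MiB of memtype 0 VRAM with 64 KiB alignment, report where it
 * ended up, and release it again. */
static int
example_vram_roundtrip(struct drm_device *dev)
{
	struct nouveau_vram *vram = NULL;
	int ret;

	ret = nv50_vram_new(dev, 1 << 20, 1 << 16, 0, 0x000, &vram);
	if (ret)
		return ret;

	/* offset of the first region, in bytes from the start of VRAM */
	NV_DEBUG(dev, "VRAM allocation at 0x%010llx\n", vram->offset);

	nv50_vram_del(dev, &vram);
	return 0;
}
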
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 000000000000..ec18ae1c3886
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29
30static void nv84_crypt_isr(struct drm_device *);
31
32int
33nv84_crypt_create_context(struct nouveau_channel *chan)
34{
35 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *ramin = chan->ramin;
38 int ret;
39
40 NV_DEBUG(dev, "ch%d\n", chan->id);
41
42 ret = nouveau_gpuobj_new(dev, chan, 256, 0,
43 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
44 &chan->crypt_ctx);
45 if (ret)
46 return ret;
47
48 nv_wo32(ramin, 0xa0, 0x00190000);
49 nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
50 nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
51 nv_wo32(ramin, 0xac, 0);
52 nv_wo32(ramin, 0xb0, 0);
53 nv_wo32(ramin, 0xb4, 0);
54
55 dev_priv->engine.instmem.flush(dev);
56 atomic_inc(&chan->vm->pcrypt_refs);
57 return 0;
58}
59
60void
61nv84_crypt_destroy_context(struct nouveau_channel *chan)
62{
63 struct drm_device *dev = chan->dev;
64 u32 inst;
65
66 if (!chan->crypt_ctx)
67 return;
68
69 inst = (chan->ramin->vinst >> 12);
70 inst |= 0x80000000;
71
 72	/* mark the context as invalid if it is still resident on the
 73	 * hardware; not doing so causes problems the next time
 74	 * PCRYPT is used.
 75	 */
76 nv_wr32(dev, 0x10200c, 0x00000000);
77 if (nv_rd32(dev, 0x102188) == inst)
78 nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
79 if (nv_rd32(dev, 0x10218c) == inst)
80 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
81 nv_wr32(dev, 0x10200c, 0x00000010);
82
83 nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
84 atomic_dec(&chan->vm->pcrypt_refs);
85}
86
87void
88nv84_crypt_tlb_flush(struct drm_device *dev)
89{
90 nv50_vm_flush_engine(dev, 0x0a);
91}
92
93int
94nv84_crypt_init(struct drm_device *dev)
95{
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
98
99 if (!pcrypt->registered) {
100 NVOBJ_CLASS(dev, 0x74c1, CRYPT);
101 pcrypt->registered = true;
102 }
103
104 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
105 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
106
107 nouveau_irq_register(dev, 14, nv84_crypt_isr);
108 nv_wr32(dev, 0x102130, 0xffffffff);
109 nv_wr32(dev, 0x102140, 0xffffffbf);
110
111 nv_wr32(dev, 0x10200c, 0x00000010);
112 return 0;
113}
114
115void
116nv84_crypt_fini(struct drm_device *dev)
117{
118 nv_wr32(dev, 0x102140, 0x00000000);
119 nouveau_irq_unregister(dev, 14);
120}
121
122static void
123nv84_crypt_isr(struct drm_device *dev)
124{
125 u32 stat = nv_rd32(dev, 0x102130);
126 u32 mthd = nv_rd32(dev, 0x102190);
127 u32 data = nv_rd32(dev, 0x102194);
128 u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
129 int show = nouveau_ratelimit();
130
131 if (show) {
132 NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
133 stat, mthd, data, inst);
134 }
135
136 nv_wr32(dev, 0x102130, stat);
137 nv_wr32(dev, 0x10200c, 0x10);
138
139 nv50_fb_vm_trap(dev, show, "PCRYPT");
140}
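
nv84_crypt_init() above uses the usual PMC reset idiom before programming the interrupt masks: pulse the engine's bit in the master enable register (0x000200) low, then high. The same idiom in isolation, as a sketch (the helper name is an assumption; bit 14 is the PCRYPT enable implied by the nv_mask() calls above):

/* Hold an engine in reset by clearing its PMC_ENABLE bit, then release it. */
static void
example_engine_reset(struct drm_device *dev, u32 enable_bit)
{
	nv_mask(dev, 0x000200, enable_bit, 0x00000000);	/* assert reset */
	nv_mask(dev, 0x000200, enable_bit, enable_bit);	/* deassert reset */
}

Calling example_engine_reset(dev, 0x00004000) reproduces the two nv_mask() writes in nv84_crypt_init().
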
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78a9088..39232085193d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -26,67 +26,89 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28 28
29struct nvc0_gpuobj_node {
30 struct nouveau_bo *vram;
31 struct drm_mm_node *ramin;
32 u32 align;
33};
34
29int 35int
30nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, 36nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
31 uint32_t *size)
32{ 37{
38 struct drm_device *dev = gpuobj->dev;
39 struct nvc0_gpuobj_node *node = NULL;
33 int ret; 40 int ret;
34 41
35 *size = ALIGN(*size, 4096); 42 node = kzalloc(sizeof(*node), GFP_KERNEL);
36 if (*size == 0) 43 if (!node)
37 return -EINVAL; 44 return -ENOMEM;
45 node->align = align;
38 46
39 ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, 47 ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
40 true, false, &gpuobj->im_backing); 48 0, 0x0000, true, false, &node->vram);
41 if (ret) { 49 if (ret) {
42 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); 50 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
43 return ret; 51 return ret;
44 } 52 }
45 53
46 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); 54 ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
47 if (ret) { 55 if (ret) {
48 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); 56 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
49 nouveau_bo_ref(NULL, &gpuobj->im_backing); 57 nouveau_bo_ref(NULL, &node->vram);
50 return ret; 58 return ret;
51 } 59 }
52 60
53 gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; 61 gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
62 gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
63 gpuobj->node = node;
54 return 0; 64 return 0;
55} 65}
56 66
57void 67void
58nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 68nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
59{ 69{
60 struct drm_nouveau_private *dev_priv = dev->dev_private; 70 struct nvc0_gpuobj_node *node;
61 71
62 if (gpuobj && gpuobj->im_backing) { 72 node = gpuobj->node;
63 if (gpuobj->im_bound) 73 gpuobj->node = NULL;
64 dev_priv->engine.instmem.unbind(dev, gpuobj); 74
65 nouveau_bo_unpin(gpuobj->im_backing); 75 nouveau_bo_unpin(node->vram);
66 nouveau_bo_ref(NULL, &gpuobj->im_backing); 76 nouveau_bo_ref(NULL, &node->vram);
67 gpuobj->im_backing = NULL; 77 kfree(node);
68 }
69} 78}
70 79
71int 80int
72nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 81nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
73{ 82{
74 struct drm_nouveau_private *dev_priv = dev->dev_private; 83 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
75 uint32_t pte, pte_end; 84 struct nvc0_gpuobj_node *node = gpuobj->node;
76 uint64_t vram; 85 struct drm_device *dev = gpuobj->dev;
77 86 struct drm_mm_node *ramin = NULL;
78 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) 87 u32 pte, pte_end;
79 return -EINVAL; 88 u64 vram;
80 89
81 NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", 90 do {
82 gpuobj->im_pramin->start, gpuobj->im_pramin->size); 91 if (drm_mm_pre_get(&dev_priv->ramin_heap))
83 92 return -ENOMEM;
84 pte = gpuobj->im_pramin->start >> 12; 93
85 pte_end = (gpuobj->im_pramin->size >> 12) + pte; 94 spin_lock(&dev_priv->ramin_lock);
95 ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
96 node->align, 0);
97 if (ramin == NULL) {
98 spin_unlock(&dev_priv->ramin_lock);
99 return -ENOMEM;
100 }
101
102 ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
103 spin_unlock(&dev_priv->ramin_lock);
104 } while (ramin == NULL);
105
106 pte = (ramin->start >> 12) << 1;
107 pte_end = ((ramin->size >> 12) << 1) + pte;
86 vram = gpuobj->vinst; 108 vram = gpuobj->vinst;
87 109
88 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", 110 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
89 gpuobj->im_pramin->start, pte, pte_end); 111 ramin->start, pte, pte_end);
90 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); 112 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
91 113
92 while (pte < pte_end) { 114 while (pte < pte_end) {
@@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
103 nv_wr32(dev, 0x100cbc, 0x80000005); 125 nv_wr32(dev, 0x100cbc, 0x80000005);
104 } 126 }
105 127
106 gpuobj->im_bound = 1; 128 node->ramin = ramin;
129 gpuobj->pinst = ramin->start;
107 return 0; 130 return 0;
108} 131}
109 132
110int 133void
111nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 134nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
112{ 135{
113 struct drm_nouveau_private *dev_priv = dev->dev_private; 136 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
114 uint32_t pte, pte_end; 137 struct nvc0_gpuobj_node *node = gpuobj->node;
138 u32 pte, pte_end;
115 139
116 if (gpuobj->im_bound == 0) 140 if (!node->ramin || !dev_priv->ramin_available)
117 return -EINVAL; 141 return;
142
143 pte = (node->ramin->start >> 12) << 1;
144 pte_end = ((node->ramin->size >> 12) << 1) + pte;
118 145
119 pte = gpuobj->im_pramin->start >> 12;
120 pte_end = (gpuobj->im_pramin->size >> 12) + pte;
121 while (pte < pte_end) { 146 while (pte < pte_end) {
122 nv_wr32(dev, 0x702000 + (pte * 8), 0); 147 nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
123 nv_wr32(dev, 0x702004 + (pte * 8), 0); 148 nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
124 pte++; 149 pte++;
125 } 150 }
126 dev_priv->engine.instmem.flush(dev); 151 dev_priv->engine.instmem.flush(gpuobj->dev);
127 152
128 gpuobj->im_bound = 0; 153 spin_lock(&dev_priv->ramin_lock);
129 return 0; 154 drm_mm_put_block(node->ramin);
155 node->ramin = NULL;
156 spin_unlock(&dev_priv->ramin_lock);
130} 157}
131 158
132void 159void
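
nvc0_instmem_map() reserves its PRAMIN window with the pre-get/search/get-atomic dance drm_mm requires when the final claim happens under a spinlock: drm_mm_pre_get() fills the node cache outside the lock, the search and atomic grab happen under the lock, and the sequence is retried if the atomic grab loses a race. The same pattern in isolation (the wrapper name is an assumption; the drm_mm_* calls are the in-tree API of this era):

/* Reserve a block from the RAMIN heap; returns NULL on failure. */
static struct drm_mm_node *
example_ramin_alloc(struct drm_nouveau_private *dev_priv, u32 size, u32 align)
{
	struct drm_mm_node *node;

	do {
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return NULL;		/* out of kernel memory */

		spin_lock(&dev_priv->ramin_lock);
		node = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (!node) {
			spin_unlock(&dev_priv->ramin_lock);
			return NULL;		/* RAMIN heap exhausted */
		}

		node = drm_mm_get_block_atomic(node, size, align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (!node);

	return node;
}
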
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a585613..fe0f253089ac 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
153#define NV_PCRTC_START 0x00600800 153#define NV_PCRTC_START 0x00600800
154#define NV_PCRTC_CONFIG 0x00600804 154#define NV_PCRTC_CONFIG 0x00600804
155# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) 155# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
156# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) 156# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0)
157# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
157#define NV_PCRTC_CURSOR_CONFIG 0x00600810 158#define NV_PCRTC_CURSOR_CONFIG 0x00600810
158# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) 159# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
159# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) 160# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b1a1ac..e2cfe80f6fca 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@ struct drm_nouveau_gpuobj_free {
71#define NOUVEAU_GETPARAM_PCI_VENDOR 3 71#define NOUVEAU_GETPARAM_PCI_VENDOR 3
72#define NOUVEAU_GETPARAM_PCI_DEVICE 4 72#define NOUVEAU_GETPARAM_PCI_DEVICE 4
73#define NOUVEAU_GETPARAM_BUS_TYPE 5 73#define NOUVEAU_GETPARAM_BUS_TYPE 5
74#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
75#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
76#define NOUVEAU_GETPARAM_FB_SIZE 8 74#define NOUVEAU_GETPARAM_FB_SIZE 8
77#define NOUVEAU_GETPARAM_AGP_SIZE 9 75#define NOUVEAU_GETPARAM_AGP_SIZE 9
78#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
79#define NOUVEAU_GETPARAM_CHIPSET_ID 11 76#define NOUVEAU_GETPARAM_CHIPSET_ID 11
80#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 77#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
81#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 78#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
82#define NOUVEAU_GETPARAM_PTIMER_TIME 14 79#define NOUVEAU_GETPARAM_PTIMER_TIME 14
83#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 80#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
81#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
84struct drm_nouveau_getparam { 82struct drm_nouveau_getparam {
85 uint64_t param; 83 uint64_t param;
86 uint64_t value; 84 uint64_t value;
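
Userspace can probe the new NOUVEAU_GETPARAM_HAS_PAGEFLIP capability with an ordinary getparam round trip. A hedged sketch using libdrm's drmCommandWriteRead(), assuming an already-open DRM fd:

#include <xf86drm.h>
#include <drm/nouveau_drm.h>

/* Returns non-zero if the running kernel advertises page-flip support. */
static int
example_has_pageflip(int fd)
{
	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_HAS_PAGEFLIP };

	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)))
		return 0;	/* older kernels reject unknown parameters */

	return gp.value != 0;
}
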
@@ -171,7 +169,6 @@ struct drm_nouveau_gem_pushbuf {
171}; 169};
172 170
173#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 171#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
174#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
175#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 172#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
176struct drm_nouveau_gem_cpu_prep { 173struct drm_nouveau_gem_cpu_prep {
177 uint32_t handle; 174 uint32_t handle;