author		Dave Airlie <airlied@redhat.com>	2011-02-24 22:37:02 -0500
committer	Dave Airlie <airlied@redhat.com>	2011-02-24 22:37:02 -0500
commit		dc87eaf1771d15152ca379a0b7c32df57a0e87dc (patch)
tree		e67c014bf376d703e0b0a713bdfb2da7221df161
parent		a2c06ee2fe5b48a71e697bae00c6e7195fc016b6 (diff)
parent		562af10c676936ba510860d3a25e60e55312d5cd (diff)
Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-next
* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (50 commits)
  drm/nv50: flesh out ZCULL init and match nvidia on later chipsets
  drm/nv50: support for compression
  drm/nv50-nvc0: delay GART binding until move_notify time
  drm/nouveau: rename nouveau_vram to nouveau_mem
  drm/nvc0: allow creation of buffers with any non-compressed memtype
  drm/nv50-nvc0: unmap buffers from the vm when they're evicted
  drm/nv50-nvc0: move vm bind/unbind to move_notify hook
  drm/nv50-nvc0: restrict memtype to those specified at creation time
  drm/nouveau: pass domain rather than ttm flags to gem_new()
  drm/nv50: simplify bo moves now that they're all through the vm
  drm/nouveau: remove no_vm/mappable flags from nouveau_bo
  drm/nouveau: Fix pageflip event
  drm/nouveau/vbios: parse more gpio tag bits from connector table
  drm/nouveau: decode PFIFO DMA_PUSHER error codes
  drm/nv50: fix typos in CCACHE error reporting
  drm/nvc0: support for sw methods + enable page flipping
  drm/nv50: enable page flipping
  drm/nv50-nvc0: activate/update ds channel's framebuffer on modesets
  drm/nv50-nvc0: initialise display sync channels
  drm/nv50-nvc0: precalculate some fb state when creating them
  ...
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bios.c		43
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bios.h		2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c		268
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_channel.c	5
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_connector.c	1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_display.c	75
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dma.c		10
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dma.h		7
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dp.c		2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h		45
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fb.h		3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fbcon.c		4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.c		192
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c		44
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mem.c		137
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mm.h		6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_notifier.c	23
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_object.c	44
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_pm.c		2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_ramht.c		4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c		338
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_state.c		46
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_temp.c		4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_vm.c		17
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_vm.h		19
-rw-r--r--	drivers/gpu/drm/nouveau/nv04_crtc.c		2
-rw-r--r--	drivers/gpu/drm/nouveau/nv04_dfp.c		12
-rw-r--r--	drivers/gpu/drm/nouveau/nv04_fifo.c		17
-rw-r--r--	drivers/gpu/drm/nouveau/nv40_fb.c		59
-rw-r--r--	drivers/gpu/drm/nouveau/nv40_graph.c		46
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_crtc.c		164
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_cursor.c		8
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_dac.c		6
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_display.c		191
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_display.h		42
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_evo.c		290
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_evo.h		8
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_fb.c		51
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_fifo.c		3
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_gpio.c		13
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_graph.c		54
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_instmem.c		2
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_sor.c		6
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_vm.c		20
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_vram.c		65
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_fifo.c		17
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_graph.c		20
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_vm.c		6
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_vram.c		62
-rw-r--r--	include/drm/nouveau_drm.h			1
50 files changed, 1697 insertions, 809 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99917e2..8314a49b6b9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -282,7 +282,7 @@ static void still_alive(void)
 {
 #if 0
 	sync();
-	msleep(2);
+	mdelay(2);
 #endif
 }
 
@@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
 			BIOSLOG(bios, "0x%04X: "
 				"Condition not met, sleeping for 20ms\n",
 				offset);
-			msleep(20);
+			mdelay(20);
 		}
 	}
 
@@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
 		offset, time);
 
-	msleep(time);
+	mdelay(time);
 
 	return 3;
 }
@@ -2962,7 +2962,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	if (time < 1000)
 		udelay(time);
 	else
-		msleep((time + 900) / 1000);
+		mdelay((time + 900) / 1000);
 
 	return 3;
 }
@@ -3856,7 +3856,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
 
 	if (script == LVDS_PANEL_OFF) {
 		/* off-on delay in ms */
-		msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+		mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
 	}
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
@@ -5950,6 +5950,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
 	}
 }
 
+static const u8 hpd_gpio[16] = {
+	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
 static void
 parse_dcb_connector_table(struct nvbios *bios)
 {
@@ -5986,23 +5991,9 @@ parse_dcb_connector_table(struct nvbios *bios)
 
 		cte->type  = (cte->entry & 0x000000ff) >> 0;
 		cte->index2 = (cte->entry & 0x00000f00) >> 8;
-		switch (cte->entry & 0x00033000) {
-		case 0x00001000:
-			cte->gpio_tag = 0x07;
-			break;
-		case 0x00002000:
-			cte->gpio_tag = 0x08;
-			break;
-		case 0x00010000:
-			cte->gpio_tag = 0x51;
-			break;
-		case 0x00020000:
-			cte->gpio_tag = 0x52;
-			break;
-		default:
-			cte->gpio_tag = 0xff;
-			break;
-		}
+
+		cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+		cte->gpio_tag = hpd_gpio[cte->gpio_tag];
 
 		if (cte->type == 0xff)
 			continue;
@@ -6228,7 +6219,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
 		entry->tvconf.has_component_output = false;
 		break;
 	case OUTPUT_LVDS:
-		if ((conn & 0x00003f00) != 0x10)
+		if ((conn & 0x00003f00) >> 8 != 0x10)
 			entry->lvdsconf.use_straps_for_mode = true;
 		entry->lvdsconf.use_power_scripts = true;
 		break;
@@ -6702,11 +6693,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct nvbios *bios = &dev_priv->vbios;
 	struct init_exec iexec = { true, false };
 
-	mutex_lock(&bios->lock);
+	spin_lock_bh(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	mutex_unlock(&bios->lock);
+	spin_unlock_bh(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -6715,7 +6706,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->vbios;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	mutex_init(&bios->lock);
+	spin_lock_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 50a648e01c49..8a54fa7edf5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -251,7 +251,7 @@ struct nvbios {
 	uint8_t digital_min_front_porch;
 	bool fp_no_ddc;
 
-	struct mutex lock;
+	spinlock_t lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26f4654..3fcffcf75e35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -54,8 +54,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 }
 
 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
-		       int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+		       int *align, int *size, int *page_shift)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
@@ -80,7 +80,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
 		}
 	} else {
 		if (likely(dev_priv->chan_vm)) {
-			if (*size > 256 * 1024)
+			if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
 				*page_shift = dev_priv->chan_vm->lpg_shift;
 			else
 				*page_shift = dev_priv->chan_vm->spg_shift;
@@ -98,8 +98,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	       int size, int align, uint32_t flags, uint32_t tile_mode,
-	       uint32_t tile_flags, bool no_vm, bool mappable,
-	       struct nouveau_bo **pnvbo)
+	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
@@ -110,16 +109,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 		return -ENOMEM;
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
-	nvbo->mappable = mappable;
-	nvbo->no_vm = no_vm;
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
-	if (!nvbo->no_vm && dev_priv->chan_vm) {
+	if (dev_priv->chan_vm) {
 		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
 				     NV_MEM_ACCESS_RW, &nvbo->vma);
 		if (ret) {
@@ -128,6 +125,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 		}
 	}
 
+	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -140,11 +138,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}
 	nvbo->channel = NULL;
 
-	if (nvbo->vma.node) {
-		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-			nvbo->bo.offset = nvbo->vma.offset;
-	}
-
+	if (nvbo->vma.node)
+		nvbo->bo.offset = nvbo->vma.offset;
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -166,17 +161,17 @@ static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
 
 	if (dev_priv->card_type == NV_10 &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->bo.mem.num_pages < vram_pages / 2) {
 		/*
 		 * Make sure that the color and depth buffers are handled
 		 * by independent memory controller units. Up to a 9x
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
 			nvbo->placement.fpfn = vram_pages / 2;
 			nvbo->placement.lpfn = ~0;
@@ -314,11 +309,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 	if (ret)
 		return ret;
 
-	if (nvbo->vma.node) {
-		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-			nvbo->bo.offset = nvbo->vma.offset;
-	}
-
+	if (nvbo->vma.node)
+		nvbo->bo.offset = nvbo->vma.offset;
 	return 0;
 }
 
@@ -381,7 +373,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
 	case NOUVEAU_GART_AGP:
 		return ttm_agp_backend_init(bdev, dev->agp->bridge);
 #endif
-	case NOUVEAU_GART_SGDMA:
+	case NOUVEAU_GART_PDMA:
+	case NOUVEAU_GART_HW:
 		return nouveau_sgdma_init_ttm(dev);
 	default:
 		NV_ERROR(dev, "Unknown GART type %d\n",
@@ -427,7 +420,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_WC;
 		break;
 	case TTM_PL_TT:
-		man->func = &ttm_bo_manager_func;
+		if (dev_priv->card_type >= NV_50)
+			man->func = &nouveau_gart_manager;
+		else
+			man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -435,7 +431,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 					    TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
 			break;
-		case NOUVEAU_GART_SGDMA:
+		case NOUVEAU_GART_PDMA:
+		case NOUVEAU_GART_HW:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
@@ -497,45 +494,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 	return ret;
 }
 
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-
-	if (nvbo->no_vm) {
-		if (mem->mem_type == TTM_PL_TT)
-			return NvDmaGART;
-		return NvDmaVRAM;
-	}
-
-	if (mem->mem_type == TTM_PL_TT)
-		return chan->gart_handle;
-	return chan->vram_handle;
-}
-
 static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_mem *old_node = old_mem->mm_node;
+	struct nouveau_mem *new_node = new_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 src_offset = old_mem->start << PAGE_SHIFT;
-	u64 dst_offset = new_mem->start << PAGE_SHIFT;
 	u32 page_count = new_mem->num_pages;
+	u64 src_offset, dst_offset;
 	int ret;
 
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
-
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
+	src_offset = old_node->tmp_vma.offset;
+	if (new_node->tmp_vma.node)
+		dst_offset = new_node->tmp_vma.offset;
+	else
+		dst_offset = nvbo->vma.offset;
 
 	page_count = new_mem->num_pages;
 	while (page_count) {
@@ -570,33 +544,18 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_mem *old_node = old_mem->mm_node;
+	struct nouveau_mem *new_node = new_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
 	u64 src_offset, dst_offset;
 	int ret;
 
-	src_offset = old_mem->start << PAGE_SHIFT;
-	dst_offset = new_mem->start << PAGE_SHIFT;
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
-
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
-
-	ret = RING_SPACE(chan, 3);
-	if (ret)
-		return ret;
-
-	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+	src_offset = old_node->tmp_vma.offset;
+	if (new_node->tmp_vma.node)
+		dst_offset = new_node->tmp_vma.offset;
+	else
+		dst_offset = nvbo->vma.offset;
 
 	while (length) {
 		u32 amount, stride, height;
@@ -677,6 +636,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return 0;
 }
 
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+	if (mem->mem_type == TTM_PL_TT)
+		return chan->gart_handle;
+	return chan->vram_handle;
+}
+
 static int
 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -730,15 +698,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct nouveau_channel *chan;
 	int ret;
 
 	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm) {
+	if (!chan) {
 		chan = dev_priv->channel;
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
 
+	/* create temporary vma for old memory, this will get cleaned
+	 * up after ttm destroys the ttm_mem_reg
+	 */
+	if (dev_priv->card_type >= NV_50) {
+		struct nouveau_mem *node = old_mem->mm_node;
+		if (!node->tmp_vma.node) {
+			u32 page_shift = nvbo->vma.node->type;
+			if (old_mem->mem_type == TTM_PL_TT)
+				page_shift = nvbo->vma.vm->spg_shift;
+
+			ret = nouveau_vm_get(chan->vm,
+					     old_mem->num_pages << PAGE_SHIFT,
+					     page_shift, NV_MEM_ACCESS_RO,
+					     &node->tmp_vma);
+			if (ret)
+				goto out;
+		}
+
+		if (old_mem->mem_type == TTM_PL_VRAM)
+			nouveau_vm_map(&node->tmp_vma, node);
+		else {
+			nouveau_vm_map_sg(&node->tmp_vma, 0,
+					  old_mem->num_pages << PAGE_SHIFT,
+					  node, node->pages);
+		}
+	}
+
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
@@ -752,6 +748,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 			    no_wait_gpu, new_mem);
 	}
 
+out:
 	if (chan == dev_priv->channel)
 		mutex_unlock(&chan->mutex);
 	return ret;
@@ -762,6 +759,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
 		      struct ttm_mem_reg *new_mem)
 {
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
@@ -781,11 +779,27 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
+	if (dev_priv->card_type >= NV_50) {
+		struct nouveau_bo *nvbo = nouveau_bo(bo);
+		struct nouveau_mem *node = tmp_mem.mm_node;
+		struct nouveau_vma *vma = &nvbo->vma;
+		if (vma->node->type != vma->vm->spg_shift)
+			vma = &node->tmp_vma;
+		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+				  node, node->pages);
+	}
+
 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+	if (dev_priv->card_type >= NV_50) {
+		struct nouveau_bo *nvbo = nouveau_bo(bo);
+		nouveau_vm_unmap(&nvbo->vma);
+	}
+
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -811,11 +825,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
@@ -824,6 +838,36 @@ out:
 	return ret;
 }
 
+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_mem *node = new_mem->mm_node;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vma *vma = &nvbo->vma;
+	struct nouveau_vm *vm = vma->vm;
+
+	if (dev_priv->card_type < NV_50)
+		return;
+
+	switch (new_mem->mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_vm_map(vma, node);
+		break;
+	case TTM_PL_TT:
+		if (vma->node->type != vm->spg_shift) {
+			nouveau_vm_unmap(vma);
+			vma = &node->tmp_vma;
+		}
+		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+				  node, node->pages);
+		break;
+	default:
+		nouveau_vm_unmap(&nvbo->vma);
+		break;
+	}
+}
+
 static int
 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 		   struct nouveau_tile_reg **new_tile)
@@ -831,19 +875,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	uint64_t offset;
+	u64 offset = new_mem->start << PAGE_SHIFT;
 
-	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
-		/* Nothing to do. */
-		*new_tile = NULL;
+	*new_tile = NULL;
+	if (new_mem->mem_type != TTM_PL_VRAM)
 		return 0;
-	}
 
-	offset = new_mem->start << PAGE_SHIFT;
-
-	if (dev_priv->chan_vm) {
-		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
-	} else if (dev_priv->card_type >= NV_10) {
+	if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
 						nvbo->tile_mode,
 						nvbo->tile_flags);
@@ -860,11 +898,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = dev_priv->dev;
 
-	if (dev_priv->card_type >= NV_10 &&
-	    dev_priv->card_type < NV_50) {
-		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
-		*old_tile = new_tile;
-	}
+	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+	*old_tile = new_tile;
 }
 
 static int
@@ -878,9 +913,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	struct nouveau_tile_reg *new_tile = NULL;
 	int ret = 0;
 
-	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
-	if (ret)
-		return ret;
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+		if (ret)
+			return ret;
+	}
 
 	/* Fake bo copy. */
 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@@ -911,10 +948,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
-	if (ret)
-		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
-	else
-		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+	if (dev_priv->card_type < NV_50) {
+		if (ret)
+			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+		else
+			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+	}
 
 	return ret;
 }
@@ -955,7 +994,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		break;
 	case TTM_PL_VRAM:
 	{
-		struct nouveau_vram *vram = mem->mm_node;
+		struct nouveau_mem *node = mem->mm_node;
 		u8 page_shift;
 
 		if (!dev_priv->bar1_vm) {
@@ -966,23 +1005,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		}
 
 		if (dev_priv->card_type == NV_C0)
-			page_shift = vram->page_shift;
+			page_shift = node->page_shift;
 		else
 			page_shift = 12;
 
 		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
 				     page_shift, NV_MEM_ACCESS_RW,
-				     &vram->bar_vma);
+				     &node->bar_vma);
 		if (ret)
 			return ret;
 
-		nouveau_vm_map(&vram->bar_vma, vram);
+		nouveau_vm_map(&node->bar_vma, node);
 		if (ret) {
-			nouveau_vm_put(&vram->bar_vma);
+			nouveau_vm_put(&node->bar_vma);
 			return ret;
 		}
 
-		mem->bus.offset = vram->bar_vma.offset;
+		mem->bus.offset = node->bar_vma.offset;
 		if (dev_priv->card_type == NV_50) /*XXX*/
 			mem->bus.offset -= 0x0020000000ULL;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -999,16 +1038,16 @@ static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct nouveau_vram *vram = mem->mm_node;
+	struct nouveau_mem *node = mem->mm_node;
 
 	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
 		return;
 
-	if (!vram->bar_vma.node)
+	if (!node->bar_vma.node)
 		return;
 
-	nouveau_vm_unmap(&vram->bar_vma);
-	nouveau_vm_put(&vram->bar_vma);
+	nouveau_vm_unmap(&node->bar_vma);
+	nouveau_vm_put(&node->bar_vma);
 }
 
 static int
@@ -1058,6 +1097,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
+	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
 	.sync_obj_signaled = __nouveau_fence_signalled,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 3960d66d7aba..3837090d66af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *pb = chan->pushbuf_bo;
 	struct nouveau_gpuobj *pushbuf = NULL;
-	int ret;
+	int ret = 0;
 
 	if (dev_priv->card_type >= NV_50) {
 		if (dev_priv->card_type < NV_C0) {
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 	else
 		location = TTM_PL_FLAG_TT;
 
-	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
-			     true, &pushbuf);
+	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
 	if (ret) {
 		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
 		return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e00076839..390d82c3c4b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
 	int high_w = 0, high_h = 0, high_v = 0;
 
 	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
 		if (helper->mode_valid(connector, mode) != MODE_OK ||
 		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
 			continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 505c6bfb4d75..764c15d537ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,7 @@
 #include "nouveau_hw.h"
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
+#include "nv50_display.h"
 
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -61,18 +62,59 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
 };
 
 int
-nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
-			 struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
+nouveau_framebuffer_init(struct drm_device *dev,
+			 struct nouveau_framebuffer *nv_fb,
+			 struct drm_mode_fb_cmd *mode_cmd,
+			 struct nouveau_bo *nvbo)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *fb = &nv_fb->base;
 	int ret;
 
-	ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
+	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
 	if (ret) {
 		return ret;
 	}
 
-	drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
-	nouveau_fb->nvbo = nvbo;
+	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+	nv_fb->nvbo = nvbo;
+
+	if (dev_priv->card_type >= NV_50) {
+		u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+		if (tile_flags == 0x7a00 ||
+		    tile_flags == 0xfe00)
+			nv_fb->r_dma = NvEvoFB32;
+		else
+		if (tile_flags == 0x7000)
+			nv_fb->r_dma = NvEvoFB16;
+		else
+			nv_fb->r_dma = NvEvoVRAM_LP;
+
+		switch (fb->depth) {
+		case  8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
+		case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
+		case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+		case 24:
+		case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
+		case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+		default:
+			 NV_ERROR(dev, "unknown depth %d\n", fb->depth);
+			 return -EINVAL;
+		}
+
+		if (dev_priv->chipset == 0x50)
+			nv_fb->r_format |= (tile_flags << 8);
+
+		if (!tile_flags)
+			nv_fb->r_pitch = 0x00100000 | fb->pitch;
+		else {
+			u32 mode = nvbo->tile_mode;
+			if (dev_priv->card_type >= NV_C0)
+				mode >>= 4;
+			nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+		}
+	}
+
 	return 0;
 }
 
@@ -182,6 +224,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 			struct nouveau_page_flip_state *s,
 			struct nouveau_fence **pfence)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
 	unsigned long flags;
 	int ret;
@@ -201,9 +244,12 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	if (ret)
 		goto fail;
 
-	BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-	OUT_RING(chan, 0);
-	FIRE_RING(chan);
+	if (dev_priv->card_type < NV_C0)
+		BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+	else
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
+	OUT_RING  (chan, 0);
+	FIRE_RING (chan);
 
 	ret = nouveau_fence_new(chan, pfence, true);
 	if (ret)
@@ -244,7 +290,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	/* Initialize a page flip struct */
 	*s = (struct nouveau_page_flip_state)
-		{ { }, s->event, nouveau_crtc(crtc)->index,
+		{ { }, event, nouveau_crtc(crtc)->index,
 		  fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
 		  new_bo->bo.offset };
 
@@ -255,6 +301,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	mutex_lock(&chan->mutex);
 
 	/* Emit a page flip */
+	if (dev_priv->card_type >= NV_50) {
+		ret = nv50_display_flip_next(crtc, fb, chan);
+		if (ret) {
+			nouveau_channel_put(&chan);
+			goto fail_unreserve;
+		}
+	}
+
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
 	nouveau_channel_put(&chan);
 	if (ret)
@@ -305,7 +359,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	}
 
 	list_del(&s->head);
-	*ps = *s;
+	if (ps)
+		*ps = *s;
 	kfree(s);
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..1ef39be996ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -96,13 +96,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	OUT_RING(chan, 0);
 
 	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
-	ret = RING_SPACE(chan, 4);
+	ret = RING_SPACE(chan, 6);
 	if (ret)
 		return ret;
 	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
-	OUT_RING(chan, NvM2MF);
-	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
-	OUT_RING(chan, NvNotify0);
+	OUT_RING  (chan, NvM2MF);
+	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+	OUT_RING  (chan, NvNotify0);
+	OUT_RING  (chan, chan->vram_handle);
+	OUT_RING  (chan, chan->gart_handle);
 
 	/* Sit back and pray the channel works.. */
 	FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c36f1763feaa..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -61,8 +61,6 @@ enum {
 	NvM2MF		= 0x80000001,
 	NvDmaFB		= 0x80000002,
 	NvDmaTT		= 0x80000003,
-	NvDmaVRAM	= 0x80000004,
-	NvDmaGART	= 0x80000005,
 	NvNotify0	= 0x80000006,
 	Nv2D		= 0x80000007,
 	NvCtxSurf2D	= 0x80000008,
@@ -73,12 +71,15 @@ enum {
 	NvImageBlit	= 0x8000000d,
 	NvSw		= 0x8000000e,
 	NvSema		= 0x8000000f,
+	NvEvoSema0	= 0x80000010,
+	NvEvoSema1	= 0x80000011,
 
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
 	NvEvoFB16	= 0x01000001,
 	NvEvoFB32	= 0x01000002,
-	NvEvoVRAM_LP	= 0x01000003
+	NvEvoVRAM_LP	= 0x01000003,
+	NvEvoSync	= 0xcafe0000
 };
 
 #define NV_MEMORY_TO_MEMORY_FORMAT	0x00000039
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 38d599554bce..7beb82a0315d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -175,7 +175,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
-	struct bit_displayport_encoder_table_entry *dpse;
 	struct bit_displayport_encoder_table *dpe;
 	int ret, i, dpe_headerlen, vs = 0, pre = 0;
 	uint8_t request[2];
@@ -183,7 +182,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
 	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
 	if (!dpe)
 		return false;
-	dpse = (void *)((char *)dpe + dpe_headerlen);
 
 	ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..00aff226397d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
 #include "nouveau_util.h"
 
 struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
 #include "nouveau_vm.h"
 
 #define MAX_NUM_DCB_ENTRIES 16
@@ -65,13 +65,16 @@ struct nouveau_vram;
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
 
-struct nouveau_vram {
+struct nouveau_mem {
 	struct drm_device *dev;
 
 	struct nouveau_vma bar_vma;
+	struct nouveau_vma tmp_vma;
 	u8  page_shift;
 
+	struct drm_mm_node *tag;
 	struct list_head regions;
+	dma_addr_t *pages;
 	u32 memtype;
 	u64 offset;
 	u64 size;
@@ -90,6 +93,7 @@ struct nouveau_tile_reg {
 struct nouveau_bo {
 	struct ttm_buffer_object bo;
 	struct ttm_placement placement;
+	u32 valid_domains;
 	u32 placements[3];
 	u32 busy_placements[3];
 	struct ttm_bo_kmap_obj kmap;
@@ -104,8 +108,6 @@ struct nouveau_bo {
 	struct nouveau_channel *channel;
 
 	struct nouveau_vma vma;
-	bool mappable;
-	bool no_vm;
 
 	uint32_t tile_mode;
 	uint32_t tile_flags;
@@ -387,6 +389,7 @@ struct nouveau_pgraph_engine {
 };
 
 struct nouveau_display_engine {
+	void *priv;
 	int (*early_init)(struct drm_device *);
 	void (*late_takedown)(struct drm_device *);
 	int (*create)(struct drm_device *);
@@ -509,8 +512,8 @@ struct nouveau_crypt_engine {
 struct nouveau_vram_engine {
 	int  (*init)(struct drm_device *);
 	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
-		    u32 type, struct nouveau_vram **);
-	void (*put)(struct drm_device *, struct nouveau_vram **);
+		    u32 type, struct nouveau_mem **);
+	void (*put)(struct drm_device *, struct nouveau_mem **);
 
 	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
 };
@@ -652,8 +655,6 @@ struct drm_nouveau_private {
 	/* interrupt handling */
 	void (*irq_handler[32])(struct drm_device *);
 	bool msi_enabled;
-	struct workqueue_struct *wq;
-	struct work_struct irq_work;
 
 	struct list_head vbl_waiting;
 
@@ -691,15 +692,22 @@ struct drm_nouveau_private {
 	struct {
 		enum {
 			NOUVEAU_GART_NONE = 0,
-			NOUVEAU_GART_AGP,
-			NOUVEAU_GART_SGDMA
+			NOUVEAU_GART_AGP,	/* AGP */
+			NOUVEAU_GART_PDMA,	/* paged dma object */
+			NOUVEAU_GART_HW		/* on-chip gart/vm */
 		} type;
 		uint64_t aper_base;
 		uint64_t aper_size;
 		uint64_t aper_free;
 
+		struct ttm_backend_func *func;
+
+		struct {
+			struct page *page;
+			dma_addr_t   addr;
+		} dummy;
+
 		struct nouveau_gpuobj *sg_ctxdma;
-		struct nouveau_vma vma;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
@@ -740,14 +748,6 @@ struct drm_nouveau_private {
 
 	struct backlight_device *backlight;
 
-	struct nouveau_channel *evo;
-	u32 evo_alloc;
-	struct {
-		struct dcb_entry *dcb;
-		u16 script;
-		u32 pclk;
-	} evo_irq;
-
 	struct {
 		struct dentry *channel_root;
 	} debugfs;
@@ -847,6 +847,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
 				     struct nouveau_tile_reg *tile,
 				     struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
 
 /* nouveau_notifier.c */
 extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -1294,7 +1295,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
			  int size, int align, uint32_t flags,
			  uint32_t tile_mode, uint32_t tile_flags,
-			  bool no_vm, bool mappable, struct nouveau_bo **);
+			  struct nouveau_bo **);
 extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
 extern int nouveau_bo_unpin(struct nouveau_bo *);
 extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1355,9 +1356,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
-			   int size, int align, uint32_t flags,
+			   int size, int align, uint32_t domain,
			   uint32_t tile_mode, uint32_t tile_flags,
-			   bool no_vm, bool mappable, struct nouveau_bo **);
+			   struct nouveau_bo **);
 extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index d432134b71e0..a3a88ad00f86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,9 @@
 struct nouveau_framebuffer {
 	struct drm_framebuffer base;
 	struct nouveau_bo *nvbo;
+	u32 r_dma;
+	u32 r_format;
+	u32 r_pitch;
 };
 
 static inline struct nouveau_framebuffer *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 60769d2f9a66..889c4454682e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = roundup(size, PAGE_SIZE);
 
-	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
-			      0, 0x0000, false, true, &nvbo);
+	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
+			      NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
 	if (ret) {
 		NV_ERROR(dev, "failed to allocate framebuffer\n");
 		goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 221b8462ea37..a244702bb227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,8 +32,7 @@
 #include "nouveau_dma.h"
 
 #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
-		       nouveau_private(dev)->card_type < NV_C0)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
 
 struct nouveau_fence {
 	struct nouveau_channel *channel;
@@ -259,11 +258,12 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 }
 
 static struct nouveau_semaphore *
-alloc_semaphore(struct drm_device *dev)
+semaphore_alloc(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_semaphore *sema;
-	int ret;
+	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
+	int ret, i;
 
 	if (!USE_SEMA(dev))
 		return NULL;
@@ -277,9 +277,9 @@ alloc_semaphore(struct drm_device *dev)
 		goto fail;
 
 	spin_lock(&dev_priv->fence.lock);
-	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
 	if (sema->mem)
-		sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
+		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
 	spin_unlock(&dev_priv->fence.lock);
 
 	if (!sema->mem)
@@ -287,7 +287,8 @@ alloc_semaphore(struct drm_device *dev)
 
 	kref_init(&sema->ref);
 	sema->dev = dev;
-	nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
+		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
 
 	return sema;
 fail:
@@ -296,7 +297,7 @@ fail:
 }
 
 static void
-free_semaphore(struct kref *ref)
+semaphore_free(struct kref *ref)
 {
 	struct nouveau_semaphore *sema =
 		container_of(ref, struct nouveau_semaphore, ref);
@@ -318,61 +319,107 @@ semaphore_work(void *priv, bool signalled)
 	if (unlikely(!signalled))
 		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
 
-	kref_put(&sema->ref, free_semaphore);
+	kref_put(&sema->ref, semaphore_free);
 }
 
 static int
-emit_semaphore(struct nouveau_channel *chan, int method,
-	       struct nouveau_semaphore *sema)
+semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 {
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-	struct nouveau_fence *fence;
-	bool smart = (dev_priv->card_type >= NV_50);
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_fence *fence = NULL;
 	int ret;
 
-	ret = RING_SPACE(chan, smart ? 8 : 4);
+	if (dev_priv->chipset < 0x84) {
+		ret = RING_SPACE(chan, 3);
+		if (ret)
+			return ret;
+
+		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+		OUT_RING  (chan, sema->mem->start);
+		OUT_RING  (chan, 1);
+	} else
+	if (dev_priv->chipset < 0xc0) {
+		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+		u64 offset = vma->offset + sema->mem->start;
+
+		ret = RING_SPACE(chan, 5);
+		if (ret)
+			return ret;
+
+		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+		OUT_RING  (chan, upper_32_bits(offset));
+		OUT_RING  (chan, lower_32_bits(offset));
+		OUT_RING  (chan, 1);
+		OUT_RING  (chan, 1); /* ACQUIRE_EQ */
+	} else {
+		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+		u64 offset = vma->offset + sema->mem->start;
+
+		ret = RING_SPACE(chan, 5);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+		OUT_RING  (chan, upper_32_bits(offset));
+		OUT_RING  (chan, lower_32_bits(offset));
+		OUT_RING  (chan, 1);
+		OUT_RING  (chan, 0x1001); /* ACQUIRE_EQ */
+	}
+
+	/* Delay semaphore destruction until its work is done */
+	ret = nouveau_fence_new(chan, &fence, true);
 	if (ret)
 		return ret;
 
-	if (smart) {
-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
-		OUT_RING(chan, NvSema);
-	}
-	BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
-	OUT_RING(chan, sema->mem->start);
-
-	if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
-		/*
-		 * NV50 tries to be too smart and context-switch
-		 * between semaphores instead of doing a "first come,
-		 * first served" strategy like previous cards
-		 * do.
-		 *
-		 * That's bad because the ACQUIRE latency can get as
-		 * large as the PFIFO context time slice in the
-		 * typical DRI2 case where you have several
-		 * outstanding semaphores at the same moment.
-		 *
-		 * If we're going to ACQUIRE, force the card to
-		 * context switch before, just in case the matching
-		 * RELEASE is already scheduled to be executed in
-		 * another channel.
-		 */
-		BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
-		OUT_RING(chan, 0);
-	}
+	kref_get(&sema->ref);
+	nouveau_fence_work(fence, semaphore_work, sema);
+	nouveau_fence_unref(&fence);
+	return 0;
+}
+
+static int
+semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	if (dev_priv->chipset < 0x84) {
+		ret = RING_SPACE(chan, 4);
+		if (ret)
+			return ret;
+
+		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+		OUT_RING  (chan, sema->mem->start);
+		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
+		OUT_RING  (chan, 1);
+	} else
+	if (dev_priv->chipset < 0xc0) {
+		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+		u64 offset = vma->offset + sema->mem->start;
 
-	BEGIN_RING(chan, NvSubSw, method, 1);
-	OUT_RING(chan, 1);
-
-	if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
-		/*
-		 * Force the card to context switch, there may be
-		 * another channel waiting for the semaphore we just
-		 * released.
-		 */
-		BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
-		OUT_RING(chan, 0);
+		ret = RING_SPACE(chan, 5);
+		if (ret)
+			return ret;
+
+		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+		OUT_RING  (chan, upper_32_bits(offset));
+		OUT_RING  (chan, lower_32_bits(offset));
+		OUT_RING  (chan, 1);
+		OUT_RING  (chan, 2); /* RELEASE */
+	} else {
+		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+		u64 offset = vma->offset + sema->mem->start;
+
+		ret = RING_SPACE(chan, 5);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+		OUT_RING  (chan, upper_32_bits(offset));
+		OUT_RING  (chan, lower_32_bits(offset));
+		OUT_RING  (chan, 1);
+		OUT_RING  (chan, 0x1002); /* RELEASE */
 	}
 
 	/* Delay semaphore destruction until its work is done */
@@ -383,7 +430,6 @@ emit_semaphore(struct nouveau_channel *chan, int method,
383 kref_get(&sema->ref); 430 kref_get(&sema->ref);
384 nouveau_fence_work(fence, semaphore_work, sema); 431 nouveau_fence_work(fence, semaphore_work, sema);
385 nouveau_fence_unref(&fence); 432 nouveau_fence_unref(&fence);
386
387 return 0; 433 return 0;
388} 434}
389 435
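
Three code paths now emit the semaphore: software NV_SW methods before G84, a four-word DMA-semaphore method (offset high, offset low, sequence, operation) on G84 through pre-Fermi, and the same layout via BEGIN_NVC0 on Fermi. A small sketch of just the final operation word, as read from the OUT_RINGs above (enum and function names are illustrative):

#include <stdint.h>

enum sema_op { SEMA_ACQUIRE, SEMA_RELEASE };

static uint32_t sema_data(uint32_t chipset, enum sema_op op)
{
	if (chipset < 0x84)		/* NV_SW methods: value is always 1 */
		return 1;
	if (chipset < 0xc0)		/* nv50: 1 = ACQUIRE_EQ, 2 = RELEASE */
		return op == SEMA_ACQUIRE ? 1 : 2;
	return op == SEMA_ACQUIRE ?	/* fermi adds a 0x1000 flag bit */
		0x1001 : 0x1002;
}
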
@@ -400,7 +446,7 @@ nouveau_fence_sync(struct nouveau_fence *fence,
400 nouveau_fence_signalled(fence))) 446 nouveau_fence_signalled(fence)))
401 goto out; 447 goto out;
402 448
403 sema = alloc_semaphore(dev); 449 sema = semaphore_alloc(dev);
404 if (!sema) { 450 if (!sema) {
405 /* Early card or broken userspace, fall back to 451 /* Early card or broken userspace, fall back to
406 * software sync. */ 452 * software sync. */
@@ -418,17 +464,17 @@ nouveau_fence_sync(struct nouveau_fence *fence,
418 } 464 }
419 465
420 /* Make wchan wait until it gets signalled */ 466 /* Make wchan wait until it gets signalled */
421 ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); 467 ret = semaphore_acquire(wchan, sema);
422 if (ret) 468 if (ret)
423 goto out_unlock; 469 goto out_unlock;
424 470
425 /* Signal the semaphore from chan */ 471 /* Signal the semaphore from chan */
426 ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); 472 ret = semaphore_release(chan, sema);
427 473
428out_unlock: 474out_unlock:
429 mutex_unlock(&chan->mutex); 475 mutex_unlock(&chan->mutex);
430out_unref: 476out_unref:
431 kref_put(&sema->ref, free_semaphore); 477 kref_put(&sema->ref, semaphore_free);
432out: 478out:
433 if (chan) 479 if (chan)
434 nouveau_channel_put_unlocked(&chan); 480 nouveau_channel_put_unlocked(&chan);
@@ -449,22 +495,23 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
449 struct nouveau_gpuobj *obj = NULL; 495 struct nouveau_gpuobj *obj = NULL;
450 int ret; 496 int ret;
451 497
498 if (dev_priv->card_type >= NV_C0)
499 goto out_initialised;
500
452 /* Create an NV_SW object for various sync purposes */ 501 /* Create an NV_SW object for various sync purposes */
453 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW); 502 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
454 if (ret) 503 if (ret)
455 return ret; 504 return ret;
456 505
457 /* we leave subchannel empty for nvc0 */ 506 /* we leave subchannel empty for nvc0 */
458 if (dev_priv->card_type < NV_C0) { 507 ret = RING_SPACE(chan, 2);
459 ret = RING_SPACE(chan, 2); 508 if (ret)
460 if (ret) 509 return ret;
461 return ret; 510 BEGIN_RING(chan, NvSubSw, 0, 1);
462 BEGIN_RING(chan, NvSubSw, 0, 1); 511 OUT_RING(chan, NvSw);
463 OUT_RING(chan, NvSw);
464 }
465 512
466 /* Create a DMA object for the shared cross-channel sync area. */ 513 /* Create a DMA object for the shared cross-channel sync area. */
467 if (USE_SEMA(dev)) { 514 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
468 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; 515 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
469 516
470 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 517 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -484,14 +531,20 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
484 return ret; 531 return ret;
485 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); 532 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
486 OUT_RING(chan, NvSema); 533 OUT_RING(chan, NvSema);
534 } else {
535 ret = RING_SPACE(chan, 2);
536 if (ret)
537 return ret;
538 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
539 OUT_RING (chan, chan->vram_handle); /* whole VM */
487 } 540 }
488 541
489 FIRE_RING(chan); 542 FIRE_RING(chan);
490 543
544out_initialised:
491 INIT_LIST_HEAD(&chan->fence.pending); 545 INIT_LIST_HEAD(&chan->fence.pending);
492 spin_lock_init(&chan->fence.lock); 546 spin_lock_init(&chan->fence.lock);
493 atomic_set(&chan->fence.last_sequence_irq, 0); 547 atomic_set(&chan->fence.last_sequence_irq, 0);
494
495 return 0; 548 return 0;
496} 549}
497 550
@@ -519,12 +572,13 @@ int
519nouveau_fence_init(struct drm_device *dev) 572nouveau_fence_init(struct drm_device *dev)
520{ 573{
521 struct drm_nouveau_private *dev_priv = dev->dev_private; 574 struct drm_nouveau_private *dev_priv = dev->dev_private;
575 int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
522 int ret; 576 int ret;
523 577
524 /* Create a shared VRAM heap for cross-channel sync. */ 578 /* Create a shared VRAM heap for cross-channel sync. */
525 if (USE_SEMA(dev)) { 579 if (USE_SEMA(dev)) {
526 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 580 ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
527 0, 0, false, true, &dev_priv->fence.bo); 581 0, 0, &dev_priv->fence.bo);
528 if (ret) 582 if (ret)
529 return ret; 583 return ret;
530 584
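
Note that the heap growth matches the per-semaphore growth: 4096/4 and 16384/16 both give 1024 slots, so the number of in-flight semaphores is unchanged. A one-line check:

#include <assert.h>

int main(void)
{
	assert(4096 / 4 == 16384 / 16);		/* 1024 slots either way */
	return 0;
}
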
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 506c508b7eda..3ce58d2222cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,19 +61,36 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
61 61
62int 62int
63nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, 63nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
64 int size, int align, uint32_t flags, uint32_t tile_mode, 64 int size, int align, uint32_t domain, uint32_t tile_mode,
65 uint32_t tile_flags, bool no_vm, bool mappable, 65 uint32_t tile_flags, struct nouveau_bo **pnvbo)
66 struct nouveau_bo **pnvbo)
67{ 66{
67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_bo *nvbo; 68 struct nouveau_bo *nvbo;
69 u32 flags = 0;
69 int ret; 70 int ret;
70 71
72 if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
73 flags |= TTM_PL_FLAG_VRAM;
74 if (domain & NOUVEAU_GEM_DOMAIN_GART)
75 flags |= TTM_PL_FLAG_TT;
76 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
77 flags |= TTM_PL_FLAG_SYSTEM;
78
71 ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, 79 ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
72 tile_flags, no_vm, mappable, pnvbo); 80 tile_flags, pnvbo);
73 if (ret) 81 if (ret)
74 return ret; 82 return ret;
75 nvbo = *pnvbo; 83 nvbo = *pnvbo;
76 84
85 /* we restrict allowed domains on nv50+ to only the types
 86 * that were requested at creation time. not possible on
87 * earlier chips without busting the ABI.
88 */
89 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
90 NOUVEAU_GEM_DOMAIN_GART;
91 if (dev_priv->card_type >= NV_50)
92 nvbo->valid_domains &= domain;
93
77 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 94 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
78 if (!nvbo->gem) { 95 if (!nvbo->gem) {
79 nouveau_bo_ref(NULL, pnvbo); 96 nouveau_bo_ref(NULL, pnvbo);
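
nouveau_gem_new() now accepts GEM domain bits and derives TTM placement flags itself (previously the ioctl did this). A stand-alone sketch of the mapping, with the flag values stubbed in for illustration (the real definitions live in nouveau_drm.h and TTM's placement headers):

#include <stdint.h>

#define NOUVEAU_GEM_DOMAIN_CPU  (1 << 0)	/* illustrative values */
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define TTM_PL_FLAG_SYSTEM      (1 << 0)
#define TTM_PL_FLAG_TT          (1 << 1)
#define TTM_PL_FLAG_VRAM        (1 << 2)

static uint32_t domain_to_flags(uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	/* no placement requested, or an explicit CPU domain: system RAM */
	if (!flags || (domain & NOUVEAU_GEM_DOMAIN_CPU))
		flags |= TTM_PL_FLAG_SYSTEM;
	return flags;
}
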
@@ -97,7 +114,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
97 114
98 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; 115 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
99 rep->offset = nvbo->bo.offset; 116 rep->offset = nvbo->bo.offset;
100 rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0; 117 rep->map_handle = nvbo->bo.addr_space_offset;
101 rep->tile_mode = nvbo->tile_mode; 118 rep->tile_mode = nvbo->tile_mode;
102 rep->tile_flags = nvbo->tile_flags; 119 rep->tile_flags = nvbo->tile_flags;
103 return 0; 120 return 0;
@@ -111,19 +128,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
111 struct drm_nouveau_gem_new *req = data; 128 struct drm_nouveau_gem_new *req = data;
112 struct nouveau_bo *nvbo = NULL; 129 struct nouveau_bo *nvbo = NULL;
113 struct nouveau_channel *chan = NULL; 130 struct nouveau_channel *chan = NULL;
114 uint32_t flags = 0;
115 int ret = 0; 131 int ret = 0;
116 132
117 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) 133 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
118 dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; 134 dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
119 135
120 if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
121 flags |= TTM_PL_FLAG_VRAM;
122 if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
123 flags |= TTM_PL_FLAG_TT;
124 if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
125 flags |= TTM_PL_FLAG_SYSTEM;
126
127 if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { 136 if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
128 NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); 137 NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
129 return -EINVAL; 138 return -EINVAL;
@@ -135,10 +144,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
135 return PTR_ERR(chan); 144 return PTR_ERR(chan);
136 } 145 }
137 146
138 ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, 147 ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
139 req->info.tile_mode, req->info.tile_flags, false, 148 req->info.domain, req->info.tile_mode,
140 (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), 149 req->info.tile_flags, &nvbo);
141 &nvbo);
142 if (chan) 150 if (chan)
143 nouveau_channel_put(&chan); 151 nouveau_channel_put(&chan);
144 if (ret) 152 if (ret)
@@ -161,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
161{ 169{
162 struct nouveau_bo *nvbo = gem->driver_private; 170 struct nouveau_bo *nvbo = gem->driver_private;
163 struct ttm_buffer_object *bo = &nvbo->bo; 171 struct ttm_buffer_object *bo = &nvbo->bo;
164 uint32_t domains = valid_domains & 172 uint32_t domains = valid_domains & nvbo->valid_domains &
165 (write_domains ? write_domains : read_domains); 173 (write_domains ? write_domains : read_domains);
166 uint32_t pref_flags = 0, valid_flags = 0; 174 uint32_t pref_flags = 0, valid_flags = 0;
167 175
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 123969dd4f56..63b9040b5f30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
152{ 152{
153 struct drm_nouveau_private *dev_priv = dev->dev_private; 153 struct drm_nouveau_private *dev_priv = dev->dev_private;
154 154
155 nouveau_bo_unpin(dev_priv->vga_ram);
156 nouveau_bo_ref(NULL, &dev_priv->vga_ram); 155 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
157 156
158 ttm_bo_device_release(&dev_priv->ttm.bdev); 157 ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -393,11 +392,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
393 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 392 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
394 int ret, dma_bits; 393 int ret, dma_bits;
395 394
396 if (dev_priv->card_type >= NV_50 && 395 dma_bits = 32;
397 pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 396 if (dev_priv->card_type >= NV_50) {
398 dma_bits = 40; 397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
399 else 398 dma_bits = 40;
400 dma_bits = 32; 399 } else
400 if (drm_pci_device_is_pcie(dev) &&
401 dev_priv->chipset != 0x40 &&
402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
404 dma_bits = 39;
405 }
401 406
402 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); 407 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
403 if (ret) 408 if (ret)
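
The DMA-mask selection is restructured: nv50+ tries 40 bits, and pre-nv50 PCIe parts other than the 0x40/0x45 bridge chips now try 39 bits (consistent with the nv41-style GART PTEs added later in this series, which store addresses shifted right by 7); everything else stays at 32. A compact sketch of the decision, assuming a caller-supplied capability probe:

#include <stdbool.h>
#include <stdint.h>

static int pick_dma_bits(bool nv50_or_later, bool pcie, uint32_t chipset,
			 bool (*dma_supported)(int bits))
{
	if (nv50_or_later && dma_supported(40))
		return 40;
	if (!nv50_or_later && pcie &&
	    chipset != 0x40 && chipset != 0x45 && dma_supported(39))
		return 39;
	return 32;
}
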
@@ -455,13 +460,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
455 return ret; 460 return ret;
456 } 461 }
457 462
458 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, 463 if (dev_priv->card_type < NV_50) {
459 0, 0, true, true, &dev_priv->vga_ram); 464 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
460 if (ret == 0) 465 0, 0, &dev_priv->vga_ram);
461 ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM); 466 if (ret == 0)
462 if (ret) { 467 ret = nouveau_bo_pin(dev_priv->vga_ram,
463 NV_WARN(dev, "failed to reserve VGA memory\n"); 468 TTM_PL_FLAG_VRAM);
464 nouveau_bo_ref(NULL, &dev_priv->vga_ram); 469
470 if (ret) {
471 NV_WARN(dev, "failed to reserve VGA memory\n");
472 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
473 }
465 } 474 }
466 475
467 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), 476 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -666,13 +675,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
666{ 675{
667 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 676 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
668 struct nouveau_mm *mm; 677 struct nouveau_mm *mm;
669 u32 b_size; 678 u64 size, block, rsvd;
670 int ret; 679 int ret;
671 680
672 p_size = (p_size << PAGE_SHIFT) >> 12; 681 rsvd = (256 * 1024); /* vga memory */
673 b_size = dev_priv->vram_rblock_size >> 12; 682 size = (p_size << PAGE_SHIFT) - rsvd;
683 block = dev_priv->vram_rblock_size;
674 684
675 ret = nouveau_mm_init(&mm, 0, p_size, b_size); 685 ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
676 if (ret) 686 if (ret)
677 return ret; 687 return ret;
678 688
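
The VRAM manager now carves the first 256 KiB out of the heap itself (the VGA scanout reservation, matching the removal of the pinned vga_ram buffer on nv50+ earlier in this file). A sketch of the arithmetic in the 4 KiB units nouveau_mm_init() takes, assuming PAGE_SHIFT is 12:

#include <stdint.h>

struct mm_args { uint64_t offset, length, block; };	/* illustrative */

static struct mm_args vram_mm_args(uint64_t p_size_pages, uint64_t rblock)
{
	uint64_t rsvd = 256 * 1024;			/* vga memory */
	uint64_t size = (p_size_pages << 12) - rsvd;

	/* nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12) */
	return (struct mm_args){ rsvd >> 12, size >> 12, rblock >> 12 };
}
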
@@ -700,9 +710,15 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
700{ 710{
701 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 711 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
702 struct nouveau_vram_engine *vram = &dev_priv->engine.vram; 712 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
713 struct nouveau_mem *node = mem->mm_node;
703 struct drm_device *dev = dev_priv->dev; 714 struct drm_device *dev = dev_priv->dev;
704 715
705 vram->put(dev, (struct nouveau_vram **)&mem->mm_node); 716 if (node->tmp_vma.node) {
717 nouveau_vm_unmap(&node->tmp_vma);
718 nouveau_vm_put(&node->tmp_vma);
719 }
720
721 vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
706} 722}
707 723
708static int 724static int
@@ -715,7 +731,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
715 struct nouveau_vram_engine *vram = &dev_priv->engine.vram; 731 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
716 struct drm_device *dev = dev_priv->dev; 732 struct drm_device *dev = dev_priv->dev;
717 struct nouveau_bo *nvbo = nouveau_bo(bo); 733 struct nouveau_bo *nvbo = nouveau_bo(bo);
718 struct nouveau_vram *node; 734 struct nouveau_mem *node;
719 u32 size_nc = 0; 735 u32 size_nc = 0;
720 int ret; 736 int ret;
721 737
@@ -724,7 +740,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
724 740
725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 741 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
726 mem->page_alignment << PAGE_SHIFT, size_nc, 742 mem->page_alignment << PAGE_SHIFT, size_nc,
727 (nvbo->tile_flags >> 8) & 0xff, &node); 743 (nvbo->tile_flags >> 8) & 0x3ff, &node);
728 if (ret) 744 if (ret)
729 return ret; 745 return ret;
730 746
@@ -769,3 +785,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
769 nouveau_vram_manager_del, 785 nouveau_vram_manager_del,
770 nouveau_vram_manager_debug 786 nouveau_vram_manager_debug
771}; 787};
788
789static int
790nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
791{
792 return 0;
793}
794
795static int
796nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
797{
798 return 0;
799}
800
801static void
802nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
803 struct ttm_mem_reg *mem)
804{
805 struct nouveau_mem *node = mem->mm_node;
806
807 if (node->tmp_vma.node) {
808 nouveau_vm_unmap(&node->tmp_vma);
809 nouveau_vm_put(&node->tmp_vma);
810 }
811 mem->mm_node = NULL;
812}
813
814static int
815nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
816 struct ttm_buffer_object *bo,
817 struct ttm_placement *placement,
818 struct ttm_mem_reg *mem)
819{
820 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
821 struct nouveau_bo *nvbo = nouveau_bo(bo);
822 struct nouveau_vma *vma = &nvbo->vma;
823 struct nouveau_vm *vm = vma->vm;
824 struct nouveau_mem *node;
825 int ret;
826
827 if (unlikely((mem->num_pages << PAGE_SHIFT) >=
828 dev_priv->gart_info.aper_size))
829 return -ENOMEM;
830
831 node = kzalloc(sizeof(*node), GFP_KERNEL);
832 if (!node)
833 return -ENOMEM;
834
835 /* This node must be for evicting large-paged VRAM
836 * to system memory. Due to an nv50 limitation of
837 * not being able to mix large/small pages within
838 * the same PDE, we need to create a temporary
839 * small-paged VMA for the eviction.
840 */
841 if (vma->node->type != vm->spg_shift) {
842 ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
843 vm->spg_shift, NV_MEM_ACCESS_RW,
844 &node->tmp_vma);
845 if (ret) {
846 kfree(node);
847 return ret;
848 }
849 }
850
851 node->page_shift = nvbo->vma.node->type;
852 mem->mm_node = node;
853 mem->start = 0;
854 return 0;
855}
856
857void
858nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
859{
860}
861
862const struct ttm_mem_type_manager_func nouveau_gart_manager = {
863 nouveau_gart_manager_init,
864 nouveau_gart_manager_fini,
865 nouveau_gart_manager_new,
866 nouveau_gart_manager_del,
867 nouveau_gart_manager_debug
868};
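
The new GART manager mostly defers to move_notify(), but the comment above explains the one real job it keeps: nv50 cannot mix large and small pages within one PDE, so evicting a large-paged VRAM buffer needs a temporary small-paged VMA. The trigger condition, as a sketch:

#include <stdbool.h>
#include <stdint.h>

/* sketch: a tmp_vma is needed exactly when the BO's VMA page size
 * differs from the VM's small-page size, i.e. it is large-paged */
static bool needs_tmp_vma(uint32_t vma_page_type, uint32_t spg_shift)
{
	return vma_page_type != spg_shift;
}
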
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 798eaf39691c..1f7483aae9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
53 53
54int nv50_vram_init(struct drm_device *); 54int nv50_vram_init(struct drm_device *);
55int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, 55int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
56 u32 memtype, struct nouveau_vram **); 56 u32 memtype, struct nouveau_mem **);
57void nv50_vram_del(struct drm_device *, struct nouveau_vram **); 57void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
58bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); 58bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
59 59
60int nvc0_vram_init(struct drm_device *); 60int nvc0_vram_init(struct drm_device *);
61int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin, 61int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
62 u32 memtype, struct nouveau_vram **); 62 u32 memtype, struct nouveau_mem **);
63bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags); 63bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
64 64
65#endif 65#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..a86f27655fc4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -39,12 +39,11 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
39 int ret; 39 int ret;
40 40
41 if (nouveau_vram_notify) 41 if (nouveau_vram_notify)
42 flags = TTM_PL_FLAG_VRAM; 42 flags = NOUVEAU_GEM_DOMAIN_VRAM;
43 else 43 else
44 flags = TTM_PL_FLAG_TT; 44 flags = NOUVEAU_GEM_DOMAIN_GART;
45 45
46 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 46 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
47 0, 0x0000, false, true, &ntfy);
48 if (ret) 47 if (ret)
49 return ret; 48 return ret;
50 49
@@ -99,6 +98,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
99 int size, uint32_t *b_offset) 98 int size, uint32_t *b_offset)
100{ 99{
101 struct drm_device *dev = chan->dev; 100 struct drm_device *dev = chan->dev;
101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 struct nouveau_gpuobj *nobj = NULL; 102 struct nouveau_gpuobj *nobj = NULL;
103 struct drm_mm_node *mem; 103 struct drm_mm_node *mem;
104 uint32_t offset; 104 uint32_t offset;
@@ -112,11 +112,16 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
112 return -ENOMEM; 112 return -ENOMEM;
113 } 113 }
114 114
115 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) 115 if (dev_priv->card_type < NV_50) {
116 target = NV_MEM_TARGET_VRAM; 116 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
117 else 117 target = NV_MEM_TARGET_VRAM;
118 target = NV_MEM_TARGET_GART; 118 else
119 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; 119 target = NV_MEM_TARGET_GART;
120 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
121 } else {
122 target = NV_MEM_TARGET_VM;
123 offset = chan->notifier_bo->vma.offset;
124 }
120 offset += mem->start; 125 offset += mem->start;
121 126
122 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, 127 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
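
Notifier ctxdmas on nv50+ are now created against the channel VM (NV_MEM_TARGET_VM) at the notifier BO's VMA offset, rather than at a raw VRAM or GART address. A sketch of the target/offset selection; the enum values are illustrative stand-ins for the NV_MEM_TARGET_* constants:

#include <stdint.h>

enum target { TARGET_VRAM, TARGET_GART, TARGET_VM };	/* illustrative */

struct notifier_loc { enum target target; uint64_t offset; };

static struct notifier_loc
notifier_loc(int nv50_or_later, int bo_in_vram,
	     uint64_t bo_phys, uint64_t bo_vma_offset)
{
	if (!nv50_or_later) {
		enum target t = bo_in_vram ? TARGET_VRAM : TARGET_GART;
		return (struct notifier_loc){ t, bo_phys };
	}
	return (struct notifier_loc){ TARGET_VM, bo_vma_offset };
}
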
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 03adfe4c7665..4f00c87ed86e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -36,6 +36,7 @@
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_ramht.h" 37#include "nouveau_ramht.h"
38#include "nouveau_vm.h" 38#include "nouveau_vm.h"
39#include "nv50_display.h"
39 40
40struct nouveau_gpuobj_method { 41struct nouveau_gpuobj_method {
41 struct list_head head; 42 struct list_head head;
@@ -490,16 +491,22 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
490 } 491 }
491 492
492 if (target == NV_MEM_TARGET_GART) { 493 if (target == NV_MEM_TARGET_GART) {
493 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 494 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
494 target = NV_MEM_TARGET_PCI_NOSNOOP; 495
495 base += dev_priv->gart_info.aper_base; 496 if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
496 } else 497 if (base == 0) {
497 if (base != 0) { 498 nouveau_gpuobj_ref(gart, pobj);
498 base = nouveau_sgdma_get_physical(dev, base); 499 return 0;
500 }
501
502 base = nouveau_sgdma_get_physical(dev, base);
499 target = NV_MEM_TARGET_PCI; 503 target = NV_MEM_TARGET_PCI;
500 } else { 504 } else {
501 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj); 505 base += dev_priv->gart_info.aper_base;
502 return 0; 506 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
507 target = NV_MEM_TARGET_PCI_NOSNOOP;
508 else
509 target = NV_MEM_TARGET_PCI;
503 } 510 }
504 } 511 }
505 512
@@ -776,7 +783,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
776 struct drm_device *dev = chan->dev; 783 struct drm_device *dev = chan->dev;
777 struct drm_nouveau_private *dev_priv = dev->dev_private; 784 struct drm_nouveau_private *dev_priv = dev->dev_private;
778 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 785 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
779 int ret; 786 int ret, i;
780 787
781 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 788 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
782 789
@@ -841,6 +848,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
841 nouveau_gpuobj_ref(NULL, &ramht); 848 nouveau_gpuobj_ref(NULL, &ramht);
842 if (ret) 849 if (ret)
843 return ret; 850 return ret;
851
852 /* dma objects for display sync channel semaphore blocks */
853 for (i = 0; i < 2; i++) {
854 struct nouveau_gpuobj *sem = NULL;
855 struct nv50_display_crtc *dispc =
856 &nv50_display(dev)->crtc[i];
857 u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
858
859 ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
860 NV_MEM_ACCESS_RW,
861 NV_MEM_TARGET_VRAM, &sem);
862 if (ret)
863 return ret;
864
865 ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
866 nouveau_gpuobj_ref(NULL, &sem);
867 if (ret)
868 return ret;
869 }
844 } 870 }
845 871
846 /* VRAM ctxdma */ 872 /* VRAM ctxdma */
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cddfeca..4399e2f34db4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
543 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 543 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
544 struct nouveau_pm_level *perflvl; 544 struct nouveau_pm_level *perflvl;
545 545
546 if (pm->cur == &pm->boot) 546 if (!pm->cur || pm->cur == &pm->boot)
547 return; 547 return;
548 548
549 perflvl = pm->cur; 549 perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index bef3e6910418..a24a81f5a89e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -114,7 +114,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); 114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else { 115 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { 116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) | chan->id; 117 ctx = (gpuobj->cinst << 10) |
118 (chan->id << 28) |
119 chan->id; /* HASH_TAG */
118 } else { 120 } else {
119 ctx = (gpuobj->cinst >> 4) | 121 ctx = (gpuobj->cinst >> 4) |
120 ((gpuobj->engine << 122 ((gpuobj->engine <<
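
The display-engine RAMHT entry gains the channel id in bits 31:28 on top of the existing low-bit HASH_TAG copy. The resulting context word, as a one-line sketch:

#include <stdint.h>

/* sketch: RAMHT context word for NVOBJ_ENGINE_DISPLAY objects */
static uint32_t ramht_display_ctx(uint32_t cinst, uint32_t chan_id)
{
	return (cinst << 10) | (chan_id << 28) | chan_id; /* HASH_TAG */
}
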
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 07b115184b87..1205f0f345b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -89,8 +89,24 @@ nouveau_sgdma_clear(struct ttm_backend *be)
89 } 89 }
90} 90}
91 91
92static void
93nouveau_sgdma_destroy(struct ttm_backend *be)
94{
95 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
96
97 if (be) {
98 NV_DEBUG(nvbe->dev, "\n");
99
100 if (nvbe) {
101 if (nvbe->pages)
102 be->func->clear(be);
103 kfree(nvbe);
104 }
105 }
106}
107
92static int 108static int
93nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 109nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
94{ 110{
95 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 111 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
96 struct drm_device *dev = nvbe->dev; 112 struct drm_device *dev = nvbe->dev;
@@ -117,7 +133,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
117} 133}
118 134
119static int 135static int
120nouveau_sgdma_unbind(struct ttm_backend *be) 136nv04_sgdma_unbind(struct ttm_backend *be)
121{ 137{
122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 138 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
123 struct drm_device *dev = nvbe->dev; 139 struct drm_device *dev = nvbe->dev;
@@ -140,59 +156,245 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
140 return 0; 156 return 0;
141} 157}
142 158
159static struct ttm_backend_func nv04_sgdma_backend = {
160 .populate = nouveau_sgdma_populate,
161 .clear = nouveau_sgdma_clear,
162 .bind = nv04_sgdma_bind,
163 .unbind = nv04_sgdma_unbind,
164 .destroy = nouveau_sgdma_destroy
165};
166
143static void 167static void
144nouveau_sgdma_destroy(struct ttm_backend *be) 168nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
169{
170 struct drm_device *dev = nvbe->dev;
171
172 nv_wr32(dev, 0x100810, 0x00000022);
173 if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
174 NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
175 nv_rd32(dev, 0x100810));
176 nv_wr32(dev, 0x100810, 0x00000000);
177}
178
179static int
180nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
145{ 181{
146 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 182 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
183 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
184 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
185 dma_addr_t *list = nvbe->pages;
186 u32 pte = mem->start << 2;
187 u32 cnt = nvbe->nr_pages;
147 188
148 if (be) { 189 nvbe->offset = mem->start << PAGE_SHIFT;
149 NV_DEBUG(nvbe->dev, "\n");
150 190
151 if (nvbe) { 191 while (cnt--) {
152 if (nvbe->pages) 192 nv_wo32(pgt, pte, (*list++ >> 7) | 1);
153 be->func->clear(be); 193 pte += 4;
154 kfree(nvbe); 194 }
195
196 nv41_sgdma_flush(nvbe);
197 nvbe->bound = true;
198 return 0;
199}
200
201static int
202nv41_sgdma_unbind(struct ttm_backend *be)
203{
204 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
205 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
206 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
207 u32 pte = (nvbe->offset >> 12) << 2;
208 u32 cnt = nvbe->nr_pages;
209
210 while (cnt--) {
211 nv_wo32(pgt, pte, 0x00000000);
212 pte += 4;
213 }
214
215 nv41_sgdma_flush(nvbe);
216 nvbe->bound = false;
217 return 0;
218}
219
220static struct ttm_backend_func nv41_sgdma_backend = {
221 .populate = nouveau_sgdma_populate,
222 .clear = nouveau_sgdma_clear,
223 .bind = nv41_sgdma_bind,
224 .unbind = nv41_sgdma_unbind,
225 .destroy = nouveau_sgdma_destroy
226};
227
228static void
229nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
230{
231 struct drm_device *dev = nvbe->dev;
232
233 nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
234 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
235 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
236 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
237 nv_rd32(dev, 0x100808));
238 nv_wr32(dev, 0x100808, 0x00000000);
239}
240
241static void
242nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
243{
244 struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
245 dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
246 u32 pte, tmp[4];
247
248 pte = base >> 2;
249 base &= ~0x0000000f;
250
251 tmp[0] = nv_ro32(pgt, base + 0x0);
252 tmp[1] = nv_ro32(pgt, base + 0x4);
253 tmp[2] = nv_ro32(pgt, base + 0x8);
254 tmp[3] = nv_ro32(pgt, base + 0xc);
255 while (cnt--) {
256 u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
257 switch (pte++ & 0x3) {
258 case 0:
259 tmp[0] &= ~0x07ffffff;
260 tmp[0] |= addr;
261 break;
262 case 1:
263 tmp[0] &= ~0xf8000000;
264 tmp[0] |= addr << 27;
265 tmp[1] &= ~0x003fffff;
266 tmp[1] |= addr >> 5;
267 break;
268 case 2:
269 tmp[1] &= ~0xffc00000;
270 tmp[1] |= addr << 22;
271 tmp[2] &= ~0x0001ffff;
272 tmp[2] |= addr >> 10;
273 break;
274 case 3:
275 tmp[2] &= ~0xfffe0000;
276 tmp[2] |= addr << 17;
277 tmp[3] &= ~0x00000fff;
278 tmp[3] |= addr >> 15;
279 break;
155 } 280 }
156 } 281 }
282
283 tmp[3] |= 0x40000000;
284
285 nv_wo32(pgt, base + 0x0, tmp[0]);
286 nv_wo32(pgt, base + 0x4, tmp[1]);
287 nv_wo32(pgt, base + 0x8, tmp[2]);
288 nv_wo32(pgt, base + 0xc, tmp[3]);
157} 289}
158 290
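
nv44_sgdma_fill() handles the awkward case: nv44 PTEs are 27 bits wide and packed four to a 128-bit group, so a partial update must read-modify-write all four words (with 0x40000000 in the last word acting as the group's valid bit). A stand-alone sketch that packs one entry with the same masks and shifts, then re-extracts it to confirm the layout:

#include <assert.h>
#include <stdint.h>

/* sketch: place a 27-bit address at slot idx of one 4-word group,
 * using the exact masks/shifts from nv44_sgdma_fill() above */
static void pack(uint32_t t[4], int idx, uint32_t addr)
{
	switch (idx & 3) {
	case 0: t[0] = (t[0] & ~0x07ffffffu) | addr; break;
	case 1: t[0] = (t[0] & ~0xf8000000u) | (addr << 27);
		t[1] = (t[1] & ~0x003fffffu) | (addr >> 5); break;
	case 2: t[1] = (t[1] & ~0xffc00000u) | (addr << 22);
		t[2] = (t[2] & ~0x0001ffffu) | (addr >> 10); break;
	case 3: t[2] = (t[2] & ~0xfffe0000u) | (addr << 17);
		t[3] = (t[3] & ~0x00000fffu) | (addr >> 15); break;
	}
}

int main(void)
{
	uint32_t t[4] = { 0, 0, 0, 0 };

	pack(t, 1, 0x7abcdef);	/* slot 1 straddles words 0 and 1 */
	assert((((t[0] >> 27) & 0x1f) | ((t[1] & 0x3fffff) << 5))
	       == 0x7abcdef);
	return 0;
}
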
159static int 291static int
160nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 292nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
161{ 293{
162 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 294 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
163 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 295 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
296 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
297 dma_addr_t *list = nvbe->pages;
298 u32 pte = mem->start << 2, tmp[4];
299 u32 cnt = nvbe->nr_pages;
300 int i;
164 301
165 nvbe->offset = mem->start << PAGE_SHIFT; 302 nvbe->offset = mem->start << PAGE_SHIFT;
166 303
167 nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset, 304 if (pte & 0x0000000c) {
168 nvbe->nr_pages << PAGE_SHIFT, nvbe->pages); 305 u32 max = 4 - ((pte >> 2) & 0x3);
306 u32 part = (cnt > max) ? max : cnt;
307 nv44_sgdma_fill(pgt, list, pte, part);
308 pte += (part << 2);
309 list += part;
310 cnt -= part;
311 }
312
313 while (cnt >= 4) {
314 for (i = 0; i < 4; i++)
315 tmp[i] = *list++ >> 12;
316 nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
317 nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
318 nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
319 nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
320 pte += 0x10;
321 cnt -= 4;
322 }
323
324 if (cnt)
325 nv44_sgdma_fill(pgt, list, pte, cnt);
326
327 nv44_sgdma_flush(nvbe);
169 nvbe->bound = true; 328 nvbe->bound = true;
170 return 0; 329 return 0;
171} 330}
172 331
173static int 332static int
174nv50_sgdma_unbind(struct ttm_backend *be) 333nv44_sgdma_unbind(struct ttm_backend *be)
175{ 334{
176 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 335 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
177 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; 336 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
337 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
338 u32 pte = (nvbe->offset >> 12) << 2;
339 u32 cnt = nvbe->nr_pages;
340
341 if (pte & 0x0000000c) {
342 u32 max = 4 - ((pte >> 2) & 0x3);
343 u32 part = (cnt > max) ? max : cnt;
344 nv44_sgdma_fill(pgt, NULL, pte, part);
345 pte += (part << 2);
346 cnt -= part;
347 }
178 348
179 if (!nvbe->bound) 349 while (cnt >= 4) {
180 return 0; 350 nv_wo32(pgt, pte + 0x0, 0x00000000);
351 nv_wo32(pgt, pte + 0x4, 0x00000000);
352 nv_wo32(pgt, pte + 0x8, 0x00000000);
353 nv_wo32(pgt, pte + 0xc, 0x00000000);
354 pte += 0x10;
355 cnt -= 4;
356 }
181 357
182 nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset, 358 if (cnt)
183 nvbe->nr_pages << PAGE_SHIFT); 359 nv44_sgdma_fill(pgt, NULL, pte, cnt);
360
361 nv44_sgdma_flush(nvbe);
184 nvbe->bound = false; 362 nvbe->bound = false;
185 return 0; 363 return 0;
186} 364}
187 365
188static struct ttm_backend_func nouveau_sgdma_backend = { 366static struct ttm_backend_func nv44_sgdma_backend = {
189 .populate = nouveau_sgdma_populate, 367 .populate = nouveau_sgdma_populate,
190 .clear = nouveau_sgdma_clear, 368 .clear = nouveau_sgdma_clear,
191 .bind = nouveau_sgdma_bind, 369 .bind = nv44_sgdma_bind,
192 .unbind = nouveau_sgdma_unbind, 370 .unbind = nv44_sgdma_unbind,
193 .destroy = nouveau_sgdma_destroy 371 .destroy = nouveau_sgdma_destroy
194}; 372};
195 373
374static int
375nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
376{
377 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
378 struct nouveau_mem *node = mem->mm_node;
379 /* noop: bound in move_notify() */
380 node->pages = nvbe->pages;
381 nvbe->pages = (dma_addr_t *)node;
382 nvbe->bound = true;
383 return 0;
384}
385
386static int
387nv50_sgdma_unbind(struct ttm_backend *be)
388{
389 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
390 struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
391 /* noop: unbound in move_notify() */
392 nvbe->pages = node->pages;
393 node->pages = NULL;
394 nvbe->bound = false;
395 return 0;
396}
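
The nv50 backend's bind/unbind are now no-ops (the real mapping happens at move_notify() time), but they still perform a small hand-off: the nouveau_mem node is stashed in the backend's pages pointer so unbind can recover both structures later. A minimal model of that swap, with stand-in struct names:

#include <assert.h>
#include <stddef.h>

struct node { void *pages; };		/* stands in for nouveau_mem */
struct backend { void *pages; };	/* stands in for nouveau_sgdma_be */

static void bind(struct backend *be, struct node *n)
{
	n->pages = be->pages;	/* park the real page list in the node */
	be->pages = n;		/* remember the node in its place */
}

static void unbind(struct backend *be)
{
	struct node *n = be->pages;

	be->pages = n->pages;	/* restore the page list */
	n->pages = NULL;
}

int main(void)
{
	int page_list;
	struct node n = { NULL };
	struct backend be = { &page_list };

	bind(&be, &n);
	unbind(&be);
	assert(be.pages == &page_list && n.pages == NULL);
	return 0;
}
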
397
196static struct ttm_backend_func nv50_sgdma_backend = { 398static struct ttm_backend_func nv50_sgdma_backend = {
197 .populate = nouveau_sgdma_populate, 399 .populate = nouveau_sgdma_populate,
198 .clear = nouveau_sgdma_clear, 400 .clear = nouveau_sgdma_clear,
@@ -213,10 +415,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
213 415
214 nvbe->dev = dev; 416 nvbe->dev = dev;
215 417
216 if (dev_priv->card_type < NV_50) 418 nvbe->backend.func = dev_priv->gart_info.func;
217 nvbe->backend.func = &nouveau_sgdma_backend;
218 else
219 nvbe->backend.func = &nv50_sgdma_backend;
220 return &nvbe->backend; 419 return &nvbe->backend;
221} 420}
222 421
@@ -225,21 +424,65 @@ nouveau_sgdma_init(struct drm_device *dev)
225{ 424{
226 struct drm_nouveau_private *dev_priv = dev->dev_private; 425 struct drm_nouveau_private *dev_priv = dev->dev_private;
227 struct nouveau_gpuobj *gpuobj = NULL; 426 struct nouveau_gpuobj *gpuobj = NULL;
228 uint32_t aper_size, obj_size; 427 u32 aper_size, align;
229 int i, ret; 428 int ret;
230 429
231 if (dev_priv->card_type < NV_50) { 430 if (dev_priv->card_type >= NV_50 ||
232 if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024) 431 dev_priv->ramin_rsvd_vram >= 2 * 1024 * 1024)
233 aper_size = 64 * 1024 * 1024; 432 aper_size = 512 * 1024 * 1024;
234 else 433 else
235 aper_size = 512 * 1024 * 1024; 434 aper_size = 64 * 1024 * 1024;
435
436 /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
437 * Christmas. The cards before it have them, the cards after
438 * it have them, why is NV44 so unloved?
439 */
440 dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
441 if (!dev_priv->gart_info.dummy.page)
442 return -ENOMEM;
443
444 dev_priv->gart_info.dummy.addr =
445 pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
446 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
447 if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
448 NV_ERROR(dev, "error mapping dummy page\n");
449 __free_page(dev_priv->gart_info.dummy.page);
450 dev_priv->gart_info.dummy.page = NULL;
451 return -ENOMEM;
452 }
453
454 if (dev_priv->card_type >= NV_50) {
455 dev_priv->gart_info.aper_base = 0;
456 dev_priv->gart_info.aper_size = aper_size;
457 dev_priv->gart_info.type = NOUVEAU_GART_HW;
458 dev_priv->gart_info.func = &nv50_sgdma_backend;
459 } else
460 if (drm_pci_device_is_pcie(dev) &&
461 dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
462 if (nv44_graph_class(dev)) {
463 dev_priv->gart_info.func = &nv44_sgdma_backend;
464 align = 512 * 1024;
465 } else {
466 dev_priv->gart_info.func = &nv41_sgdma_backend;
467 align = 16;
468 }
236 469
237 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; 470 ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
238 obj_size += 8; /* ctxdma header */ 471 NVOBJ_FLAG_ZERO_ALLOC |
472 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
473 if (ret) {
474 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
475 return ret;
476 }
239 477
240 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, 478 dev_priv->gart_info.sg_ctxdma = gpuobj;
241 NVOBJ_FLAG_ZERO_ALLOC | 479 dev_priv->gart_info.aper_base = 0;
242 NVOBJ_FLAG_ZERO_FREE, &gpuobj); 480 dev_priv->gart_info.aper_size = aper_size;
481 dev_priv->gart_info.type = NOUVEAU_GART_HW;
482 } else {
483 ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
484 NVOBJ_FLAG_ZERO_ALLOC |
485 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
243 if (ret) { 486 if (ret) {
244 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); 487 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
245 return ret; 488 return ret;
@@ -251,25 +494,14 @@ nouveau_sgdma_init(struct drm_device *dev)
251 (0 << 14) /* RW */ | 494 (0 << 14) /* RW */ |
252 (2 << 16) /* PCI */); 495 (2 << 16) /* PCI */);
253 nv_wo32(gpuobj, 4, aper_size - 1); 496 nv_wo32(gpuobj, 4, aper_size - 1);
254 for (i = 2; i < 2 + (aper_size >> 12); i++)
255 nv_wo32(gpuobj, i * 4, 0x00000000);
256 497
257 dev_priv->gart_info.sg_ctxdma = gpuobj; 498 dev_priv->gart_info.sg_ctxdma = gpuobj;
258 dev_priv->gart_info.aper_base = 0; 499 dev_priv->gart_info.aper_base = 0;
259 dev_priv->gart_info.aper_size = aper_size; 500 dev_priv->gart_info.aper_size = aper_size;
260 } else 501 dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
261 if (dev_priv->chan_vm) { 502 dev_priv->gart_info.func = &nv04_sgdma_backend;
262 ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
263 12, NV_MEM_ACCESS_RW,
264 &dev_priv->gart_info.vma);
265 if (ret)
266 return ret;
267
268 dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
269 dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
270 } 503 }
271 504
272 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
273 return 0; 505 return 0;
274} 506}
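
With one 4-byte PTE per 4 KiB page, the GART page table is aper_size/1024 bytes; the nv04-style ctxdma prepends an 8-byte header, while the nv41/nv44 tables are raw (their differing alignments, 16 bytes vs 512 KiB, come from the hunk above). A sketch of the sizing, assuming those constants:

#include <stdint.h>

/* sketch: bytes of instmem needed for the GART page table */
static uint32_t sg_ctxdma_bytes(uint32_t aper_size, int nv04_style)
{
	uint32_t pte_bytes = (aper_size >> 12) * 4;	/* == aper_size / 1024 */

	return nv04_style ? pte_bytes + 8 : pte_bytes;
}
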
275 507
@@ -279,7 +511,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
279 struct drm_nouveau_private *dev_priv = dev->dev_private; 511 struct drm_nouveau_private *dev_priv = dev->dev_private;
280 512
281 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); 513 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
282 nouveau_vm_put(&dev_priv->gart_info.vma); 514
515 if (dev_priv->gart_info.dummy.page) {
516 pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
517 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
518 __free_page(dev_priv->gart_info.dummy.page);
519 dev_priv->gart_info.dummy.page = NULL;
520 }
283} 521}
284 522
285uint32_t 523uint32_t
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 2148d01354da..05294910e135 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -544,7 +544,6 @@ static int
544nouveau_card_init_channel(struct drm_device *dev) 544nouveau_card_init_channel(struct drm_device *dev)
545{ 545{
546 struct drm_nouveau_private *dev_priv = dev->dev_private; 546 struct drm_nouveau_private *dev_priv = dev->dev_private;
547 struct nouveau_gpuobj *gpuobj = NULL;
548 int ret; 547 int ret;
549 548
550 ret = nouveau_channel_alloc(dev, &dev_priv->channel, 549 ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@@ -552,41 +551,8 @@ nouveau_card_init_channel(struct drm_device *dev)
552 if (ret) 551 if (ret)
553 return ret; 552 return ret;
554 553
555 /* no dma objects on fermi... */
556 if (dev_priv->card_type >= NV_C0)
557 goto out_done;
558
559 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
560 0, dev_priv->vram_size,
561 NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
562 &gpuobj);
563 if (ret)
564 goto out_err;
565
566 ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
567 nouveau_gpuobj_ref(NULL, &gpuobj);
568 if (ret)
569 goto out_err;
570
571 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
572 0, dev_priv->gart_info.aper_size,
573 NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
574 &gpuobj);
575 if (ret)
576 goto out_err;
577
578 ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
579 nouveau_gpuobj_ref(NULL, &gpuobj);
580 if (ret)
581 goto out_err;
582
583out_done:
584 mutex_unlock(&dev_priv->channel->mutex); 554 mutex_unlock(&dev_priv->channel->mutex);
585 return 0; 555 return 0;
586
587out_err:
588 nouveau_channel_put(&dev_priv->channel);
589 return ret;
590} 556}
591 557
592static void nouveau_switcheroo_set_state(struct pci_dev *pdev, 558static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
@@ -929,12 +895,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
929 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", 895 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
930 dev->pci_vendor, dev->pci_device, dev->pdev->class); 896 dev->pci_vendor, dev->pci_device, dev->pdev->class);
931 897
932 dev_priv->wq = create_workqueue("nouveau");
933 if (!dev_priv->wq) {
934 ret = -EINVAL;
935 goto err_priv;
936 }
937
938 /* resource 0 is mmio regs */ 898 /* resource 0 is mmio regs */
939 /* resource 1 is linear FB */ 899 /* resource 1 is linear FB */
940 /* resource 2 is RAMIN (mmio regs + 0x1000000) */ 900 /* resource 2 is RAMIN (mmio regs + 0x1000000) */
@@ -947,7 +907,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
947 NV_ERROR(dev, "Unable to initialize the mmio mapping. " 907 NV_ERROR(dev, "Unable to initialize the mmio mapping. "
948 "Please report your setup to " DRIVER_EMAIL "\n"); 908 "Please report your setup to " DRIVER_EMAIL "\n");
949 ret = -EINVAL; 909 ret = -EINVAL;
950 goto err_wq; 910 goto err_priv;
951 } 911 }
952 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", 912 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
953 (unsigned long long)mmio_start_offs); 913 (unsigned long long)mmio_start_offs);
@@ -1054,8 +1014,6 @@ err_ramin:
1054 iounmap(dev_priv->ramin); 1014 iounmap(dev_priv->ramin);
1055err_mmio: 1015err_mmio:
1056 iounmap(dev_priv->mmio); 1016 iounmap(dev_priv->mmio);
1057err_wq:
1058 destroy_workqueue(dev_priv->wq);
1059err_priv: 1017err_priv:
1060 kfree(dev_priv); 1018 kfree(dev_priv);
1061 dev->dev_private = NULL; 1019 dev->dev_private = NULL;
@@ -1126,7 +1084,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1126 getparam->value = 1; 1084 getparam->value = 1;
1127 break; 1085 break;
1128 case NOUVEAU_GETPARAM_HAS_PAGEFLIP: 1086 case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
1129 getparam->value = (dev_priv->card_type < NV_50); 1087 getparam->value = 1;
1130 break; 1088 break;
1131 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1089 case NOUVEAU_GETPARAM_GRAPH_UNITS:
1132 /* NV40 and NV50 versions are quite different, but register 1090 /* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 8d9968e1cba8..649b0413b09f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -239,11 +239,9 @@ static bool
239probe_monitoring_device(struct nouveau_i2c_chan *i2c, 239probe_monitoring_device(struct nouveau_i2c_chan *i2c,
240 struct i2c_board_info *info) 240 struct i2c_board_info *info)
241{ 241{
242 char modalias[16] = "i2c:";
243 struct i2c_client *client; 242 struct i2c_client *client;
244 243
245 strlcat(modalias, info->type, sizeof(modalias)); 244 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
246 request_module(modalias);
247 245
248 client = i2c_new_device(&i2c->adapter, info); 246 client = i2c_new_device(&i2c->adapter, info);
249 if (!client) 247 if (!client)
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 97d82aedf86b..62824c80bcb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -28,7 +28,7 @@
28#include "nouveau_vm.h" 28#include "nouveau_vm.h"
29 29
30void 30void
31nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) 31nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
32{ 32{
33 struct nouveau_vm *vm = vma->vm; 33 struct nouveau_vm *vm = vma->vm;
34 struct nouveau_mm_node *r; 34 struct nouveau_mm_node *r;
@@ -40,7 +40,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
40 u32 max = 1 << (vm->pgt_bits - bits); 40 u32 max = 1 << (vm->pgt_bits - bits);
41 u32 end, len; 41 u32 end, len;
42 42
43 list_for_each_entry(r, &vram->regions, rl_entry) { 43 delta = 0;
44 list_for_each_entry(r, &node->regions, rl_entry) {
44 u64 phys = (u64)r->offset << 12; 45 u64 phys = (u64)r->offset << 12;
45 u32 num = r->length >> bits; 46 u32 num = r->length >> bits;
46 47
@@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
52 end = max; 53 end = max;
53 len = end - pte; 54 len = end - pte;
54 55
55 vm->map(vma, pgt, vram, pte, len, phys); 56 vm->map(vma, pgt, node, pte, len, phys, delta);
56 57
57 num -= len; 58 num -= len;
58 pte += len; 59 pte += len;
@@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
60 pde++; 61 pde++;
61 pte = 0; 62 pte = 0;
62 } 63 }
64
65 delta += (u64)len << vma->node->type;
63 } 66 }
64 } 67 }
65 68
@@ -67,14 +70,14 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
67} 70}
68 71
69void 72void
70nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram) 73nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
71{ 74{
72 nouveau_vm_map_at(vma, 0, vram); 75 nouveau_vm_map_at(vma, 0, node);
73} 76}
74 77
75void 78void
76nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, 79nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
77 dma_addr_t *list) 80 struct nouveau_mem *mem, dma_addr_t *list)
78{ 81{
79 struct nouveau_vm *vm = vma->vm; 82 struct nouveau_vm *vm = vma->vm;
80 int big = vma->node->type != vm->spg_shift; 83 int big = vma->node->type != vm->spg_shift;
@@ -94,7 +97,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
94 end = max; 97 end = max;
95 len = end - pte; 98 len = end - pte;
96 99
97 vm->map_sg(vma, pgt, pte, list, len); 100 vm->map_sg(vma, pgt, mem, pte, len, list);
98 101
99 num -= len; 102 num -= len;
100 pte += len; 103 pte += len;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index e1193515771b..2e06b55cfdc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -67,9 +67,10 @@ struct nouveau_vm {
67 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, 67 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
68 struct nouveau_gpuobj *pgt[2]); 68 struct nouveau_gpuobj *pgt[2]);
69 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, 69 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
70 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); 70 struct nouveau_mem *, u32 pte, u32 cnt,
71 u64 phys, u64 delta);
71 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, 72 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
72 u32 pte, dma_addr_t *, u32 cnt); 73 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
73 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); 74 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
74 void (*flush)(struct nouveau_vm *); 75 void (*flush)(struct nouveau_vm *);
75}; 76};
@@ -82,20 +83,20 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
82int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, 83int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
83 u32 access, struct nouveau_vma *); 84 u32 access, struct nouveau_vma *);
84void nouveau_vm_put(struct nouveau_vma *); 85void nouveau_vm_put(struct nouveau_vma *);
85void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *); 86void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
86void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *); 87void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
87void nouveau_vm_unmap(struct nouveau_vma *); 88void nouveau_vm_unmap(struct nouveau_vma *);
88void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); 89void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
89void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, 90void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
90 dma_addr_t *); 91 struct nouveau_mem *, dma_addr_t *);
91 92
92/* nv50_vm.c */ 93/* nv50_vm.c */
93void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, 94void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
94 struct nouveau_gpuobj *pgt[2]); 95 struct nouveau_gpuobj *pgt[2]);
95void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, 96void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
96 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); 97 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
97void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, 98void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
98 u32 pte, dma_addr_t *, u32 cnt); 99 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
99void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); 100void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
100void nv50_vm_flush(struct nouveau_vm *); 101void nv50_vm_flush(struct nouveau_vm *);
101void nv50_vm_flush_engine(struct drm_device *, int engine); 102void nv50_vm_flush_engine(struct drm_device *, int engine);
@@ -104,9 +105,9 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
104void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, 105void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
105 struct nouveau_gpuobj *pgt[2]); 106 struct nouveau_gpuobj *pgt[2]);
106void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, 107void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
107 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); 108 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
108void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, 109void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
109 u32 pte, dma_addr_t *, u32 cnt); 110 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
110void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); 111void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
111void nvc0_vm_flush(struct nouveau_vm *); 112void nvc0_vm_flush(struct nouveau_vm *);
112 113
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 297505eb98d5..a260fbbe3d9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1031 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 1031 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
1032 1032
1033 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 1033 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
1034 0, 0x0000, false, true, &nv_crtc->cursor.nvbo); 1034 0, 0x0000, &nv_crtc->cursor.nvbo);
1035 if (!ret) { 1035 if (!ret) {
1036 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1036 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
1037 if (!ret) 1037 if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550407b5..c82db37d9f41 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
342 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 342 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
343 bool duallink, dummy; 343 bool duallink, dummy;
344 344
345 nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> 345 nouveau_bios_parse_lvds_table(dev, output_mode->clock,
346 clock, &duallink, &dummy); 346 &duallink, &dummy);
347 if (duallink) 347 if (duallink)
348 regp->fp_control |= (8 << 28); 348 regp->fp_control |= (8 << 28);
349 } else 349 } else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 		return;
 
 	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
-		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
 		/* when removing an output, crtc may not be set, but PANEL_OFF
 		 * must still be run
 		 */
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 			nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
 
 		if (mode == DRM_MODE_DPMS_ON) {
-			if (!nv_connector->native_mode) {
-				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
-				return;
-			}
 			call_lvds_script(dev, nv_encoder->dcb, head,
-					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
+					 LVDS_PANEL_ON, nv_encoder->mode.clock);
 		} else
 			/* pxclk of 0 is fine for PANEL_OFF, and for a
 			 * disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f89d104698df..dfa600c46186 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -379,6 +379,15 @@ out:
 	return handled;
 }
 
+static const char *nv_dma_state_err(u32 state)
+{
+	static const char * const desc[] = {
+		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+	};
+	return desc[(state >> 29) & 0x7];
+}
+
 void
 nv04_fifo_isr(struct drm_device *dev)
 {
@@ -460,9 +469,10 @@ nv04_fifo_isr(struct drm_device *dev)
 			if (nouveau_ratelimit())
 				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
 					"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					"State 0x%08x Push 0x%08x\n",
+					"State 0x%08x (err: %s) Push 0x%08x\n",
 					chid, ho_get, dma_get, ho_put,
 					dma_put, ib_get, ib_put, state,
+					nv_dma_state_err(state),
 					push);
 
 			/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
@@ -476,8 +486,9 @@ nv04_fifo_isr(struct drm_device *dev)
 			}
 		} else {
 			NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
-				"Put 0x%08x State 0x%08x Push 0x%08x\n",
-				chid, dma_get, dma_put, state, push);
+				"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+				chid, dma_get, dma_put, state,
+				nv_dma_state_err(state), push);
 
 			if (dma_get != dma_put)
 				nv_wr32(dev, 0x003244, dma_put);
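Illustrative sketch (not from the commit): the decoder added above maps bits
31:29 of the DMA_PUSHER state word onto one of eight error names, so e.g.
state 0x80000000 gives (0x80000000 >> 29) & 7 == 4, i.e. "INVALID_CMD".
A standalone userspace translation for checking a logged state value:

	#include <stdio.h>
	#include <stdint.h>

	static const char *dma_state_err(uint32_t state)
	{
		static const char * const desc[] = {
			"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD",
			"RET_SUBR_INACTIVE", "INVALID_CMD", "IB_EMPTY",
			"MEM_FAULT", "UNK"
		};
		return desc[(state >> 29) & 0x7];
	}

	int main(void)
	{
		printf("%s\n", dma_state_err(0x80000000)); /* INVALID_CMD */
		return 0;
	}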
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f3d9c0505f7b..f0ac2a768c67 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
 	}
 }
 
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+	if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+		nv_wr32(dev, 0x100800, 0x00000001);
+		return;
+	}
+
+	nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+	nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+	nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+	u32 vinst;
+
+	if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+		nv_wr32(dev, 0x100850, 0x80000000);
+		nv_wr32(dev, 0x100800, 0x00000001);
+		return;
+	}
+
+	/* calculate vram address of this PRAMIN block, object
+	 * must be allocated on 512KiB alignment, and not exceed
+	 * a total size of 512KiB for this to work correctly
+	 */
+	vinst = nv_rd32(dev, 0x10020c);
+	vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+	nv_wr32(dev, 0x100850, 0x80000000);
+	nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+	nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+	nv_wr32(dev, 0x100850, 0x00008000);
+	nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+	nv_wr32(dev, 0x100820, 0x00000000);
+	nv_wr32(dev, 0x10082c, 0x00000001);
+	nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
+
 int
 nv40_fb_init(struct drm_device *dev)
 {
@@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
 	uint32_t tmp;
 	int i;
 
-	/* This is strictly a NV4x register (don't know about NV5x). */
-	/* The blob sets these to all kinds of values, and they mess up our setup. */
-	/* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
-	/* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
-	/* Any idea what this is? */
-	nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+	if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+		if (nv44_graph_class(dev))
+			nv44_fb_init_gart(dev);
+		else
+			nv40_fb_init_gart(dev);
+	}
 
 	switch (dev_priv->chipset) {
 	case 0x40:
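Illustrative sketch (not from the commit): the address arithmetic in
nv44_fb_init_gart() above rounds the ctxdma's PRAMIN offset up to the next
512KiB boundary and subtracts it from the value read at 0x10020c (which
nouveau elsewhere treats as the VRAM size), yielding the block's address
near the top of VRAM. Standalone check of the shift arithmetic:

	#include <stdint.h>
	#include <assert.h>

	static uint32_t pramin_vinst(uint32_t vram_size, uint32_t pinst)
	{
		/* ((pinst >> 19) + 1) << 19 rounds up to a 512KiB multiple */
		return vram_size - (((pinst >> 19) + 1) << 19);
	}

	int main(void)
	{
		/* e.g. 256MiB of VRAM, ctxdma at PRAMIN offset 0x20000 */
		assert(pramin_vinst(0x10000000, 0x20000) == 0x0ff80000);
		return 0;
	}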
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72388c8..18d30c2c1aa6 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
+	case 0x40:
+	case 0x41: /* guess */
+	case 0x42:
+	case 0x43:
+	case 0x45: /* guess */
+	case 0x4e:
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
 	case 0x44:
 	case 0x4a:
-	case 0x4e:
 		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
-
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
+	case 0x4c:
+	case 0x67:
+	default:
 		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
 		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
-
-	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
-		break;
 	}
 }
 
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
 		break;
 	default:
 		switch (dev_priv->chipset) {
-		case 0x46:
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-			break;
-		default:
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x45:
+		case 0x4e:
+		case 0x44:
+		case 0x4a:
 			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
 			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
 			break;
+		default:
+			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+			break;
 		}
 		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
 		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
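Illustrative table (not from the commit) of the regrouping the two hunks
above perform: tiling and PFB_CFG mirroring are now keyed off explicit
chipset lists instead of a catch-all default. The helper simply mirrors
the cases in nv40_graph_set_tile_region():

	enum tile_bank { BANK_NV20_PLUS_NV40, BANK_NV20_ONLY, BANK_NV47 };

	static enum tile_bank nv40_tile_bank(int chipset)
	{
		switch (chipset) {
		case 0x40: case 0x41: case 0x42: case 0x43:
		case 0x45: case 0x4e:
			return BANK_NV20_PLUS_NV40; /* NV20 + NV40 regs */
		case 0x44: case 0x4a:
			return BANK_NV20_ONLY;      /* NV20 regs only */
		default:                            /* 0x46/47/49/4b/4c/67... */
			return BANK_NV47;           /* NV47 + NV40 regs */
		}
	}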
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 9023c4dbb449..2b9984027f41 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -65,7 +65,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int index = nv_crtc->index, ret;
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -135,8 +135,7 @@ static int
 nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
 	NV_DEBUG_KMS(dev, "\n");
@@ -186,8 +185,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
 	struct nouveau_connector *nv_connector =
 		nouveau_crtc_connector_get(nv_crtc);
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct drm_display_mode *native_mode = NULL;
 	struct drm_display_mode *mode = &nv_crtc->base.mode;
 	uint32_t outX, outY, horiz, vert;
@@ -445,6 +443,42 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 }
 
+static int
+nv50_crtc_wait_complete(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	struct nv50_display *disp = nv50_display(dev);
+	struct nouveau_channel *evo = disp->master;
+	u64 start;
+	int ret;
+
+	ret = RING_SPACE(evo, 6);
+	if (ret)
+		return ret;
+	BEGIN_RING(evo, 0, 0x0084, 1);
+	OUT_RING (evo, 0x80000000);
+	BEGIN_RING(evo, 0, 0x0080, 1);
+	OUT_RING (evo, 0);
+	BEGIN_RING(evo, 0, 0x0084, 1);
+	OUT_RING (evo, 0x00000000);
+
+	nv_wo32(disp->ntfy, 0x000, 0x00000000);
+	FIRE_RING (evo);
+
+	start = ptimer->read(dev);
+	do {
+		nv_wr32(dev, 0x61002c, 0x370);
+		nv_wr32(dev, 0x000140, 1);
+
+		if (nv_ro32(disp->ntfy, 0x000))
+			return 0;
+	} while (ptimer->read(dev) - start < 2000000000ULL);
+
+	return -EBUSY;
+}
+
 static void
 nv50_crtc_prepare(struct drm_crtc *crtc)
 {
@@ -453,6 +487,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+	nv50_display_flip_stop(crtc);
 	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	nv50_crtc_blank(nv_crtc, true);
 }
@@ -461,24 +496,14 @@ static void
 nv50_crtc_commit(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	int ret;
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
 	nv50_crtc_blank(nv_crtc, false);
 	drm_vblank_post_modeset(dev, nv_crtc->index);
-
-	ret = RING_SPACE(evo, 2);
-	if (ret) {
-		NV_ERROR(dev, "no space while committing crtc\n");
-		return;
-	}
-	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
-	OUT_RING (evo, 0);
-	FIRE_RING (evo);
+	nv50_crtc_wait_complete(crtc);
+	nv50_display_flip_next(crtc, crtc->fb, NULL);
 }
 
 static bool
@@ -491,15 +516,15 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
 static int
 nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
			   struct drm_framebuffer *passed_fb,
-			   int x, int y, bool update, bool atomic)
+			   int x, int y, bool atomic)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_device *dev = nv_crtc->base.dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
-	int ret, format;
+	int ret;
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
@@ -525,28 +550,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		}
 	}
 
-	switch (drm_fb->depth) {
-	case 8:
-		format = NV50_EVO_CRTC_FB_DEPTH_8;
-		break;
-	case 15:
-		format = NV50_EVO_CRTC_FB_DEPTH_15;
-		break;
-	case 16:
-		format = NV50_EVO_CRTC_FB_DEPTH_16;
-		break;
-	case 24:
-	case 32:
-		format = NV50_EVO_CRTC_FB_DEPTH_24;
-		break;
-	case 30:
-		format = NV50_EVO_CRTC_FB_DEPTH_30;
-		break;
-	default:
-		NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
-		return -EINVAL;
-	}
-
 	nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
 	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
 	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -556,14 +559,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 			return ret;
 
 		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-		if (nv_crtc->fb.tile_flags == 0x7a00 ||
-		    nv_crtc->fb.tile_flags == 0xfe00)
-			OUT_RING(evo, NvEvoFB32);
-		else
-		if (nv_crtc->fb.tile_flags == 0x7000)
-			OUT_RING(evo, NvEvoFB16);
-		else
-			OUT_RING(evo, NvEvoVRAM_LP);
+		OUT_RING (evo, fb->r_dma);
 	}
 
 	ret = RING_SPACE(evo, 12);
@@ -571,45 +567,26 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return ret;
 
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
-	OUT_RING(evo, nv_crtc->fb.offset >> 8);
-	OUT_RING(evo, 0);
-	OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
-	if (!nv_crtc->fb.tile_flags) {
-		OUT_RING(evo, drm_fb->pitch | (1 << 20));
-	} else {
-		u32 tile_mode = fb->nvbo->tile_mode;
-		if (dev_priv->card_type >= NV_C0)
-			tile_mode >>= 4;
-		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
-	}
-	if (dev_priv->chipset == 0x50)
-		OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
-	else
-		OUT_RING(evo, format);
+	OUT_RING (evo, nv_crtc->fb.offset >> 8);
+	OUT_RING (evo, 0);
+	OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
+	OUT_RING (evo, fb->r_pitch);
+	OUT_RING (evo, fb->r_format);
 
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
-	OUT_RING(evo, fb->base.depth == 8 ?
+	OUT_RING (evo, fb->base.depth == 8 ?
 		 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
 
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
-	OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+	OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
-	OUT_RING(evo, (y << 16) | x);
+	OUT_RING (evo, (y << 16) | x);
 
 	if (nv_crtc->lut.depth != fb->base.depth) {
 		nv_crtc->lut.depth = fb->base.depth;
 		nv50_crtc_lut_load(crtc);
 	}
 
-	if (update) {
-		ret = RING_SPACE(evo, 2);
-		if (ret)
-			return ret;
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING(evo, 0);
-		FIRE_RING(evo);
-	}
-
 	return 0;
 }
 
@@ -619,8 +596,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
		    struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_connector *nv_connector = NULL;
 	uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
@@ -700,14 +676,25 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
 	nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
 
-	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
+	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
 
 static int
 nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			struct drm_framebuffer *old_fb)
 {
-	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+	int ret;
+
+	nv50_display_flip_stop(crtc);
+	ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+	if (ret)
+		return ret;
+
+	ret = nv50_crtc_wait_complete(crtc);
+	if (ret)
+		return ret;
+
+	return nv50_display_flip_next(crtc, crtc->fb, NULL);
 }
 
 static int
@@ -715,7 +702,14 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
			       struct drm_framebuffer *fb,
			       int x, int y, enum mode_set_atomic state)
 {
-	return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
+	int ret;
+
+	nv50_display_flip_stop(crtc);
+	ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
+	if (ret)
+		return ret;
+
+	return nv50_crtc_wait_complete(crtc);
 }
 
 static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -758,7 +752,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	nv_crtc->lut.depth = 0;
 
 	ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+			     0, 0x0000, &nv_crtc->lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -784,7 +778,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
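Illustrative sketch (not from the commit): nv50_crtc_wait_complete() above
follows a generic arm-notifier / kick / poll-with-timeout pattern. The same
loop in isolation, with read_time() standing in for ptimer->read():

	#include <stdbool.h>
	#include <stdint.h>

	static bool wait_notifier(volatile uint32_t *ntfy, uint64_t timeout_ns,
				  uint64_t (*read_time)(void))
	{
		uint64_t start = read_time();

		do {
			if (*ntfy)		/* EVO wrote the completion word */
				return true;
		} while (read_time() - start < timeout_ns);

		return false;			/* caller reports -EBUSY */
	}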
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 1b9ce3021aa3..9752c35bb84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -36,9 +36,9 @@
 static void
 nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
 {
-	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
 	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
 	NV_DEBUG_KMS(dev, "\n");
@@ -71,9 +71,9 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
 static void
 nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
 {
-	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
 	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
 	NV_DEBUG_KMS(dev, "\n");
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 875414b09ade..808f3ec8f827 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -41,8 +41,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
 	if (!nv_encoder->crtc)
@@ -216,8 +215,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
 	uint32_t mode_ctl = 0, mode_ctl2 = 0;
 	int ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7cc94ed9ed95..75a376cc342a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,6 +24,7 @@
  *
  */
 
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
 #include "nv50_display.h"
 #include "nouveau_crtc.h"
 #include "nouveau_encoder.h"
@@ -34,6 +35,7 @@
 #include "drm_crtc_helper.h"
 
 static void nv50_display_isr(struct drm_device *);
+static void nv50_display_bh(unsigned long);
 
 static inline int
 nv50_sor_nr(struct drm_device *dev)
@@ -172,16 +174,16 @@ nv50_display_init(struct drm_device *dev)
 	ret = nv50_evo_init(dev);
 	if (ret)
 		return ret;
-	evo = dev_priv->evo;
+	evo = nv50_display(dev)->master;
 
 	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
-	ret = RING_SPACE(evo, 11);
+	ret = RING_SPACE(evo, 15);
 	if (ret)
 		return ret;
 	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
 	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
-	OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+	OUT_RING(evo, NvEvoSync);
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
 	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
@@ -190,6 +192,11 @@ nv50_display_init(struct drm_device *dev)
 	OUT_RING(evo, 0);
 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
 	OUT_RING(evo, 0);
+	/* required to make display sync channels not hate life */
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+	OUT_RING (evo, 0x00000311);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+	OUT_RING (evo, 0x00000311);
 	FIRE_RING(evo);
 	if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
 		NV_ERROR(dev, "evo pushbuf stalled\n");
@@ -201,6 +208,8 @@ nv50_display_init(struct drm_device *dev)
 static int nv50_display_disable(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
+	struct nouveau_channel *evo = disp->master;
 	struct drm_crtc *drm_crtc;
 	int ret, i;
 
@@ -212,12 +221,12 @@ static int nv50_display_disable(struct drm_device *dev)
 		nv50_crtc_blank(crtc, true);
 	}
 
-	ret = RING_SPACE(dev_priv->evo, 2);
+	ret = RING_SPACE(evo, 2);
 	if (ret == 0) {
-		BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING(dev_priv->evo, 0);
+		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(evo, 0);
 	}
-	FIRE_RING(dev_priv->evo);
+	FIRE_RING(evo);
 
 	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
 	 * cleaning up?
@@ -267,10 +276,16 @@ int nv50_display_create(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &dev_priv->vbios.dcb;
 	struct drm_connector *connector, *ct;
+	struct nv50_display *priv;
 	int ret, i;
 
 	NV_DEBUG_KMS(dev, "\n");
 
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	dev_priv->engine.display.priv = priv;
+
 	/* init basic kernel modesetting */
 	drm_mode_config_init(dev);
 
@@ -330,7 +345,7 @@ int nv50_display_create(struct drm_device *dev)
 		}
 	}
 
-	INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+	tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
 	nouveau_irq_register(dev, 26, nv50_display_isr);
 
 	ret = nv50_display_init(dev);
@@ -345,12 +360,131 @@ int nv50_display_create(struct drm_device *dev)
 void
 nv50_display_destroy(struct drm_device *dev)
 {
+	struct nv50_display *disp = nv50_display(dev);
+
 	NV_DEBUG_KMS(dev, "\n");
 
 	drm_mode_config_cleanup(dev);
 
 	nv50_display_disable(dev);
 	nouveau_irq_unregister(dev, 26);
+	kfree(disp);
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+	struct nv50_display *disp = nv50_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+	struct nouveau_channel *evo = dispc->sync;
+	int ret;
+
+	ret = RING_SPACE(evo, 8);
+	if (ret) {
+		WARN_ON(1);
+		return;
+	}
+
+	BEGIN_RING(evo, 0, 0x0084, 1);
+	OUT_RING (evo, 0x00000000);
+	BEGIN_RING(evo, 0, 0x0094, 1);
+	OUT_RING (evo, 0x00000000);
+	BEGIN_RING(evo, 0, 0x00c0, 1);
+	OUT_RING (evo, 0x00000000);
+	BEGIN_RING(evo, 0, 0x0080, 1);
+	OUT_RING (evo, 0x00000000);
+	FIRE_RING (evo);
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nv50_display *disp = nv50_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+	struct nouveau_channel *evo = dispc->sync;
+	int ret;
+
+	ret = RING_SPACE(evo, 24);
+	if (unlikely(ret))
+		return ret;
+
+	/* synchronise with the rendering channel, if necessary */
+	if (likely(chan)) {
+		u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
+
+		ret = RING_SPACE(chan, 10);
+		if (ret) {
+			WIND_RING(evo);
+			return ret;
+		}
+
+		if (dev_priv->chipset < 0xc0) {
+			BEGIN_RING(chan, NvSubSw, 0x0060, 2);
+			OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
+			OUT_RING (chan, dispc->sem.offset);
+			BEGIN_RING(chan, NvSubSw, 0x006c, 1);
+			OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+			BEGIN_RING(chan, NvSubSw, 0x0064, 2);
+			OUT_RING (chan, dispc->sem.offset ^ 0x10);
+			OUT_RING (chan, 0x74b1e000);
+			BEGIN_RING(chan, NvSubSw, 0x0060, 1);
+			if (dev_priv->chipset < 0x84)
+				OUT_RING (chan, NvSema);
+			else
+				OUT_RING (chan, chan->vram_handle);
+		} else {
+			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+			OUT_RING (chan, upper_32_bits(offset));
+			OUT_RING (chan, lower_32_bits(offset));
+			OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+			OUT_RING (chan, 0x1002);
+			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+			OUT_RING (chan, upper_32_bits(offset));
+			OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+			OUT_RING (chan, 0x74b1e000);
+			OUT_RING (chan, 0x1001);
+		}
+		FIRE_RING (chan);
+	} else {
+		nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
+				0xf00d0000 | dispc->sem.value);
+	}
+
+	/* queue the flip on the crtc's "display sync" channel */
+	BEGIN_RING(evo, 0, 0x0100, 1);
+	OUT_RING (evo, 0xfffe0000);
+	BEGIN_RING(evo, 0, 0x0084, 5);
+	OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+	OUT_RING (evo, dispc->sem.offset);
+	OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
+	OUT_RING (evo, 0x74b1e000);
+	OUT_RING (evo, NvEvoSync);
+	BEGIN_RING(evo, 0, 0x00a0, 2);
+	OUT_RING (evo, 0x00000000);
+	OUT_RING (evo, 0x00000000);
+	BEGIN_RING(evo, 0, 0x00c0, 1);
+	OUT_RING (evo, nv_fb->r_dma);
+	BEGIN_RING(evo, 0, 0x0110, 2);
+	OUT_RING (evo, 0x00000000);
+	OUT_RING (evo, 0x00000000);
+	BEGIN_RING(evo, 0, 0x0800, 5);
+	OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+	OUT_RING (evo, 0);
+	OUT_RING (evo, (fb->height << 16) | fb->width);
+	OUT_RING (evo, nv_fb->r_pitch);
+	OUT_RING (evo, nv_fb->r_format);
+	BEGIN_RING(evo, 0, 0x0080, 1);
+	OUT_RING (evo, 0x00000000);
+	FIRE_RING (evo);
+
+	dispc->sem.offset ^= 0x10;
+	dispc->sem.value++;
+	return 0;
 }
 
 static u16
@@ -466,11 +600,12 @@ static void
 nv50_display_unk10_handler(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
 	u32 unk30 = nv_rd32(dev, 0x610030), mc;
 	int i, crtc, or, type = OUTPUT_ANY;
 
 	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
-	dev_priv->evo_irq.dcb = NULL;
+	disp->irq.dcb = NULL;
 
 	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
 
@@ -541,7 +676,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
 
 		if (dcb->type == type && (dcb->or & (1 << or))) {
 			nouveau_bios_run_display_table(dev, dcb, 0, -1);
-			dev_priv->evo_irq.dcb = dcb;
+			disp->irq.dcb = dcb;
 			goto ack;
 		}
 	}
@@ -587,15 +722,16 @@ static void
 nv50_display_unk20_handler(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+	struct nv50_display *disp = nv50_display(dev);
+	u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
 	struct dcb_entry *dcb;
 	int i, crtc, or, type = OUTPUT_ANY;
 
 	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
-	dcb = dev_priv->evo_irq.dcb;
+	dcb = disp->irq.dcb;
 	if (dcb) {
 		nouveau_bios_run_display_table(dev, dcb, 0, -2);
-		dev_priv->evo_irq.dcb = NULL;
+		disp->irq.dcb = NULL;
 	}
 
 	/* CRTC clock change requested? */
@@ -692,9 +828,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
 		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
 	}
 
-	dev_priv->evo_irq.dcb = dcb;
-	dev_priv->evo_irq.pclk = pclk;
-	dev_priv->evo_irq.script = script;
+	disp->irq.dcb = dcb;
+	disp->irq.pclk = pclk;
+	disp->irq.script = script;
 
 ack:
 	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
@@ -735,13 +871,13 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
 static void
 nv50_display_unk40_handler(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
-	u16 script = dev_priv->evo_irq.script;
-	u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
+	struct nv50_display *disp = nv50_display(dev);
+	struct dcb_entry *dcb = disp->irq.dcb;
+	u16 script = disp->irq.script;
+	u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
 
 	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
-	dev_priv->evo_irq.dcb = NULL;
+	disp->irq.dcb = NULL;
 	if (!dcb)
 		goto ack;
 
@@ -754,12 +890,10 @@ ack:
 	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
 }
 
-void
-nv50_display_irq_handler_bh(struct work_struct *work)
+static void
+nv50_display_bh(unsigned long data)
 {
-	struct drm_nouveau_private *dev_priv =
-		container_of(work, struct drm_nouveau_private, irq_work);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = (struct drm_device *)data;
 
 	for (;;) {
 		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
@@ -807,7 +941,7 @@ nv50_display_error_handler(struct drm_device *dev)
 static void
 nv50_display_isr(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
 	uint32_t delayed = 0;
 
 	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
@@ -835,8 +969,7 @@ nv50_display_isr(struct drm_device *dev)
					 NV50_PDISPLAY_INTR_1_CLK_UNK40));
 		if (clock) {
 			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
-			if (!work_pending(&dev_priv->irq_work))
-				queue_work(dev_priv->wq, &dev_priv->irq_work);
+			tasklet_schedule(&disp->tasklet);
 			delayed |= clock;
 			intr1 &= ~clock;
 		}
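Illustrative sketch (not from the commit): the page-flip path added above
double-buffers a semaphore in VRAM. Each flip is gated on 0xf00d0000|value
appearing at sem.offset, and the offset/value pair is advanced for the next
flip, with 0x74b1e000 pre-seeded in the other slot. The bookkeeping in
isolation:

	#include <stdint.h>

	struct flip_sem {
		uint32_t *vmap;		/* CPU mapping of the semaphore bo */
		uint32_t offset;	/* byte offset, toggles 0 <-> 0x10 */
		uint16_t value;		/* token the pending flip waits for */
	};

	static void flip_sem_release_and_advance(struct flip_sem *sem)
	{
		/* release: what the render channel (or CPU fallback) writes */
		sem->vmap[sem->offset / 4] = 0xf00d0000 | sem->value;
		sem->offset ^= 0x10;	/* switch to the other slot */
		sem->value++;		/* next flip waits on a new token */
	}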
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index f0e30b78ef6b..c2da503a22aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,7 +35,36 @@
 #include "nouveau_crtc.h"
 #include "nv50_evo.h"
 
-void nv50_display_irq_handler_bh(struct work_struct *work);
+struct nv50_display_crtc {
+	struct nouveau_channel *sync;
+	struct {
+		struct nouveau_bo *bo;
+		u32 offset;
+		u16 value;
+	} sem;
+};
+
+struct nv50_display {
+	struct nouveau_channel *master;
+	struct nouveau_gpuobj *ntfy;
+
+	struct nv50_display_crtc crtc[2];
+
+	struct tasklet_struct tasklet;
+	struct {
+		struct dcb_entry *dcb;
+		u16 script;
+		u32 pclk;
+	} irq;
+};
+
+static inline struct nv50_display *
+nv50_display(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	return dev_priv->engine.display.priv;
+}
+
 int nv50_display_early_init(struct drm_device *dev);
 void nv50_display_late_takedown(struct drm_device *dev);
 int nv50_display_create(struct drm_device *dev);
@@ -44,4 +73,15 @@ void nv50_display_destroy(struct drm_device *dev);
 int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
 int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
 
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+			   struct nouveau_channel *chan);
+void nv50_display_flip_stop(struct drm_crtc *);
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
+			  u64 size);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
+			u64 base, u64 size, struct nouveau_gpuobj **);
+
 #endif /* __NV50_DISPLAY_H__ */
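Minimal usage sketch (hypothetical, not from the commit): with the header
above, per-device display state replaces the old dev_priv->evo globals, so
call sites fetch the master EVO channel through the accessor:

	static struct nouveau_channel *
	example_get_master_evo(struct drm_device *dev)
	{
		struct nv50_display *disp = nv50_display(dev);

		return disp->master;	/* primary EVO channel */
	}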
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 0ea090f4244a..a2cfaa691e9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -27,20 +27,17 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_ramht.h"
+#include "nv50_display.h"
 
 static void
 nv50_evo_channel_del(struct nouveau_channel **pevo)
 {
-	struct drm_nouveau_private *dev_priv;
 	struct nouveau_channel *evo = *pevo;
 
 	if (!evo)
 		return;
 	*pevo = NULL;
 
-	dev_priv = evo->dev->dev_private;
-	dev_priv->evo_alloc &= ~(1 << evo->id);
-
 	nouveau_gpuobj_channel_takedown(evo);
 	nouveau_bo_unmap(evo->pushbuf_bo);
 	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -51,42 +48,61 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
 	kfree(evo);
 }
 
+void
+nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+{
+	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	u32 flags5;
+
+	if (dev_priv->chipset < 0xc0) {
+		/* not supported on 0x50, specified in format mthd */
+		if (dev_priv->chipset == 0x50)
+			memtype = 0;
+		flags5 = 0x00010000;
+	} else {
+		if (memtype & 0x80000000)
+			flags5 = 0x00000000; /* large pages */
+		else
+			flags5 = 0x00020000;
+	}
+
+	nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
+			     NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
+	nv_wo32(obj, 0x14, flags5);
+	dev_priv->engine.instmem.flush(obj->dev);
+}
+
 int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
-		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
-		    u32 flags5)
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
 {
-	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
-	struct drm_device *dev = evo->dev;
+	struct nv50_display *disp = nv50_display(evo->dev);
 	struct nouveau_gpuobj *obj = NULL;
 	int ret;
 
-	ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+	ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
 	if (ret)
 		return ret;
 	obj->engine = NVOBJ_ENGINE_DISPLAY;
 
-	nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
-	nv_wo32(obj, 4, limit);
-	nv_wo32(obj, 8, offset);
-	nv_wo32(obj, 12, 0x00000000);
-	nv_wo32(obj, 16, 0x00000000);
-	nv_wo32(obj, 20, flags5);
-	dev_priv->engine.instmem.flush(dev);
+	nv50_evo_dmaobj_init(obj, memtype, base, size);
 
-	ret = nouveau_ramht_insert(evo, name, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret) {
-		return ret;
-	}
+	ret = nouveau_ramht_insert(evo, handle, obj);
+	if (ret)
+		goto out;
 
-	return 0;
+	if (pobj)
+		nouveau_gpuobj_ref(obj, pobj);
+out:
+	nouveau_gpuobj_ref(NULL, &obj);
+	return ret;
 }
 
 static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+nv50_evo_channel_new(struct drm_device *dev, int chid,
+		     struct nouveau_channel **pevo)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
 	struct nouveau_channel *evo;
 	int ret;
 
@@ -95,25 +111,13 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
 		return -ENOMEM;
 	*pevo = evo;
 
-	for (evo->id = 0; evo->id < 5; evo->id++) {
-		if (dev_priv->evo_alloc & (1 << evo->id))
-			continue;
-
-		dev_priv->evo_alloc |= (1 << evo->id);
-		break;
-	}
-
-	if (evo->id == 5) {
-		kfree(evo);
-		return -ENODEV;
-	}
-
+	evo->id = chid;
 	evo->dev = dev;
 	evo->user_get = 4;
 	evo->user_put = 0;
 
 	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
-			     false, true, &evo->pushbuf_bo);
+			     &evo->pushbuf_bo);
 	if (ret == 0)
 		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
 	if (ret) {
@@ -138,8 +142,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
 	}
 
 	/* bind primary evo channel's ramht to the channel */
-	if (dev_priv->evo && evo != dev_priv->evo)
-		nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+	if (disp->master && evo != disp->master)
+		nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
 
 	return 0;
 }
@@ -212,21 +216,39 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
 	}
 }
 
+static void
+nv50_evo_destroy(struct drm_device *dev)
+{
+	struct nv50_display *disp = nv50_display(dev);
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		if (disp->crtc[i].sem.bo) {
+			nouveau_bo_unmap(disp->crtc[i].sem.bo);
+			nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
+		}
+		nv50_evo_channel_del(&disp->crtc[i].sync);
+	}
+	nouveau_gpuobj_ref(NULL, &disp->ntfy);
+	nv50_evo_channel_del(&disp->master);
+}
+
 static int
 nv50_evo_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
 	struct nouveau_gpuobj *ramht = NULL;
 	struct nouveau_channel *evo;
-	int ret;
+	int ret, i, j;
 
 	/* create primary evo channel, the one we use for modesetting
 	 * purporses
 	 */
-	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+	ret = nv50_evo_channel_new(dev, 0, &disp->master);
 	if (ret)
 		return ret;
-	evo = dev_priv->evo;
+	evo = disp->master;
 
 	/* setup object management on it, any other evo channel will
 	 * use this also as there's no per-channel support on the
@@ -236,109 +258,167 @@ nv50_evo_create(struct drm_device *dev)
				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
 	if (ret) {
 		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
-		nv50_evo_channel_del(&dev_priv->evo);
-		return ret;
+		goto err;
 	}
 
 	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
 	if (ret) {
 		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
-		nv50_evo_channel_del(&dev_priv->evo);
-		return ret;
+		goto err;
 	}
 
 	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
 	if (ret) {
 		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
-		nv50_evo_channel_del(&dev_priv->evo);
-		return ret;
+		goto err;
 	}
 
 	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
 	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret) {
-		nv50_evo_channel_del(&dev_priv->evo);
-		return ret;
-	}
+	if (ret)
+		goto err;
+
+	/* not sure exactly what this is..
+	 *
+	 * the first dword of the structure is used by nvidia to wait on
+	 * full completion of an EVO "update" command.
+	 *
+	 * method 0x8c on the master evo channel will fill a lot more of
+	 * this structure with some undefined info
+	 */
+	ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+	if (ret)
+		goto err;
+
+	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
+				  disp->ntfy->vinst, disp->ntfy->size, NULL);
+	if (ret)
+		goto err;
 
 	/* create some default objects for the scanout memtypes we support */
-	if (dev_priv->card_type >= NV_C0) {
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
-					  0, 0xffffffff, 0x00000000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
-		}
+	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
+				  0, dev_priv->vram_size, NULL);
+	if (ret)
+		goto err;
 
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
-					  0, dev_priv->vram_size, 0x00020000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
-		}
+	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
+				  0, dev_priv->vram_size, NULL);
+	if (ret)
+		goto err;
 
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
-					  0, dev_priv->vram_size, 0x00000000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
-		}
-	} else {
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
-					  0, 0xffffffff, 0x00010000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
-		}
+	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
+				  (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+				  0, dev_priv->vram_size, NULL);
+	if (ret)
+		goto err;
 
+	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
+				  (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+				  0, dev_priv->vram_size, NULL);
+	if (ret)
+		goto err;
 
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
-					  0, 0xffffffff, 0x00010000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
-		}
+	/* create "display sync" channels and other structures we need
+	 * to implement page flipping
+	 */
+	for (i = 0; i < 2; i++) {
+		struct nv50_display_crtc *dispc = &disp->crtc[i];
+		u64 offset;
 
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
-					  0, dev_priv->vram_size, 0x00010000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
+		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
+		if (ret)
+			goto err;
+
+		ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+				     0, 0x0000, &dispc->sem.bo);
+		if (!ret) {
+			offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
+			if (!ret)
+				ret = nouveau_bo_map(dispc->sem.bo);
+			if (ret)
+				nouveau_bo_ref(NULL, &dispc->sem.bo);
 		}
 
-		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
-					  0, dev_priv->vram_size, 0x00010000);
-		if (ret) {
-			nv50_evo_channel_del(&dev_priv->evo);
-			return ret;
-		}
+		if (ret)
+			goto err;
+
+		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
+					  offset, 4096, NULL);
+		if (ret)
+			goto err;
+
+		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
+					  0, dev_priv->vram_size, NULL);
+		if (ret)
+			goto err;
+
+		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
+					  (dev_priv->chipset < 0xc0 ?
+					   0x7a00 : 0xfe00),
+					  0, dev_priv->vram_size, NULL);
+		if (ret)
+			goto err;
+
+		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
+					  (dev_priv->chipset < 0xc0 ?
+					   0x7000 : 0xfe00),
+					  0, dev_priv->vram_size, NULL);
+		if (ret)
+			goto err;
+
+		for (j = 0; j < 4096; j += 4)
+			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
+		dispc->sem.offset = 0;
 	}
 
 	return 0;
+
+err:
+	nv50_evo_destroy(dev);
+	return ret;
 }
 
 int
 nv50_evo_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
+	struct nv50_display *disp = nv50_display(dev);
+	int ret, i;
 
-	if (!dev_priv->evo) {
+	if (!disp->master) {
 		ret = nv50_evo_create(dev);
 		if (ret)
 			return ret;
 	}
 
-	return nv50_evo_channel_init(dev_priv->evo);
+	ret = nv50_evo_channel_init(disp->master);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 2; i++) {
+		ret = nv50_evo_channel_init(disp->crtc[i].sync);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 void
 nv50_evo_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_display *disp = nv50_display(dev);
+	int i;
 
-	if (dev_priv->evo) {
-		nv50_evo_channel_fini(dev_priv->evo);
-		nv50_evo_channel_del(&dev_priv->evo);
+	for (i = 0; i < 2; i++) {
+		if (disp->crtc[i].sync)
+			nv50_evo_channel_fini(disp->crtc[i].sync);
 	}
+
+	if (disp->master)
+		nv50_evo_channel_fini(disp->master);
+
+	nv50_evo_destroy(dev);
 }
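Illustrative helper (not from the commit) for the memtype values passed to
nv50_evo_dmaobj_new() in nv50_evo_create() above: bit 31 appears to select
the large-page/tiled path in nv50_evo_dmaobj_init(), while bits 15:8 carry
the chipset-specific tile format (0x7a00/0x7000 pre-NVC0, 0xfe00 on NVC0+):

	#include <stdbool.h>
	#include <stdint.h>

	static uint32_t evo_fb_memtype(bool nvc0, bool fb16)
	{
		uint32_t memtype = 0x80000000;	/* tiled view of VRAM */

		if (nvc0)
			memtype |= 0xfe00;
		else
			memtype |= fb16 ? 0x7000 : 0x7a00;
		return memtype;
	}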
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aa4f0d3cea8e..3860ca62cb19 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -27,12 +27,6 @@
 #ifndef __NV50_EVO_H__
 #define __NV50_EVO_H__
 
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
-			u32 tile_flags, u32 magic_flags,
-			u32 offset, u32 limit);
-
 #define NV50_EVO_UPDATE 0x00000080
 #define NV50_EVO_UNK84 0x00000084
 #define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -119,5 +113,7 @@ int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
 /* Both of these are needed, otherwise nothing happens. */
 #define NV50_EVO_CRTC_SCALE_RES1         0x000008d8
 #define NV50_EVO_CRTC_SCALE_RES2         0x000008dc
+#define NV50_EVO_CRTC_UNK900             0x00000900
+#define NV50_EVO_CRTC_UNK904             0x00000904
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 50290dea0ac4..ed411d88451d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -8,31 +8,61 @@ struct nv50_fb_priv {
         dma_addr_t r100c08;
 };
 
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+        struct nv50_fb_priv *priv = pfb->priv;
+
+        if (drm_mm_initialized(&pfb->tag_heap))
+                drm_mm_takedown(&pfb->tag_heap);
+
+        if (priv->r100c08_page) {
+                pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+                               PCI_DMA_BIDIRECTIONAL);
+                __free_page(priv->r100c08_page);
+        }
+
+        kfree(priv);
+        pfb->priv = NULL;
+}
+
 static int
 nv50_fb_create(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
         struct nv50_fb_priv *priv;
+        u32 tagmem;
+        int ret;
 
         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
         if (!priv)
                 return -ENOMEM;
+        pfb->priv = priv;
 
         priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (!priv->r100c08_page) {
-                kfree(priv);
+                nv50_fb_destroy(dev);
                 return -ENOMEM;
         }
 
         priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
         if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
-                __free_page(priv->r100c08_page);
-                kfree(priv);
+                nv50_fb_destroy(dev);
                 return -EFAULT;
         }
 
-        dev_priv->engine.fb.priv = priv;
+        tagmem = nv_rd32(dev, 0x100320);
+        NV_DEBUG(dev, "%d tags available\n", tagmem);
+        ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+        if (ret) {
+                nv50_fb_destroy(dev);
+                return ret;
+        }
+
         return 0;
 }
 
@@ -81,18 +111,7 @@ nv50_fb_init(struct drm_device *dev)
 void
 nv50_fb_takedown(struct drm_device *dev)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nv50_fb_priv *priv;
-
-        priv = dev_priv->engine.fb.priv;
-        if (!priv)
-                return;
-        dev_priv->engine.fb.priv = NULL;
-
-        pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
-                       PCI_DMA_BIDIRECTIONAL);
-        __free_page(priv->r100c08_page);
-        kfree(priv);
+        nv50_fb_destroy(dev);
 }
 
 void
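
The tag heap initialised above backs nv50 compression: register 0x100320 reports how many compression tags the memory controller provides, and nv50_vram_new() further down carves blocks out of this drm_mm. A small sketch of the sizing rule it uses (one tag per 64 KiB of VRAM, times the compression factor); the helper name is hypothetical and locking/error handling are elided:

#include <drm/drm_mm.h>

/* Reserve "comp" tags per 64KiB of buffer, mirroring the arithmetic in
 * nv50_vram_new(): (size >> 4) * comp, with size counted in 4KiB pages.
 */
static struct drm_mm_node *demo_alloc_tags(struct drm_mm *tag_heap,
                                           u64 bytes, int comp)
{
        struct drm_mm_node *tag;
        unsigned long n = (bytes >> 16) * comp;

        tag = drm_mm_search_free(tag_heap, n, 0, 0);
        if (tag)
                tag = drm_mm_get_block(tag, n, 0);
        return tag;     /* NULL => fall back to a non-compressed memtype */
}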
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 8dd04c5dac67..c34a074f7ea1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -149,6 +149,7 @@ nv50_fifo_init_regs(struct drm_device *dev)
         nv_wr32(dev, 0x3204, 0);
         nv_wr32(dev, 0x3210, 0);
         nv_wr32(dev, 0x3270, 0);
+        nv_wr32(dev, 0x2044, 0x01003fff);
 
         /* Enable dummy channels setup by nv50_instmem.c */
         nv50_fifo_channel_enable(dev, 0);
@@ -273,7 +274,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
         nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
                              (4 << 24) /* SEARCH_FULL */ |
                              (chan->ramht->gpuobj->cinst >> 4));
-        nv_wo32(ramfc, 0x44, 0x2101ffff);
+        nv_wo32(ramfc, 0x44, 0x01003fff);
         nv_wo32(ramfc, 0x60, 0x7fffffff);
         nv_wo32(ramfc, 0x40, 0x00000000);
         nv_wo32(ramfc, 0x7c, 0x30000001);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 6b149c0cc06d..d4f4206dad7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -137,6 +137,7 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
         struct nv50_gpio_priv *priv = pgpio->priv;
         struct nv50_gpio_handler *gpioh, *tmp;
         struct dcb_gpio_entry *gpio;
+        LIST_HEAD(tofree);
         unsigned long flags;
 
         gpio = nouveau_bios_gpio_entry(dev, tag);
@@ -149,10 +150,14 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
                     gpioh->handler != handler ||
                     gpioh->data != data)
                         continue;
-                list_del(&gpioh->head);
-                kfree(gpioh);
+                list_move(&gpioh->head, &tofree);
         }
         spin_unlock_irqrestore(&priv->lock, flags);
+
+        list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+                flush_work_sync(&gpioh->work);
+                kfree(gpioh);
+        }
 }
 
 bool
157 162
158bool 163bool
@@ -205,7 +210,6 @@ nv50_gpio_init(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-        struct nv50_gpio_priv *priv;
         int ret;
 
         if (!pgpio->priv) {
@@ -213,7 +217,6 @@ nv50_gpio_init(struct drm_device *dev)
                 if (ret)
                         return ret;
         }
-        priv = pgpio->priv;
 
         /* disable, and ack any pending gpio interrupts */
         nv_wr32(dev, 0xe050, 0x00000000);
@@ -293,7 +296,7 @@ nv50_gpio_isr(struct drm_device *dev)
                         continue;
                 gpioh->inhibit = true;
 
-                queue_work(dev_priv->wq, &gpioh->work);
+                schedule_work(&gpioh->work);
         }
         spin_unlock(&priv->lock);
 }
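
The unregister path above fixes a teardown race: handlers are moved to a private list while the spinlock is held, then flushed and freed only after the lock is dropped, so flush_work_sync() (which may sleep) never runs in atomic context and a handler cannot be freed out from under its own work item. The same pattern in isolation, with a stand-in type:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_handler {                   /* stand-in for nv50_gpio_handler */
        struct list_head head;
        struct work_struct work;
};

static void demo_unregister_all(spinlock_t *lock, struct list_head *active)
{
        struct demo_handler *h, *tmp;
        LIST_HEAD(tofree);
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_for_each_entry_safe(h, tmp, active, head)
                list_move(&h->head, &tofree);       /* unhook under lock */
        spin_unlock_irqrestore(lock, flags);

        list_for_each_entry_safe(h, tmp, &tofree, head) {
                flush_work_sync(&h->work);          /* may sleep */
                kfree(h);
        }
}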
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 37e21d2be95b..e1267a1f6d10 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -95,13 +95,41 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
 }
 
 static void
-nv50_graph_init_regs(struct drm_device *dev)
+nv50_graph_init_zcull(struct drm_device *dev)
 {
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        int i;
+
         NV_DEBUG(dev, "\n");
 
-        nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
-                (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
-        nv_wr32(dev, 0x402ca8, 0x800);
+        switch (dev_priv->chipset & 0xf0) {
+        case 0x50:
+        case 0x80:
+        case 0x90:
+                nv_wr32(dev, 0x402ca8, 0x00000800);
+                break;
+        case 0xa0:
+        default:
+                nv_wr32(dev, 0x402cc0, 0x00000000);
+                if (dev_priv->chipset == 0xa0 ||
+                    dev_priv->chipset == 0xaa ||
+                    dev_priv->chipset == 0xac) {
+                        nv_wr32(dev, 0x402ca8, 0x00000802);
+                } else {
+                        nv_wr32(dev, 0x402cc0, 0x00000000);
+                        nv_wr32(dev, 0x402ca8, 0x00000002);
+                }
+
+                break;
+        }
+
+        /* zero out zcull regions */
+        for (i = 0; i < 8; i++) {
+                nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
+                nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
+                nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
+                nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
+        }
 }
 
 static int
@@ -136,6 +164,7 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
         }
         kfree(cp);
 
+        nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
         nv_wr32(dev, 0x400320, 4);
         nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
         nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
@@ -151,7 +180,7 @@ nv50_graph_init(struct drm_device *dev)
 
         nv50_graph_init_reset(dev);
         nv50_graph_init_regs__nv(dev);
-        nv50_graph_init_regs(dev);
+        nv50_graph_init_zcull(dev);
 
         ret = nv50_graph_init_ctxctl(dev);
         if (ret)
@@ -409,12 +438,7 @@ static int
 nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
                                u32 class, u32 mthd, u32 data)
 {
-        struct nouveau_page_flip_state s;
-
-        if (!nouveau_finish_page_flip(chan, &s)) {
-                /* XXX - Do something here */
-        }
-
+        nouveau_finish_page_flip(chan, NULL);
         return 0;
 }
 
@@ -912,10 +936,10 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
                         printk("\n");
                         NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
                                      " %08x %08x %08x\n",
-                                nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
-                                nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
-                                nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
-                                nv_rd32(dev, 0x40581c));
+                                nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
+                                nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
+                                nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
+                                nv_rd32(dev, 0x40501c));
 
         }
 
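
The ZCULL setup above keys off the chipset family in the high nibble of the chipset id, with nva0/nvaa/nvac treated specially inside the 0xa0-and-later group. Restated as a small lookup, assuming the same dev_priv->chipset encoding (the helper itself is illustrative):

/* Which value nv50_graph_init_zcull() writes to 0x402ca8. */
static u32 demo_zcull_mode(int chipset)
{
        switch (chipset & 0xf0) {
        case 0x50:
        case 0x80:
        case 0x90:
                return 0x00000800;
        default:        /* 0xa0 and later */
                if (chipset == 0xa0 || chipset == 0xaa || chipset == 0xac)
                        return 0x00000802;
                return 0x00000002;
        }
}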
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 300285ae8e9e..306d4b1f585f 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
 }
 
 struct nv50_gpuobj_node {
-        struct nouveau_vram *vram;
+        struct nouveau_mem *vram;
         struct nouveau_vma chan_vma;
         u32 align;
 };
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b4a5ecb199f9..c25c59386420 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -41,8 +41,7 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
 {
         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
         struct drm_device *dev = encoder->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_channel *evo = dev_priv->evo;
+        struct nouveau_channel *evo = nv50_display(dev)->master;
         int ret;
 
         if (!nv_encoder->crtc)
@@ -184,8 +183,7 @@ static void
 nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                   struct drm_display_mode *adjusted_mode)
 {
-        struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-        struct nouveau_channel *evo = dev_priv->evo;
+        struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
         struct drm_device *dev = encoder->dev;
         struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08241e5..b23794c8859b 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -31,7 +31,6 @@ void
 nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
                 struct nouveau_gpuobj *pgt[2])
 {
-        struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
         u64 phys = 0xdeadcafe00000000ULL;
         u32 coverage = 0;
 
@@ -58,10 +57,9 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 }
 
 static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-             u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 {
-        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+        struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
 
         phys |= 1; /* present */
         phys |= (u64)memtype << 40;
@@ -85,12 +83,13 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-            struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+            struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
+        u32 comp = (mem->memtype & 0x180) >> 7;
         u32 block;
         int i;
 
-        phys  = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+        phys  = nv50_vm_addr(vma, phys, mem->memtype, 0);
         pte <<= 3;
         cnt <<= 3;
 
@@ -107,6 +106,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
                 phys += block << (vma->node->type - 3);
                 cnt -= block;
+                if (comp) {
+                        u32 tag = mem->tag->start + ((delta >> 16) * comp);
+                        offset_h |= (tag << 17);
+                        delta += block << (vma->node->type - 3);
+                }
 
                 while (block) {
                         nv_wo32(pgt, pte + 0, offset_l);
@@ -119,11 +123,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 void
 nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-               u32 pte, dma_addr_t *list, u32 cnt)
+               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         pte <<= 3;
         while (cnt--) {
-                u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+                u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
                 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                 pte += 8;
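
The compression path in nv50_vm_map() assumes one tag covers 64 KiB of the buffer, which is why the tag index advances with delta >> 16. A worked example of that arithmetic, with values chosen purely for illustration: mapping the page at byte offset delta = 0x30000 in a buffer whose tag block starts at 0x100, with comp = 2, selects tag 0x100 + (0x30000 >> 16) * 2 = 0x106, and that index is folded into the high PTE word by "offset_h |= (tag << 17)".

/* The tag-index computation from the hunk above, in isolation. */
static u32 demo_comp_tag(u32 tag_start, u64 delta, u32 comp)
{
        return tag_start + (u32)((delta >> 16) * comp);
}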
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 58e98ad36347..ffbc3d8cf5be 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -48,42 +48,49 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 }
 
 void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
         struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *this;
-        struct nouveau_vram *vram;
+        struct nouveau_mem *mem;
 
-        vram = *pvram;
-        *pvram = NULL;
-        if (unlikely(vram == NULL))
+        mem = *pmem;
+        *pmem = NULL;
+        if (unlikely(mem == NULL))
                 return;
 
         mutex_lock(&mm->mutex);
-        while (!list_empty(&vram->regions)) {
-                this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+        while (!list_empty(&mem->regions)) {
+                this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
 
                 list_del(&this->rl_entry);
                 nouveau_mm_put(mm, this);
         }
+
+        if (mem->tag) {
+                drm_mm_put_block(mem->tag);
+                mem->tag = NULL;
+        }
         mutex_unlock(&mm->mutex);
 
-        kfree(vram);
+        kfree(mem);
 }
 
 int
 nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
-              u32 type, struct nouveau_vram **pvram)
+              u32 memtype, struct nouveau_mem **pmem)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
         struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *r;
-        struct nouveau_vram *vram;
+        struct nouveau_mem *mem;
+        int comp = (memtype & 0x300) >> 8;
+        int type = (memtype & 0x07f);
         int ret;
 
         if (!types[type])
@@ -92,32 +99,46 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
         align >>= 12;
         size_nc >>= 12;
 
-        vram = kzalloc(sizeof(*vram), GFP_KERNEL);
-        if (!vram)
+        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+        if (!mem)
                 return -ENOMEM;
 
-        INIT_LIST_HEAD(&vram->regions);
-        vram->dev = dev_priv->dev;
-        vram->memtype = type;
-        vram->size = size;
-
         mutex_lock(&mm->mutex);
+        if (comp) {
+                if (align == 16) {
+                        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+                        int n = (size >> 4) * comp;
+
+                        mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+                        if (mem->tag)
+                                mem->tag = drm_mm_get_block(mem->tag, n, 0);
+                }
+
+                if (unlikely(!mem->tag))
+                        comp = 0;
+        }
+
+        INIT_LIST_HEAD(&mem->regions);
+        mem->dev = dev_priv->dev;
+        mem->memtype = (comp << 7) | type;
+        mem->size = size;
+
         do {
                 ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
                 if (ret) {
                         mutex_unlock(&mm->mutex);
-                        nv50_vram_del(dev, &vram);
+                        nv50_vram_del(dev, &mem);
                         return ret;
                 }
 
-                list_add_tail(&r->rl_entry, &vram->regions);
+                list_add_tail(&r->rl_entry, &mem->regions);
                 size -= r->length;
         } while (size);
         mutex_unlock(&mm->mutex);
 
-        r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
-        vram->offset = (u64)r->offset << 12;
-        *pvram = vram;
+        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+        mem->offset = (u64)r->offset << 12;
+        *pmem = mem;
         return 0;
 }
 
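
nv50_vram_new() now receives a combined memtype: the tile type in bits 0-6 and the requested compression mode in bits 8-9. What gets stored in nouveau_mem is the *granted* compression packed at bit 7, which is exactly what nv50_vm_map() unpacks with (memtype & 0x180) >> 7. A sketch of the packing, with hypothetical helper names:

/* Decode the caller-supplied memtype and re-pack the granted value. */
static inline int demo_comp_requested(u32 memtype)
{
        return (memtype & 0x300) >> 8;          /* 0 = none, else comp mode */
}

static inline u32 demo_memtype_pack(int comp_granted, int type)
{
        return (comp_granted << 7) | (type & 0x7f);
}

If no tags could be reserved, comp is forced back to 0 and the buffer simply ends up non-compressed.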
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index e6f92c541dba..2886f2726a9e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -116,7 +116,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
 
         /* allocate vram for control regs, map into polling area */
         ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
-                             0, 0, true, true, &fifoch->user);
+                             0, 0, &fifoch->user);
         if (ret)
                 goto error;
 
@@ -418,6 +418,12 @@ nvc0_fifo_isr(struct drm_device *dev)
 {
         u32 stat = nv_rd32(dev, 0x002100);
 
+        if (stat & 0x00000100) {
+                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+                nv_wr32(dev, 0x002100, 0x00000100);
+                stat &= ~0x00000100;
+        }
+
         if (stat & 0x10000000) {
                 u32 units = nv_rd32(dev, 0x00259c);
                 u32 u = units;
@@ -446,10 +452,15 @@ nvc0_fifo_isr(struct drm_device *dev)
                 stat &= ~0x20000000;
         }
 
+        if (stat & 0x40000000) {
+                NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+                nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+                stat &= ~0x40000000;
+        }
+
         if (stat) {
                 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
                 nv_wr32(dev, 0x002100, stat);
+                nv_wr32(dev, 0x002140, 0);
         }
-
-        nv_wr32(dev, 0x2140, 0);
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index eb18a7e89f5b..3de9b721d8db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -299,6 +299,14 @@ nvc0_graph_takedown(struct drm_device *dev)
 }
 
 static int
+nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
+                          u32 class, u32 mthd, u32 data)
+{
+        nouveau_finish_page_flip(chan, NULL);
+        return 0;
+}
+
+static int
 nvc0_graph_create(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -395,6 +403,7 @@ nvc0_graph_create(struct drm_device *dev)
         nouveau_irq_register(dev, 25, nvc0_runk140_isr);
         NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
         NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+        NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
         NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
         NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
         return 0;
@@ -640,7 +649,6 @@ nvc0_graph_init(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-        struct nvc0_graph_priv *priv;
         int ret;
 
         dev_priv->engine.graph.accel_blocked = true;
@@ -665,7 +673,6 @@ nvc0_graph_init(struct drm_device *dev)
                 if (ret)
                         return ret;
         }
-        priv = pgraph->priv;
 
         nvc0_graph_init_obj418880(dev);
         nvc0_graph_init_regs(dev);
@@ -730,9 +737,12 @@ nvc0_graph_isr(struct drm_device *dev)
                 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
 
                 if (stat & 0x00000010) {
-                        NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
-                                     "class 0x%04x mthd 0x%04x data 0x%08x\n",
-                                chid, inst, subc, class, mthd, data);
+                        if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+                                NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+                                             "subc %d class 0x%04x mthd 0x%04x "
+                                             "data 0x%08x\n",
+                                        chid, inst, subc, class, mthd, data);
+                        }
                         nv_wr32(dev, 0x400100, 0x00000010);
                         stat &= ~0x00000010;
                 }
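
ILLEGAL_MTHD interrupts are now first offered to the software-method dispatcher; only when nouveau_gpuobj_mthd_call2() finds no registered handler is the error logged. That is what lets M2MF method 0x0500, registered earlier with NVOBJ_MTHD, complete page flips on Fermi. A sketch of the dispatch shape; the table-based lookup here is illustrative, not nouveau's actual RAMHT-backed implementation:

#include <linux/errno.h>
#include "nouveau_drv.h"

struct demo_mthd {
        u32 class;
        u32 mthd;
        int (*handler)(struct nouveau_channel *, u32, u32, u32);
};

/* Return the handler's result if one matches, -ENOENT so the ISR logs it. */
static int demo_mthd_call(const struct demo_mthd *tbl, int n,
                          struct nouveau_channel *chan,
                          u32 class, u32 mthd, u32 data)
{
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].class == class && tbl[i].mthd == mthd)
                        return tbl[i].handler(chan, class, mthd, data);
        }
        return -ENOENT;
}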
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index e4e83c2caf5b..69af0ba7edd3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 
 void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-            struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+            struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
         u32 next = 1 << (vma->node->type - 8);
 
@@ -75,11 +75,11 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 void
 nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-               u32 pte, dma_addr_t *list, u32 cnt)
+               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         pte <<= 3;
         while (cnt--) {
-                u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+                u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
                 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                 pte += 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 858eda5dedd1..67c6ec6f34ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -26,64 +26,78 @@
 #include "nouveau_drv.h"
 #include "nouveau_mm.h"
 
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+        1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+        0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+        3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+        0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+        3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+        3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+        3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
 bool
 nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 {
-        switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-        case 0x0000:
-        case 0xfe00:
-        case 0xdb00:
-        case 0x1100:
-                return true;
-        default:
-                break;
-        }
-
-        return false;
+        u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+        return likely((types[memtype] == 1));
 }
 
 int
 nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
-              u32 type, struct nouveau_vram **pvram)
+              u32 type, struct nouveau_mem **pmem)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
         struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *r;
-        struct nouveau_vram *vram;
+        struct nouveau_mem *mem;
         int ret;
 
         size >>= 12;
         align >>= 12;
         ncmin >>= 12;
 
-        vram = kzalloc(sizeof(*vram), GFP_KERNEL);
-        if (!vram)
+        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+        if (!mem)
                 return -ENOMEM;
 
-        INIT_LIST_HEAD(&vram->regions);
-        vram->dev = dev_priv->dev;
-        vram->memtype = type;
-        vram->size = size;
+        INIT_LIST_HEAD(&mem->regions);
+        mem->dev = dev_priv->dev;
+        mem->memtype = (type & 0xff);
+        mem->size = size;
 
         mutex_lock(&mm->mutex);
         do {
                 ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
                 if (ret) {
                         mutex_unlock(&mm->mutex);
-                        nv50_vram_del(dev, &vram);
+                        nv50_vram_del(dev, &mem);
                         return ret;
                 }
 
-                list_add_tail(&r->rl_entry, &vram->regions);
+                list_add_tail(&r->rl_entry, &mem->regions);
                 size -= r->length;
         } while (size);
         mutex_unlock(&mm->mutex);
 
-        r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
-        vram->offset = (u64)r->offset << 12;
-        *pvram = vram;
+        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+        mem->offset = (u64)r->offset << 12;
+        *pmem = mem;
         return 0;
 }
 
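
With the table above, tile-flag validation becomes a straight lookup: the memtype byte is pulled out of the GEM tile_flags and must name a plain (non-compressed) type, since compressed memtypes are chosen by the kernel rather than requested directly. A compact restatement, assuming the same NOUVEAU_GEM_TILE_LAYOUT_MASK = 0x0000ff00 encoding:

/* 0 = unsupported, 1 = non-compressed (acceptable), 3 = compressed. */
static bool demo_nvc0_memtype_valid(const u8 types[256], u32 tile_flags)
{
        u8 memtype = (tile_flags & 0x0000ff00) >> 8;

        return types[memtype] == 1;
}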
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index e2cfe80f6fca..5edd3a76fffa 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -94,6 +94,7 @@ struct drm_nouveau_setparam {
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
 #define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
 
+#define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
 #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
 #define NOUVEAU_GEM_TILE_16BPP       0x00000001
 #define NOUVEAU_GEM_TILE_32BPP       0x00000002