author    Francisco Jerez <currojerez@riseup.net>  2009-12-11 10:51:09 -0500
committer Ben Skeggs <bskeggs@redhat.com>          2010-01-10 18:03:34 -0500
commit    a0af9add499cf29a16707b7b148842992077f4cb (patch)
tree      1ca7dcbff4e328d9cd3182424b621ae4ba28c55c /drivers/gpu/drm
parent    cb00f7c1419d09ee0daf7d8bed86f253d98db7b4 (diff)
drm/nouveau: Make the MM aware of pre-G80 tiling.
This commit also has the following three bugfix commits from the
nouveau git tree squashed into it:

drm/nouveau: Fix up the tiling alignment restrictions for nv1x.
drm/nouveau: Fix up the nv2x tiling alignment restrictions.
drm/nv50: fix align typo for g9x

Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c   221
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h   22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c   87
3 files changed, 265 insertions(+), 65 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0cad6d834eb..1d6036fabd5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -37,6 +37,7 @@ static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct drm_device *dev = dev_priv->dev;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
 
         ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +45,83 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
         if (unlikely(nvbo->gem))
                 DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
+        if (nvbo->tile)
+                nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
+
         spin_lock(&dev_priv->ttm.bo_list_lock);
         list_del(&nvbo->head);
         spin_unlock(&dev_priv->ttm.bo_list_lock);
         kfree(nvbo);
 }
 
+static void
+nouveau_bo_fixup_align(struct drm_device *dev,
+                       uint32_t tile_mode, uint32_t tile_flags,
+                       int *align, int *size)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+        /*
+         * Some of the tile_flags have a periodic structure of N*4096 bytes,
+         * align to that as well as the page size. Overallocate memory to
+         * avoid corruption of other buffer objects.
+         */
+        if (dev_priv->card_type == NV_50) {
+                switch (tile_flags) {
+                case 0x1800:
+                case 0x2800:
+                case 0x4800:
+                case 0x7a00:
+                        if (dev_priv->chipset >= 0xA0) {
+                                /* This is based on high end cards with 448 bits
+                                 * memory bus, could be different elsewhere.*/
+                                *size += 6 * 28672;
+                                /* 8 * 28672 is the actual alignment requirement
+                                 * but we must also align to page size. */
+                                *align = 2 * 8 * 28672;
+                        } else if (dev_priv->chipset >= 0x90) {
+                                *size += 3 * 16384;
+                                *align = 12 * 16384;
+                        } else {
+                                *size += 3 * 8192;
+                                /* 12 * 8192 is the actual alignment requirement
+                                 * but we must also align to page size. */
+                                *align = 2 * 12 * 8192;
+                        }
+                        break;
+                default:
+                        break;
+                }
+
+        } else {
+                if (tile_mode) {
+                        if (dev_priv->chipset >= 0x40) {
+                                *align = 65536;
+                                *size = roundup(*size, 64 * tile_mode);
+
+                        } else if (dev_priv->chipset >= 0x30) {
+                                *align = 32768;
+                                *size = roundup(*size, 64 * tile_mode);
+
+                        } else if (dev_priv->chipset >= 0x20) {
+                                *align = 16384;
+                                *size = roundup(*size, 64 * tile_mode);
+
+                        } else if (dev_priv->chipset >= 0x10) {
+                                *align = 16384;
+                                *size = roundup(*size, 32 * tile_mode);
+                        }
+                }
+        }
+
+        *size = ALIGN(*size, PAGE_SIZE);
+
+        if (dev_priv->card_type == NV_50) {
+                *size = ALIGN(*size, 65536);
+                *align = max(65536, *align);
+        }
+}
+
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
                int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -70,46 +142,9 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
         nvbo->tile_mode = tile_mode;
         nvbo->tile_flags = tile_flags;
 
-        /*
-         * Some of the tile_flags have a periodic structure of N*4096 bytes,
-         * align to to that as well as the page size. Overallocate memory to
-         * avoid corruption of other buffer objects.
-         */
-        switch (tile_flags) {
-        case 0x1800:
-        case 0x2800:
-        case 0x4800:
-        case 0x7a00:
-                if (dev_priv->chipset >= 0xA0) {
-                        /* This is based on high end cards with 448 bits
-                         * memory bus, could be different elsewhere.*/
-                        size += 6 * 28672;
-                        /* 8 * 28672 is the actual alignment requirement,
-                         * but we must also align to page size. */
-                        align = 2 * 8 * 28672;
-                } else if (dev_priv->chipset >= 0x90) {
-                        size += 3 * 16384;
-                        align = 12 * 16834;
-                } else {
-                        size += 3 * 8192;
-                        /* 12 * 8192 is the actual alignment requirement,
-                         * but we must also align to page size. */
-                        align = 2 * 12 * 8192;
-                }
-                break;
-        default:
-                break;
-        }
-
+        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
         align >>= PAGE_SHIFT;
 
-        size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
-        if (dev_priv->card_type == NV_50) {
-                size = (size + 65535) & ~65535;
-                if (align < (65536 / PAGE_SIZE))
-                        align = (65536 / PAGE_SIZE);
-        }
-
         if (flags & TTM_PL_FLAG_VRAM)
                 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
         if (flags & TTM_PL_FLAG_TT)
@@ -421,6 +456,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
  * TTM_PL_{VRAM,TT} directly.
  */
+
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                               struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -455,11 +491,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 }
 
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
-                     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+                     int no_wait, struct ttm_mem_reg *new_mem)
 {
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct ttm_mem_reg *old_mem = &bo->mem;
         struct nouveau_channel *chan;
         uint64_t src_offset, dst_offset;
         uint32_t page_count;
@@ -559,7 +596,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
         if (ret)
                 goto out;
 
-        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
         if (ret)
                 goto out;
 
@@ -597,7 +634,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
         if (ret)
                 goto out;
 
-        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, new_mem);
         if (ret)
                 goto out;
 
@@ -612,52 +649,106 @@ out:
 }
 
 static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-                bool no_wait, struct ttm_mem_reg *new_mem)
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+                   struct nouveau_tile_reg **new_tile)
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct nouveau_bo *nvbo = nouveau_bo(bo);
         struct drm_device *dev = dev_priv->dev;
-        struct ttm_mem_reg *old_mem = &bo->mem;
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        uint64_t offset;
         int ret;
 
-        if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
-            !nvbo->no_vm) {
-                uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+                /* Nothing to do. */
+                *new_tile = NULL;
+                return 0;
+        }
+
+        offset = new_mem->mm_node->start << PAGE_SHIFT;
 
+        if (dev_priv->card_type == NV_50) {
                 ret = nv50_mem_vm_bind_linear(dev,
                                               offset + dev_priv->vm_vram_base,
                                               new_mem->size, nvbo->tile_flags,
                                               offset);
                 if (ret)
                         return ret;
+
+        } else if (dev_priv->card_type >= NV_10) {
+                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+                                                nvbo->tile_mode);
         }
 
+        return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+                      struct nouveau_tile_reg *new_tile,
+                      struct nouveau_tile_reg **old_tile)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct drm_device *dev = dev_priv->dev;
+
+        if (dev_priv->card_type >= NV_10 &&
+            dev_priv->card_type < NV_50) {
+                if (*old_tile)
+                        nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
+
+                *old_tile = new_tile;
+        }
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+                bool no_wait, struct ttm_mem_reg *new_mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct ttm_mem_reg *old_mem = &bo->mem;
+        struct nouveau_tile_reg *new_tile = NULL;
+        int ret = 0;
+
+        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+        if (ret)
+                return ret;
+
+        /* Software copy if the card isn't up and running yet. */
         if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
-            !dev_priv->channel)
-                return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+            !dev_priv->channel) {
+                ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+                goto out;
+        }
 
+        /* Fake bo copy. */
         if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                 BUG_ON(bo->mem.mm_node != NULL);
                 bo->mem = *new_mem;
                 new_mem->mm_node = NULL;
-                return 0;
+                goto out;
         }
 
-        if (new_mem->mem_type == TTM_PL_SYSTEM) {
-                if (old_mem->mem_type == TTM_PL_SYSTEM)
-                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-                if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
-                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-        } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
-                if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
-                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-        } else {
-                if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
-                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-        }
+        /* Hardware assisted copy. */
+        if (new_mem->mem_type == TTM_PL_SYSTEM)
+                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+        else if (old_mem->mem_type == TTM_PL_SYSTEM)
+                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+        else
+                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
 
-        return 0;
+        if (!ret)
+                goto out;
+
+        /* Fallback to software copy. */
+        ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+out:
+        if (ret)
+                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+        else
+                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+
+        return ret;
 }
 
 static int
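
The pre-G80 (tile_mode) branch of nouveau_bo_fixup_align() above is plain
arithmetic: the buffer size is rounded up to a whole number of tile rows
(64 times the pitch held in tile_mode, or 32 times on nv1x), then to the
page size, while the base alignment is forced to the chipset's tiling unit.
A minimal standalone sketch of just that branch, assuming a 4 KiB page size
and a hypothetical nv2x buffer as input (illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

/* Mirrors the pre-G80 branch of nouveau_bo_fixup_align() above. */
static void fixup_align_pre_g80(int chipset, uint32_t tile_mode,
                                int *align, int *size)
{
        if (!tile_mode)
                return;

        if (chipset >= 0x40) {
                *align = 65536;
                *size = ROUNDUP(*size, 64 * tile_mode);
        } else if (chipset >= 0x30) {
                *align = 32768;
                *size = ROUNDUP(*size, 64 * tile_mode);
        } else if (chipset >= 0x20) {
                *align = 16384;
                *size = ROUNDUP(*size, 64 * tile_mode);
        } else if (chipset >= 0x10) {
                *align = 16384;
                *size = ROUNDUP(*size, 32 * tile_mode);
        }

        /* Finally round to the page size, as the driver does. */
        *size = ROUNDUP(*size, PAGE_SIZE);
}

int main(void)
{
        /* Hypothetical nv2x buffer: ~1 MB, pitch value 256 in tile_mode. */
        int align = 0, size = 1000000;

        fixup_align_pre_g80(0x25, 256, &align, &size);
        /* 64 * 256 = 16384-byte tile rows: prints align=16384 size=1015808 */
        printf("align=%d size=%d\n", align, size);
        return 0;
}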
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 446a92ad2ee..9c9815bf505 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -59,11 +59,19 @@ struct nouveau_grctx;
 #define MAX_NUM_DCB_ENTRIES 16
 
 #define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_TILE_NR 15
 
 #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
 #define NV50_VM_BLOCK (512*1024*1024ULL)
 #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
 
+struct nouveau_tile_reg {
+        struct nouveau_fence *fence;
+        uint32_t addr;
+        uint32_t size;
+        bool used;
+};
+
 struct nouveau_bo {
         struct ttm_buffer_object bo;
         struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {
 
         uint32_t tile_mode;
         uint32_t tile_flags;
+        struct nouveau_tile_reg *tile;
 
         struct drm_gem_object *gem;
         struct drm_file *cpu_filp;
@@ -558,6 +567,12 @@ struct drm_nouveau_private {
                 unsigned long sg_handle;
         } gart_info;
 
+        /* nv10-nv40 tiling regions */
+        struct {
+                struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+                spinlock_t lock;
+        } tile;
+
         /* G8x/G9x virtual address space */
         uint64_t vm_gart_base;
         uint64_t vm_gart_size;
@@ -695,6 +710,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
 extern int nouveau_mem_init(struct drm_device *);
 extern int nouveau_mem_init_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
+                                                    uint32_t addr,
+                                                    uint32_t size,
+                                                    uint32_t pitch);
+extern void nv10_mem_expire_tiling(struct drm_device *dev,
+                                   struct nouveau_tile_reg *tile,
+                                   struct nouveau_fence *fence);
 extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
                                    uint32_t size, uint32_t flags,
                                    uint64_t phys);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5158a12f784..fb9bdd6edf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
 }
 
 /*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+                           uint32_t size, uint32_t pitch)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+        tile->addr = addr;
+        tile->size = size;
+        tile->used = !!pitch;
+        nouveau_fence_unref((void **)&tile->fence);
+
+        if (!pfifo->cache_flush(dev))
+                return;
+
+        pfifo->reassign(dev, false);
+        pfifo->cache_flush(dev);
+        pfifo->cache_pull(dev, false);
+
+        nouveau_wait_for_idle(dev);
+
+        pgraph->set_region_tiling(dev, i, addr, size, pitch);
+        pfb->set_region_tiling(dev, i, addr, size, pitch);
+
+        pfifo->cache_pull(dev, true);
+        pfifo->reassign(dev, true);
+}
+
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+                    uint32_t pitch)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+        struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
+        int i;
+
+        spin_lock(&dev_priv->tile.lock);
+
+        for (i = 0; i < pfb->num_tiles; i++) {
+                if (tile[i].used)
+                        /* Tile region in use. */
+                        continue;
+
+                if (tile[i].fence &&
+                    !nouveau_fence_signalled(tile[i].fence, NULL))
+                        /* Pending tile region. */
+                        continue;
+
+                if (max(tile[i].addr, addr) <
+                    min(tile[i].addr + tile[i].size, addr + size))
+                        /* Kill an intersecting tile region. */
+                        nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+
+                if (pitch && !found) {
+                        /* Free tile region. */
+                        nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
+                        found = &tile[i];
+                }
+        }
+
+        spin_unlock(&dev_priv->tile.lock);
+
+        return found;
+}
+
+void
+nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
+                       struct nouveau_fence *fence)
+{
+        if (fence) {
+                /* Mark it as pending. */
+                tile->fence = fence;
+                nouveau_fence_ref(fence);
+        }
+
+        tile->used = false;
+}
+
+/*
  * NV50 VM helpers
  */
 int
@@ -513,6 +599,7 @@ nouveau_mem_init(struct drm_device *dev)
 
         INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
         spin_lock_init(&dev_priv->ttm.bo_list_lock);
+        spin_lock_init(&dev_priv->tile.lock);
 
         dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
 
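
The recycling loop in nv10_mem_set_tiling() above relies on the standard
half-open interval test: regions [addr1, addr1+size1) and [addr2, addr2+size2)
intersect exactly when max(addr1, addr2) < min(addr1+size1, addr2+size2), so
merely adjacent regions are not killed. A standalone sketch with hypothetical
addresses:

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Half-open interval intersection, as used by nv10_mem_set_tiling(). */
static int regions_overlap(uint32_t addr1, uint32_t size1,
                           uint32_t addr2, uint32_t size2)
{
        return MAX(addr1, addr2) < MIN(addr1 + size1, addr2 + size2);
}

int main(void)
{
        /* Overlapping: [0x10000, 0x30000) and [0x20000, 0x40000) -> 1 */
        printf("%d\n", regions_overlap(0x10000, 0x20000, 0x20000, 0x20000));
        /* Adjacent: [0x10000, 0x20000) and [0x20000, 0x30000) -> 0 */
        printf("%d\n", regions_overlap(0x10000, 0x10000, 0x20000, 0x10000));
        return 0;
}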