author     Dave Airlie <airlied@redhat.com>    2011-01-16 21:20:31 -0500
committer  Dave Airlie <airlied@redhat.com>    2011-01-16 21:20:31 -0500
commit     51fda92223f5b668cccffd51bf3d7bedc93609ff (patch)
tree       326cb5d43889b55fad0b76cba9621a5c107b602f /drivers/gpu/drm/nouveau
parent     e78bf5e6cbe837daa6ab628a5f679548742994d3 (diff)
parent     f01a9720cb149e76155dc6e0e051058450305f4f (diff)
Merge remote branch 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next into drm-fixes
* 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next:
drm/nouveau: fix gpu page faults triggered by plymouthd
drm/nouveau: greatly simplify mm, killing some bugs in the process
drm/nvc0: enable protection of system-use-only structures in vm
drm/nv40: initialise 0x17xx on all chipsets that have it
drm/nv40: make detection of 0x4097-ful chipsets available everywhere
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_drv.h    |  15
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_fbcon.c  |   4
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_mem.c    |  26
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_mm.c     | 182
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_mm.h     |   4
-rw-r--r--   drivers/gpu/drm/nouveau/nv40_graph.c     |   3
-rw-r--r--   drivers/gpu/drm/nouveau/nv40_grctx.c     |  21
-rw-r--r--   drivers/gpu/drm/nouveau/nv40_mc.c        |  14
-rw-r--r--   drivers/gpu/drm/nouveau/nv50_instmem.c   |   7
-rw-r--r--   drivers/gpu/drm/nouveau/nvc0_graph.c     |   3
-rw-r--r--   drivers/gpu/drm/nouveau/nvc0_vm.c        |   4
11 files changed, 86 insertions, 197 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e32573b3a3..01bffc4412d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@ enum nouveau_flags {
 #define NVOBJ_FLAG_ZERO_ALLOC	(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE	(1 << 2)
 #define NVOBJ_FLAG_VM		(1 << 3)
+#define NVOBJ_FLAG_VM_USER	(1 << 4)
 
 #define NVOBJ_CINST_GLOBAL	0xdeadbeef
 
@@ -1576,6 +1577,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
 		dev->pdev->subsystem_device == sub_device;
 }
 
+/* returns 1 if device is one of the nv4x using the 0x4497 object class,
+ * helpful to determine a number of other hardware features
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if ((dev_priv->chipset & 0xf0) == 0x60)
+		return 1;
+
+	return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO  1
 #define NV_MEM_ACCESS_WO  2
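[Note: the nv44_graph_class() helper added above folds a per-chipset switch into a mask test. Below is a stand-alone, user-space sketch of that predicate, using only the 0x6x check and the 0x0baf mask visible in the hunk; the harness itself is not part of the patch.]

/* Sketch of the nv44_graph_class() predicate added above.  It mirrors
 * the mask logic only; it touches no hardware or driver state.
 */
#include <stdio.h>

static int nv44_graph_class_sketch(unsigned chipset)
{
	if ((chipset & 0xf0) == 0x60)
		return 1;
	/* bit set in 0x0baf => chipset keeps the 0x4097 class */
	return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
	unsigned chipset;

	for (chipset = 0x40; chipset <= 0x4f; chipset++)
		printf("nv%02x: %s\n", chipset,
		       nv44_graph_class_sketch(chipset) ? "0x4497" : "0x4097");
	return 0;
}

[Chipsets whose low nibble has its bit set in 0x0baf keep the 0x4097 class; the rest of the 0x4x family, and every 0x6x chipset, is treated as the 0x4497 ("nv44") class.]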
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6d56a54b6e2e..60769d2f9a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,8 +352,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 		      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = dev->mode_config.fb_base +
-			       (nvbo->bo.mem.start << PAGE_SHIFT);
+	info->fix.smem_start = nvbo->bo.mem.bus.base +
+			       nvbo->bo.mem.bus.offset;
 	info->fix.smem_len = size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb104bb..26347b7cd872 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 {
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *r;
-	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
-	int i;
+	u32 total = 0, free = 0;
 
 	mutex_lock(&mm->mutex);
 	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
-		       prefix, r->free ? "free" : "used", r->type,
-		       ((u64)r->offset << 12),
+		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+		       prefix, r->type, ((u64)r->offset << 12),
 		       (((u64)r->offset + r->length) << 12));
+
 		total += r->length;
-		ttotal[r->type] += r->length;
-		if (r->free)
-			tfree[r->type] += r->length;
-		else
-			tused[r->type] += r->length;
+		if (!r->type)
+			free += r->length;
 	}
 	mutex_unlock(&mm->mutex);
 
-	printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
-	for (i = 0; i < 3; i++) {
-		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
-		       "used 0x%010llx, free 0x%010llx\n", prefix,
-		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
-	}
+	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+	       prefix, (u64)total << 12, (u64)free << 12);
+	printk(KERN_DEBUG "%s block: 0x%08x\n",
+	       prefix, mm->block_size << 12);
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11eb701b..8844b50c3e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
 
 	b->offset = a->offset;
 	b->length = size;
-	b->free = a->free;
 	b->type = a->type;
 	a->offset += size;
 	a->length -= size;
 	list_add_tail(&b->nl_entry, &a->nl_entry);
-	if (b->free)
+	if (b->type == 0)
 		list_add_tail(&b->fl_entry, &a->fl_entry);
 	return b;
 }
 
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
-	struct nouveau_mm_node *prev, *next;
-
-	/* try to merge with free adjacent entries of same type */
-	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.prev != &rmm->nodes) {
-		if (prev->free && prev->type == this->type) {
-			prev->length += this->length;
-			region_put(rmm, this);
-			this = prev;
-		}
-	}
-
-	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.next != &rmm->nodes) {
-		if (next->free && next->type == this->type) {
-			next->offset = this->offset;
-			next->length += this->length;
-			region_put(rmm, this);
-			this = next;
-		}
-	}
-
-	return this;
-}
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
 {
-	u32 block_s, block_l;
+	struct nouveau_mm_node *prev = node(this, prev);
+	struct nouveau_mm_node *next = node(this, next);
 
-	this->free = true;
 	list_add(&this->fl_entry, &rmm->free);
-	this = nouveau_mm_merge(rmm, this);
-
-	/* any entirely free blocks now? we'll want to remove typing
-	 * on them now so they can be use for any memory allocation
-	 */
-	block_s = roundup(this->offset, rmm->block_size);
-	if (block_s + rmm->block_size > this->offset + this->length)
-		return;
+	this->type = 0;
 
-	/* split off any still-typed region at the start */
-	if (block_s != this->offset) {
-		if (!region_split(rmm, this, block_s - this->offset))
-			return;
+	if (prev && prev->type == 0) {
+		prev->length += this->length;
+		region_put(rmm, this);
+		this = prev;
 	}
 
-	/* split off the soon-to-be-untyped block(s) */
-	block_l = rounddown(this->length, rmm->block_size);
-	if (block_l != this->length) {
-		this = region_split(rmm, this, block_l);
-		if (!this)
-			return;
+	if (next && next->type == 0) {
+		next->offset = this->offset;
+		next->length += this->length;
+		region_put(rmm, this);
 	}
-
-	/* mark as having no type, and retry merge with any adjacent
-	 * untyped blocks
-	 */
-	this->type = 0;
-	nouveau_mm_merge(rmm, this);
 }
 
 int
 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
-	struct nouveau_mm_node *this, *tmp, *next;
-	u32 splitoff, avail, alloc;
-
-	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
-		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-		if (this->nl_entry.next == &rmm->nodes)
-			next = NULL;
-
-		/* skip wrongly typed blocks */
-		if (this->type && this->type != type)
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 min = size_nc ? size_nc : size;
+	u32 align_mask = align - 1;
+	u32 splitoff;
+	u32 s, e;
+
+	list_for_each_entry(this, &rmm->free, fl_entry) {
+		e = this->offset + this->length;
+		s = this->offset;
+
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, rmm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type)
+			e = rounddown(e, rmm->block_size);
+
+		s = (s + align_mask) & ~align_mask;
+		e &= ~align_mask;
+		if (s > e || e - s < min)
 			continue;
 
-		/* account for alignment */
-		splitoff = this->offset & (align - 1);
-		if (splitoff)
-			splitoff = align - splitoff;
-
-		if (this->length <= splitoff)
-			continue;
-
-		/* determine total memory available from this, and
-		 * the next block (if appropriate)
-		 */
-		avail = this->length;
-		if (next && next->free && (!next->type || next->type == type))
-			avail += next->length;
-
-		avail -= splitoff;
-
-		/* determine allocation size */
-		if (size_nc) {
-			alloc = min(avail, size);
-			alloc = rounddown(alloc, size_nc);
-			if (alloc == 0)
-				continue;
-		} else {
-			alloc = size;
-			if (avail < alloc)
-				continue;
-		}
-
-		/* untyped block, split off a chunk that's a multiple
-		 * of block_size and type it
-		 */
-		if (!this->type) {
-			u32 block = roundup(alloc + splitoff, rmm->block_size);
-			if (this->length < block)
-				continue;
-
-			this = region_split(rmm, this, block);
-			if (!this)
-				return -ENOMEM;
-
-			this->type = type;
-		}
-
-		/* stealing memory from adjacent block */
-		if (alloc > this->length) {
-			u32 amount = alloc - (this->length - splitoff);
-
-			if (!next->type) {
-				amount = roundup(amount, rmm->block_size);
-
-				next = region_split(rmm, next, amount);
-				if (!next)
-					return -ENOMEM;
-
-				next->type = type;
-			}
-
-			this->length += amount;
-			next->offset += amount;
-			next->length -= amount;
-			if (!next->length) {
-				list_del(&next->nl_entry);
-				list_del(&next->fl_entry);
-				kfree(next);
-			}
-		}
-
-		if (splitoff) {
-			if (!region_split(rmm, this, splitoff))
-				return -ENOMEM;
-		}
+		splitoff = s - this->offset;
+		if (splitoff && !region_split(rmm, this, splitoff))
+			return -ENOMEM;
 
-		this = region_split(rmm, this, alloc);
-		if (this == NULL)
+		this = region_split(rmm, this, min(size, e - s));
+		if (!this)
 			return -ENOMEM;
 
-		this->free = false;
+		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
 		return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return -ENOMEM;
-	heap->free = true;
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
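[Note: the rewritten allocator above drops the separate 'free' flag and the nouveau_mm_merge() helper in favour of one convention: a node whose type is 0 is free, and nouveau_mm_put() merges it with an untyped neighbour on either side. The following is a minimal user-space model of that merge-on-free step; the names and the plain doubly-linked structure are made up for the sketch and stand in for the kernel's list_head plumbing and region_put().]

/* Simplified model of the merge-on-free behaviour in the new
 * nouveau_mm_put(): freeing a node sets type = 0 and coalesces it with
 * an untyped neighbour on either side.
 */
#include <stdio.h>

struct mm_node {
	struct mm_node *prev, *next;
	unsigned offset, length;
	int type;			/* 0 == free */
};

static void mm_put(struct mm_node *this)
{
	struct mm_node *prev = this->prev, *next = this->next;

	this->type = 0;

	if (prev && prev->type == 0) {		/* merge into previous node */
		prev->length += this->length;
		prev->next = next;
		if (next)
			next->prev = prev;
		this = prev;
	}

	if (next && next->type == 0) {		/* absorb following node */
		this->length += next->length;
		this->next = next->next;
		if (next->next)
			next->next->prev = this;
	}
}

int main(void)
{
	/* three adjacent nodes: free, used, free */
	struct mm_node a = { .offset = 0,  .length = 16, .type = 0 };
	struct mm_node b = { .offset = 16, .length = 8,  .type = 1 };
	struct mm_node c = { .offset = 24, .length = 8,  .type = 0 };

	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

	mm_put(&b);	/* freeing b collapses a, b and c into one node */
	printf("node: offset=%u length=%u type=%d\n", a.offset, a.length, a.type);
	return 0;
}

[Freeing the middle node collapses the triple into one free node covering offsets 0 through 32, which is the invariant the simplified nouveau_mm_put() maintains on its node list.]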
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af3844933036..798eaf39691c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
 	struct list_head fl_entry;
 	struct list_head rl_entry;
 
-	bool free;
-	int  type;
-
+	u8  type;
 	u32 offset;
 	u32 length;
 };
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a0375a..8870d72388c8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
 	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
 
 	/* curie */
-	if (dev_priv->chipset >= 0x60 ||
-	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+	if (nv44_graph_class(dev))
 		NVOBJ_CLASS(dev, 0x4497, GR);
 	else
 		NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce585093264e..f70447d131d7 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
  */
 
 static int
-nv40_graph_4097(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if ((dev_priv->chipset & 0xf0) == 0x60)
-		return 0;
-
-	return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
-static int
 nv40_graph_vs_count(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x4009dc, 0x80000000);
 	} else {
 		cp_ctx(ctx, 0x400840, 20);
-		if (!nv40_graph_4097(ctx->dev)) {
+		if (nv44_graph_class(ctx->dev)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
 		}
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x400888, 0x00000040);
 		cp_ctx(ctx, 0x400894, 11);
 		gr_def(ctx, 0x400894, 0x00000040);
-		if (nv40_graph_4097(ctx->dev)) {
+		if (!nv44_graph_class(ctx->dev)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
 		}
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-	int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+	int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
 
 	cp_out (ctx, 0x300000);
 	cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
 	} else {
 		b0_offset = 0x1d40/4; /* 2200 */
 		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-		vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
 	}
 
 	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-	cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
 
 	offset = ctx->ctxvals_pos;
 	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c12ab6a..03c0d4c3f355 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
 int
 nv40_mc_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
-
 	/* Power up everything, resetting each individual unit will
 	 * be done later if needed.
 	 */
 	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
 
-	switch (dev_priv->chipset) {
-	case 0x44:
-	case 0x46: /* G72 */
-	case 0x4e:
-	case 0x4c: /* C51_G7X */
-		tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+	if (nv44_graph_class(dev)) {
+		u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		nv_wr32(dev, NV40_PMC_1700, tmp);
 		nv_wr32(dev, NV40_PMC_1704, 0);
 		nv_wr32(dev, NV40_PMC_1708, 0);
 		nv_wr32(dev, NV40_PMC_170C, tmp);
-		break;
-	default:
-		break;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd19a4b..ea0041810ae3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 	gpuobj->vinst = node->vram->offset;
 
 	if (gpuobj->flags & NVOBJ_FLAG_VM) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
-				     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+		u32 flags = NV_MEM_ACCESS_RW;
+		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+			flags |= NV_MEM_ACCESS_SYS;
+
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
 				     &node->chan_vma);
 		if (ret) {
 			vram->put(dev, &node->vram);
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5d5fa4..e6ea7d83187f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -105,7 +105,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
 				 &grch->unk418810);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251bb0ff4..e4e83c2caf5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 	phys >>= 8;
 
 	phys |= 0x00000001; /* present */
-//	if (vma->access & NV_MEM_ACCESS_SYS)
-//		phys |= 0x00000002;
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= 0x00000002;
 
 	phys |= ((u64)target << 32);
 	phys |= ((u64)memtype << 36);
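[Note: with the commented-out check re-enabled, nvc0_vm_addr() sets bit 1 of the PTE whenever the VMA was mapped with NV_MEM_ACCESS_SYS, which the nv50_instmem.c hunk above now requests for VM-mapped objects unless they carry NVOBJ_FLAG_VM_USER. A small user-space sketch of the PTE composition, limited to the fields visible in this hunk:]

/* Illustrative only: composes a PTE value from the fields shown above
 * (bit 0 = present, bit 1 = system access, target at bit 32, memtype at
 * bit 36).  No constants beyond those in the diff are implied.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t nvc0_pte_sketch(uint64_t phys, uint32_t memtype,
				uint32_t target, int sys_access)
{
	phys >>= 8;
	phys |= 0x00000001;		/* present */
	if (sys_access)
		phys |= 0x00000002;	/* NV_MEM_ACCESS_SYS */
	phys |= (uint64_t)target << 32;
	phys |= (uint64_t)memtype << 36;
	return phys;
}

int main(void)
{
	printf("pte = 0x%016llx\n",
	       (unsigned long long)nvc0_pte_sketch(0x12345600ULL, 0, 0, 1));
	return 0;
}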