diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/gpu | |
parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) |
Diffstat (limited to 'drivers/gpu')
88 files changed, 37494 insertions, 0 deletions
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c new file mode 100644 index 00000000000..f7eba0a0973 --- /dev/null +++ b/drivers/gpu/drm/drm_dp_i2c_helper.c | |||
@@ -0,0 +1,208 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/i2c.h> | ||
30 | #include "drm_dp_helper.h" | ||
31 | #include "drmP.h" | ||
32 | |||
33 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ | ||
34 | static int | ||
35 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | ||
36 | uint8_t write_byte, uint8_t *read_byte) | ||
37 | { | ||
38 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
39 | int ret; | ||
40 | |||
41 | ret = (*algo_data->aux_ch)(adapter, mode, | ||
42 | write_byte, read_byte); | ||
43 | return ret; | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * I2C over AUX CH | ||
48 | */ | ||
49 | |||
50 | /* | ||
51 | * Send the address. If the I2C link is running, this 'restarts' | ||
52 | * the connection with the new address, this is used for doing | ||
53 | * a write followed by a read (as needed for DDC) | ||
54 | */ | ||
55 | static int | ||
56 | i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) | ||
57 | { | ||
58 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
59 | int mode = MODE_I2C_START; | ||
60 | int ret; | ||
61 | |||
62 | if (reading) | ||
63 | mode |= MODE_I2C_READ; | ||
64 | else | ||
65 | mode |= MODE_I2C_WRITE; | ||
66 | algo_data->address = address; | ||
67 | algo_data->running = true; | ||
68 | ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); | ||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Stop the I2C transaction. This closes out the link, sending | ||
74 | * a bare address packet with the MOT bit turned off | ||
75 | */ | ||
76 | static void | ||
77 | i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) | ||
78 | { | ||
79 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
80 | int mode = MODE_I2C_STOP; | ||
81 | |||
82 | if (reading) | ||
83 | mode |= MODE_I2C_READ; | ||
84 | else | ||
85 | mode |= MODE_I2C_WRITE; | ||
86 | if (algo_data->running) { | ||
87 | (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); | ||
88 | algo_data->running = false; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Write a single byte to the current I2C address, the | ||
94 | * the I2C link must be running or this returns -EIO | ||
95 | */ | ||
96 | static int | ||
97 | i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) | ||
98 | { | ||
99 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
100 | int ret; | ||
101 | |||
102 | if (!algo_data->running) | ||
103 | return -EIO; | ||
104 | |||
105 | ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Read a single byte from the current I2C address, the | ||
111 | * I2C link must be running or this returns -EIO | ||
112 | */ | ||
113 | static int | ||
114 | i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) | ||
115 | { | ||
116 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
117 | int ret; | ||
118 | |||
119 | if (!algo_data->running) | ||
120 | return -EIO; | ||
121 | |||
122 | ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
/*
 * i2c master_xfer hook: run @num messages over the AUX channel.
 *
 * Each message gets an address (start/restart) cycle followed by its
 * payload bytes one at a time.  On success the i2c convention of
 * returning the number of messages processed is honored; on failure
 * the first negative error code is returned.  A stop is always sent,
 * using the direction of the last message attempted.
 */
static int
i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
		     struct i2c_msg *msgs,
		     int num)
{
	int ret = 0;
	bool reading = false;
	int m;
	int b;

	for (m = 0; m < num; m++) {
		u16 len = msgs[m].len;
		u8 *buf = msgs[m].buf;
		reading = (msgs[m].flags & I2C_M_RD) != 0;
		/* (Re)start the link with this message's address/direction. */
		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
		if (ret < 0)
			break;
		if (reading) {
			for (b = 0; b < len; b++) {
				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
				if (ret < 0)
					break;
			}
		} else {
			for (b = 0; b < len; b++) {
				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
				if (ret < 0)
					break;
			}
		}
		if (ret < 0)
			break;
	}
	/* All messages transferred: report the count, per i2c convention. */
	if (ret >= 0)
		ret = num;
	i2c_algo_dp_aux_stop(adapter, reading);
	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
	return ret;
}
165 | |||
166 | static u32 | ||
167 | i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) | ||
168 | { | ||
169 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | ||
170 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | ||
171 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | | ||
172 | I2C_FUNC_10BIT_ADDR; | ||
173 | } | ||
174 | |||
/* i2c algorithm vtable: message transfer and capability query for DP AUX. */
static const struct i2c_algorithm i2c_dp_aux_algo = {
	.master_xfer = i2c_algo_dp_aux_xfer,
	.functionality = i2c_algo_dp_aux_functionality,
};
179 | |||
180 | static void | ||
181 | i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) | ||
182 | { | ||
183 | (void) i2c_algo_dp_aux_address(adapter, 0, false); | ||
184 | (void) i2c_algo_dp_aux_stop(adapter, false); | ||
185 | |||
186 | } | ||
187 | |||
/*
 * Attach the DP AUX i2c algorithm to @adapter and reset the bus.
 * Always returns 0.
 */
static int
i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
{
	adapter->algo = &i2c_dp_aux_algo;
	adapter->retries = 3;	/* retry count; presumably consumed by the i2c core — confirm */
	i2c_dp_aux_reset_bus(adapter);
	return 0;
}
196 | |||
/*
 * Register an i2c adapter that tunnels I2C over the DisplayPort AUX
 * channel.  Returns 0 on success or a negative errno.
 */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
	int err = i2c_dp_aux_prepare_bus(adapter);

	if (err)
		return err;

	return i2c_add_adapter(adapter);
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c new file mode 100644 index 00000000000..34664587a74 --- /dev/null +++ b/drivers/gpu/drm/drm_sman.c | |||
@@ -0,0 +1,350 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA. | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
18 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
19 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
20 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * The above copyright notice and this permission notice (including the | ||
23 | * next paragraph) shall be included in all copies or substantial portions | ||
24 | * of the Software. | ||
25 | * | ||
26 | * | ||
27 | **************************************************************************/ | ||
28 | /* | ||
29 | * Simple memory manager interface that keeps track on allocate regions on a | ||
30 | * per "owner" basis. All regions associated with an "owner" can be released | ||
31 | * with a simple call. Typically if the "owner" exists. The owner is any | ||
32 | * "unsigned long" identifier. Can typically be a pointer to a file private | ||
33 | * struct or a context identifier. | ||
34 | * | ||
35 | * Authors: | ||
36 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> | ||
37 | */ | ||
38 | |||
39 | #include "drm_sman.h" | ||
40 | |||
/* Per-owner bookkeeping: hash entry, link on the sman's owner list,
 * and the list of memory blocks allocated by this owner. */
struct drm_owner_item {
	struct drm_hash_item owner_hash;	/* keyed by the owner identifier */
	struct list_head sman_list;		/* entry in sman->owner_items */
	struct list_head mem_blocks;		/* drm_memblock_item.owner_list entries */
};
46 | |||
/*
 * Tear down the bookkeeping structures: both hash tables and the
 * manager array.  Does not release outstanding memory blocks — see
 * drm_sman_cleanup() for that.
 */
void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	kfree(sman->mm);
}

EXPORT_SYMBOL(drm_sman_takedown);
55 | |||
/*
 * Initialize a simple memory manager: allocate @num_managers manager
 * slots and create the user-key and owner hash tables with the given
 * hash orders.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * created state is unwound.
 */
int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret = 0;

	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
	if (!sman->mm) {
		ret = -ENOMEM;
		goto out;
	}
	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);
	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto out1;
	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (!ret)
		goto out;	/* success path: everything is set up */

	/* user_hash_tab creation failed: unwind the owner table too. */
	drm_ht_remove(&sman->owner_hash_tab);
out1:
	kfree(sman->mm);
out:
	return ret;
}

EXPORT_SYMBOL(drm_sman_init);
84 | |||
85 | static void *drm_sman_mm_allocate(void *private, unsigned long size, | ||
86 | unsigned alignment) | ||
87 | { | ||
88 | struct drm_mm *mm = (struct drm_mm *) private; | ||
89 | struct drm_mm_node *tmp; | ||
90 | |||
91 | tmp = drm_mm_search_free(mm, size, alignment, 1); | ||
92 | if (!tmp) { | ||
93 | return NULL; | ||
94 | } | ||
95 | tmp = drm_mm_get_block(tmp, size, alignment); | ||
96 | return tmp; | ||
97 | } | ||
98 | |||
/* drm_mm-backed ->free hook: return a block to the range manager. */
static void drm_sman_mm_free(void *private, void *ref)
{
	drm_mm_put_block((struct drm_mm_node *) ref);
}
105 | |||
/* drm_mm-backed ->destroy hook: tear down and free the range manager. */
static void drm_sman_mm_destroy(void *private)
{
	struct drm_mm *mm = private;

	drm_mm_takedown(mm);
	kfree(mm);
}
112 | |||
113 | static unsigned long drm_sman_mm_offset(void *private, void *ref) | ||
114 | { | ||
115 | struct drm_mm_node *node = (struct drm_mm_node *) ref; | ||
116 | return node->start; | ||
117 | } | ||
118 | |||
119 | int | ||
120 | drm_sman_set_range(struct drm_sman * sman, unsigned int manager, | ||
121 | unsigned long start, unsigned long size) | ||
122 | { | ||
123 | struct drm_sman_mm *sman_mm; | ||
124 | struct drm_mm *mm; | ||
125 | int ret; | ||
126 | |||
127 | BUG_ON(manager >= sman->num_managers); | ||
128 | |||
129 | sman_mm = &sman->mm[manager]; | ||
130 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | ||
131 | if (!mm) { | ||
132 | return -ENOMEM; | ||
133 | } | ||
134 | sman_mm->private = mm; | ||
135 | ret = drm_mm_init(mm, start, size); | ||
136 | |||
137 | if (ret) { | ||
138 | kfree(mm); | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | sman_mm->allocate = drm_sman_mm_allocate; | ||
143 | sman_mm->free = drm_sman_mm_free; | ||
144 | sman_mm->destroy = drm_sman_mm_destroy; | ||
145 | sman_mm->offset = drm_sman_mm_offset; | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | EXPORT_SYMBOL(drm_sman_set_range); | ||
151 | |||
/*
 * Install a caller-supplied allocator for one manager slot instead of
 * the default drm_mm-based one.  The allocator struct is copied by
 * value.  Always returns 0.
 */
int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
		     struct drm_sman_mm * allocator)
{
	BUG_ON(manager >= sman->num_managers);
	sman->mm[manager] = *allocator;

	return 0;
}
EXPORT_SYMBOL(drm_sman_set_manager);
162 | |||
/*
 * Look up the bookkeeping item for @owner, creating and registering a
 * new one if none exists yet.  Returns NULL on allocation or hash
 * insertion failure.
 */
static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
						      unsigned long owner)
{
	int ret;
	struct drm_hash_item *owner_hash_item;
	struct drm_owner_item *owner_item;

	/* Fast path: owner already known. */
	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
	if (!ret) {
		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
				      owner_hash);
	}

	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
	if (!owner_item)
		goto out;

	INIT_LIST_HEAD(&owner_item->mem_blocks);
	owner_item->owner_hash.key = owner;
	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
		goto out1;

	list_add_tail(&owner_item->sman_list, &sman->owner_items);
	return owner_item;

out1:
	kfree(owner_item);
out:
	return NULL;
}
193 | |||
/*
 * Allocate @size bytes (alignment @alignment) from manager slot
 * @manager on behalf of @owner.
 *
 * On success the returned item is registered in the user hash table
 * (its user_hash.key is the handle handed to userspace) and linked on
 * the owner's block list.  Returns NULL on any failure; all partially
 * acquired resources — including the underlying allocation — are
 * released via the goto chain below.
 */
struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
					 unsigned long size, unsigned alignment,
					 unsigned long owner)
{
	void *tmp;
	struct drm_sman_mm *sman_mm;
	struct drm_owner_item *owner_item;
	struct drm_memblock_item *memblock;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	tmp = sman_mm->allocate(sman_mm->private, size, alignment);

	if (!tmp) {
		return NULL;
	}

	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);

	if (!memblock)
		goto out;

	memblock->mm_info = tmp;
	memblock->mm = sman_mm;
	memblock->sman = sman;

	/* Pick a fresh 32-bit key for userspace to refer to this block. */
	if (drm_ht_just_insert_please
	    (&sman->user_hash_tab, &memblock->user_hash,
	     (unsigned long)memblock, 32, 0, 0))
		goto out1;

	owner_item = drm_sman_get_owner_item(sman, owner);
	if (!owner_item)
		goto out2;

	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);

	return memblock;

out2:
	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
	kfree(memblock);
out:
	/* Give the raw allocation back to the underlying manager. */
	sman_mm->free(sman_mm->private, tmp);

	return NULL;
}

EXPORT_SYMBOL(drm_sman_alloc);
245 | |||
/*
 * Release one memory block: unlink it from its owner's list, drop its
 * user-hash entry, return the underlying allocation to its manager,
 * and free the bookkeeping struct.
 */
static void drm_sman_free(struct drm_memblock_item *item)
{
	struct drm_sman *sman = item->sman;

	list_del(&item->owner_list);
	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
	item->mm->free(item->mm->private, item->mm_info);
	kfree(item);
}
255 | |||
256 | int drm_sman_free_key(struct drm_sman *sman, unsigned int key) | ||
257 | { | ||
258 | struct drm_hash_item *hash_item; | ||
259 | struct drm_memblock_item *memblock_item; | ||
260 | |||
261 | if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) | ||
262 | return -EINVAL; | ||
263 | |||
264 | memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, | ||
265 | user_hash); | ||
266 | drm_sman_free(memblock_item); | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | EXPORT_SYMBOL(drm_sman_free_key); | ||
271 | |||
/*
 * Forget an owner: unlink it from the sman's owner list, drop its
 * hash entry and free it.  Caller must have emptied mem_blocks first.
 */
static void drm_sman_remove_owner(struct drm_sman *sman,
				  struct drm_owner_item *owner_item)
{
	list_del(&owner_item->sman_list);
	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
	kfree(owner_item);
}
279 | |||
280 | int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) | ||
281 | { | ||
282 | |||
283 | struct drm_hash_item *hash_item; | ||
284 | struct drm_owner_item *owner_item; | ||
285 | |||
286 | if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { | ||
287 | return -1; | ||
288 | } | ||
289 | |||
290 | owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); | ||
291 | if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { | ||
292 | drm_sman_remove_owner(sman, owner_item); | ||
293 | return -1; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | EXPORT_SYMBOL(drm_sman_owner_clean); | ||
300 | |||
/*
 * Free every memory block on @owner_item's list, then remove the
 * owner itself.  The _safe iterator is required because
 * drm_sman_free() unlinks each entry.
 */
static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
				      struct drm_owner_item *owner_item)
{
	struct drm_memblock_item *entry, *next;

	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
				 owner_list) {
		drm_sman_free(entry);
	}
	drm_sman_remove_owner(sman, owner_item);
}
312 | |||
313 | void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) | ||
314 | { | ||
315 | |||
316 | struct drm_hash_item *hash_item; | ||
317 | struct drm_owner_item *owner_item; | ||
318 | |||
319 | if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { | ||
320 | |||
321 | return; | ||
322 | } | ||
323 | |||
324 | owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); | ||
325 | drm_sman_do_owner_cleanup(sman, owner_item); | ||
326 | } | ||
327 | |||
328 | EXPORT_SYMBOL(drm_sman_owner_cleanup); | ||
329 | |||
/*
 * Global cleanup: release every owner (and therefore every block),
 * then destroy each manager's backing allocator that is still live.
 * The manager array and hash tables themselves are freed separately
 * by drm_sman_takedown().
 */
void drm_sman_cleanup(struct drm_sman *sman)
{
	struct drm_owner_item *entry, *next;
	unsigned int i;
	struct drm_sman_mm *sman_mm;

	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
		drm_sman_do_owner_cleanup(sman, entry);
	}
	if (sman->mm) {
		for (i = 0; i < sman->num_managers; ++i) {
			sman_mm = &sman->mm[i];
			/* ->private is only set for slots that were configured */
			if (sman_mm->private) {
				sman_mm->destroy(sman_mm->private);
				sman_mm->private = NULL;
			}
		}
	}
}

EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c new file mode 100644 index 00000000000..83b7b81bb2b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mem.c | |||
@@ -0,0 +1,387 @@ | |||
1 | /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- | ||
2 | */ | ||
3 | /* | ||
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | ||
5 | * All Rights Reserved. | ||
6 | * | ||
7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
8 | * copy of this software and associated documentation files (the | ||
9 | * "Software"), to deal in the Software without restriction, including | ||
10 | * without limitation the rights to use, copy, modify, merge, publish, | ||
11 | * distribute, sub license, and/or sell copies of the Software, and to | ||
12 | * permit persons to whom the Software is furnished to do so, subject to | ||
13 | * the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the | ||
16 | * next paragraph) shall be included in all copies or substantial portions | ||
17 | * of the Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | ||
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i915_drm.h" | ||
32 | #include "i915_drv.h" | ||
33 | |||
34 | /* This memory manager is integrated into the global/local lru | ||
35 | * mechanisms used by the clients. Specifically, it operates by | ||
36 | * setting the 'in_use' fields of the global LRU to indicate whether | ||
37 | * this region is privately allocated to a client. | ||
38 | * | ||
39 | * This does require the client to actually respect that field. | ||
40 | * | ||
41 | * Currently no effort is made to allocate 'private' memory in any | ||
42 | * clever way - the LRU information isn't used to determine which | ||
43 | * block to allocate, and the ring is drained prior to allocations -- | ||
44 | * in other words allocation is expensive. | ||
45 | */ | ||
/*
 * Mark every texture LRU region overlapped by block @p as in_use (or
 * not), bump their age, and move them to the head of the shared LRU
 * kept in the SAREA so clients observe correct LRU order.
 *
 * texList is a circular doubly-linked list implemented with array
 * indices; entry `nr` is the sentinel/head.
 */
static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
	struct drm_tex_region *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	/* Convert the byte range of the block into region indices. */
	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i)
		 */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i)
		 */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
87 | |||
88 | /* Very simple allocator for agp memory, working on a static range | ||
89 | * already mapped into each client's address space. | ||
90 | */ | ||
91 | |||
/*
 * Split free block @p so that [start, start + size) becomes its own
 * block, assign it to @file_priv, and return it.  Up to two new
 * blocks are created for the leading and trailing remainders.
 *
 * NOTE(review): if the second kmalloc fails, the function still
 * returns a block claimed by @file_priv but covering more than @size
 * bytes (the tail is not split off) — presumably an accepted
 * degradation on OOM; confirm against callers.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock),
						     GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock),
						     GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
133 | |||
134 | static struct mem_block *alloc_block(struct mem_block *heap, int size, | ||
135 | int align2, struct drm_file *file_priv) | ||
136 | { | ||
137 | struct mem_block *p; | ||
138 | int mask = (1 << align2) - 1; | ||
139 | |||
140 | for (p = heap->next; p != heap; p = p->next) { | ||
141 | int start = (p->start + mask) & ~mask; | ||
142 | if (p->file_priv == NULL && start + size <= p->start + p->size) | ||
143 | return split_block(p, start, size, file_priv); | ||
144 | } | ||
145 | |||
146 | return NULL; | ||
147 | } | ||
148 | |||
149 | static struct mem_block *find_block(struct mem_block *heap, int start) | ||
150 | { | ||
151 | struct mem_block *p; | ||
152 | |||
153 | for (p = heap->next; p != heap; p = p->next) | ||
154 | if (p->start == start) | ||
155 | return p; | ||
156 | |||
157 | return NULL; | ||
158 | } | ||
159 | |||
/*
 * Release @p back to the heap and coalesce with free neighbours.
 * The heap sentinel carries a non-NULL file_priv (see init_heap()) so
 * it can never be merged away.
 */
static void free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		/* Absorb the free successor into p. */
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		/* Fold p into its free predecessor; p is gone afterwards. */
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}
183 | |||
184 | /* Initialize. How to check for an uninitialized heap? | ||
185 | */ | ||
/*
 * Create a heap covering [start, start + size): a sentinel node
 * (*heap) plus one free block holding the whole range, linked as a
 * two-element circular list.  Returns 0 or -ENOMEM.
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

	if (!blocks)
		return -ENOMEM;

	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
	if (!*heap) {
		kfree(blocks);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	/* Non-NULL sentinel owner so free_block() never merges the head. */
	(*heap)->file_priv = (struct drm_file *) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
209 | |||
210 | /* Free all blocks associated with the releasing file. | ||
211 | */ | ||
/*
 * Free all blocks associated with the releasing file: first disown
 * and un-mark every block held by @file_priv, then make a second pass
 * coalescing runs of adjacent free blocks.
 */
void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
		      struct mem_block *heap)
{
	struct mem_block *p;

	/* Heap absent or never initialized: nothing to do. */
	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->file_priv == file_priv) {
			p->file_priv = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->file_priv == NULL && p->next->file_priv == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}
240 | |||
241 | /* Shutdown. | ||
242 | */ | ||
243 | void i915_mem_takedown(struct mem_block **heap) | ||
244 | { | ||
245 | struct mem_block *p; | ||
246 | |||
247 | if (!*heap) | ||
248 | return; | ||
249 | |||
250 | for (p = (*heap)->next; p != *heap;) { | ||
251 | struct mem_block *q = p; | ||
252 | p = p->next; | ||
253 | kfree(q); | ||
254 | } | ||
255 | |||
256 | kfree(*heap); | ||
257 | *heap = NULL; | ||
258 | } | ||
259 | |||
260 | static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region) | ||
261 | { | ||
262 | switch (region) { | ||
263 | case I915_MEM_REGION_AGP: | ||
264 | return &dev_priv->agp_heap; | ||
265 | default: | ||
266 | return NULL; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | /* IOCTL HANDLERS */ | ||
271 | |||
/*
 * Ioctl handler: allocate a block from the requested region's heap,
 * mark its texture regions in-use, and copy the resulting offset back
 * to userspace.
 *
 * NOTE(review): if the copy-out fails the block stays allocated and
 * marked — presumably relying on i915_mem_release() at file close;
 * confirm.
 */
int i915_mem_alloc(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t *alloc = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, alloc->region);
	if (!heap || !*heap)
		return -EFAULT;

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc->alignment < 12)
		alloc->alignment = 12;

	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);

	if (!block)
		return -ENOMEM;

	mark_block(dev, block, 1);

	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
			     sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
309 | |||
/*
 * Ioctl handler: free a previously allocated block identified by its
 * region and start offset.  Only the owning file may free a block
 * (-EPERM otherwise).
 */
int i915_mem_free(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t *memfree = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, memfree->region);
	if (!heap || !*heap)
		return -EFAULT;

	block = find_block(*heap, memfree->region_offset);
	if (!block)
		return -EFAULT;

	if (block->file_priv != file_priv)
		return -EPERM;

	/* Clear the in-use marking before the block is coalesced away. */
	mark_block(dev, block, 0);
	free_block(block);
	return 0;
}
337 | |||
/*
 * Ioctl handler: create the heap for a region.  Fails if the driver
 * is not initialized, the region is unknown, or the heap already
 * exists.
 */
int i915_mem_init_heap(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t *initheap = data;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, initheap->region);
	if (!heap)
		return -EFAULT;

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return -EFAULT;
	}

	return init_heap(heap, initheap->start, initheap->size);
}
361 | |||
362 | int i915_mem_destroy_heap( struct drm_device *dev, void *data, | ||
363 | struct drm_file *file_priv ) | ||
364 | { | ||
365 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
366 | drm_i915_mem_destroy_heap_t *destroyheap = data; | ||
367 | struct mem_block **heap; | ||
368 | |||
369 | if ( !dev_priv ) { | ||
370 | DRM_ERROR( "called with no initialization\n" ); | ||
371 | return -EINVAL; | ||
372 | } | ||
373 | |||
374 | heap = get_heap( dev_priv, destroyheap->region ); | ||
375 | if (!heap) { | ||
376 | DRM_ERROR("get_heap failed"); | ||
377 | return -EFAULT; | ||
378 | } | ||
379 | |||
380 | if (!*heap) { | ||
381 | DRM_ERROR("heap not initialized?"); | ||
382 | return -EFAULT; | ||
383 | } | ||
384 | |||
385 | i915_mem_takedown( heap ); | ||
386 | return 0; | ||
387 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c new file mode 100644 index 00000000000..0e3241c39b8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -0,0 +1,484 @@ | |||
1 | /* | ||
2 | * Copyright 2005-2006 Stephane Marchesin | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "drm.h" | ||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_drm.h" | ||
29 | #include "nouveau_dma.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | |||
32 | static int | ||
33 | nouveau_channel_pushbuf_init(struct nouveau_channel *chan) | ||
34 | { | ||
35 | u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT; | ||
36 | struct drm_device *dev = chan->dev; | ||
37 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
38 | int ret; | ||
39 | |||
40 | /* allocate buffer object */ | ||
41 | ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo); | ||
42 | if (ret) | ||
43 | goto out; | ||
44 | |||
45 | ret = nouveau_bo_pin(chan->pushbuf_bo, mem); | ||
46 | if (ret) | ||
47 | goto out; | ||
48 | |||
49 | ret = nouveau_bo_map(chan->pushbuf_bo); | ||
50 | if (ret) | ||
51 | goto out; | ||
52 | |||
53 | /* create DMA object covering the entire memtype where the push | ||
54 | * buffer resides, userspace can submit its own push buffers from | ||
55 | * anywhere within the same memtype. | ||
56 | */ | ||
57 | chan->pushbuf_base = chan->pushbuf_bo->bo.offset; | ||
58 | if (dev_priv->card_type >= NV_50) { | ||
59 | ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm, | ||
60 | &chan->pushbuf_vma); | ||
61 | if (ret) | ||
62 | goto out; | ||
63 | |||
64 | if (dev_priv->card_type < NV_C0) { | ||
65 | ret = nouveau_gpuobj_dma_new(chan, | ||
66 | NV_CLASS_DMA_IN_MEMORY, 0, | ||
67 | (1ULL << 40), | ||
68 | NV_MEM_ACCESS_RO, | ||
69 | NV_MEM_TARGET_VM, | ||
70 | &chan->pushbuf); | ||
71 | } | ||
72 | chan->pushbuf_base = chan->pushbuf_vma.offset; | ||
73 | } else | ||
74 | if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { | ||
75 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | ||
76 | dev_priv->gart_info.aper_size, | ||
77 | NV_MEM_ACCESS_RO, | ||
78 | NV_MEM_TARGET_GART, | ||
79 | &chan->pushbuf); | ||
80 | } else | ||
81 | if (dev_priv->card_type != NV_04) { | ||
82 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | ||
83 | dev_priv->fb_available_size, | ||
84 | NV_MEM_ACCESS_RO, | ||
85 | NV_MEM_TARGET_VRAM, | ||
86 | &chan->pushbuf); | ||
87 | } else { | ||
88 | /* NV04 cmdbuf hack, from original ddx.. not sure of it's | ||
89 | * exact reason for existing :) PCI access to cmdbuf in | ||
90 | * VRAM. | ||
91 | */ | ||
92 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | ||
93 | pci_resource_start(dev->pdev, 1), | ||
94 | dev_priv->fb_available_size, | ||
95 | NV_MEM_ACCESS_RO, | ||
96 | NV_MEM_TARGET_PCI, | ||
97 | &chan->pushbuf); | ||
98 | } | ||
99 | |||
100 | out: | ||
101 | if (ret) { | ||
102 | NV_ERROR(dev, "error initialising pushbuf: %d\n", ret); | ||
103 | nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); | ||
104 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); | ||
105 | if (chan->pushbuf_bo) { | ||
106 | nouveau_bo_unmap(chan->pushbuf_bo); | ||
107 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
/* allocates and initializes a fifo for user space consumption
 *
 * On success, *chan_ret holds the new channel with one userspace use
 * count (chan->users), one kref, and chan->mutex LOCKED — the caller is
 * expected to release it with nouveau_channel_put().  Returns 0 or a
 * negative errno.
 */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	/* one kref for the caller, one use count for userspace; the mutex
	 * is taken here and stays held across the whole init sequence.
	 */
	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id: claim the first free slot in the
	 * per-device channel table, under the channels spinlock.  This
	 * also publishes the channel (takes a table reference).
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* no free slot: the channel was never published, so a plain
	 * kfree (not nouveau_channel_put) is correct here.
	 */
	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);

	/* setup channel's memory and vm; from here on, errors unwind via
	 * nouveau_channel_put() since the channel is already published.
	 */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* offsets of the USER put/get registers within the channel's
	 * control window
	 */
	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	/* link the channel into the owning file's channel list */
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}
	*chan_ret = chan;
	return 0;
}
222 | |||
223 | struct nouveau_channel * | ||
224 | nouveau_channel_get_unlocked(struct nouveau_channel *ref) | ||
225 | { | ||
226 | struct nouveau_channel *chan = NULL; | ||
227 | |||
228 | if (likely(ref && atomic_inc_not_zero(&ref->users))) | ||
229 | nouveau_channel_ref(ref, &chan); | ||
230 | |||
231 | return chan; | ||
232 | } | ||
233 | |||
234 | struct nouveau_channel * | ||
235 | nouveau_channel_get(struct drm_file *file_priv, int id) | ||
236 | { | ||
237 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
238 | struct nouveau_channel *chan; | ||
239 | |||
240 | spin_lock(&fpriv->lock); | ||
241 | list_for_each_entry(chan, &fpriv->channels, list) { | ||
242 | if (chan->id == id) { | ||
243 | chan = nouveau_channel_get_unlocked(chan); | ||
244 | spin_unlock(&fpriv->lock); | ||
245 | mutex_lock(&chan->mutex); | ||
246 | return chan; | ||
247 | } | ||
248 | } | ||
249 | spin_unlock(&fpriv->lock); | ||
250 | |||
251 | return ERR_PTR(-EINVAL); | ||
252 | } | ||
253 | |||
/* Drop one use count on *pchan (mutex not held).  When the last use
 * count goes away the channel is idled, taken off the hardware and all
 * its resources destroyed; the kref held through *pchan is always
 * released and *pchan is set to NULL.
 */
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled. they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	/* re-enable fifo caches now that the channel is off the hw */
	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	/* drop the caller's kref; the struct itself is freed by
	 * nouveau_channel_del() once the last kref is gone
	 */
	nouveau_channel_ref(NULL, pchan);
}
316 | |||
317 | void | ||
318 | nouveau_channel_put(struct nouveau_channel **pchan) | ||
319 | { | ||
320 | mutex_unlock(&(*pchan)->mutex); | ||
321 | nouveau_channel_put_unlocked(pchan); | ||
322 | } | ||
323 | |||
324 | static void | ||
325 | nouveau_channel_del(struct kref *ref) | ||
326 | { | ||
327 | struct nouveau_channel *chan = | ||
328 | container_of(ref, struct nouveau_channel, ref); | ||
329 | |||
330 | kfree(chan); | ||
331 | } | ||
332 | |||
333 | void | ||
334 | nouveau_channel_ref(struct nouveau_channel *chan, | ||
335 | struct nouveau_channel **pchan) | ||
336 | { | ||
337 | if (chan) | ||
338 | kref_get(&chan->ref); | ||
339 | |||
340 | if (*pchan) | ||
341 | kref_put(&(*pchan)->ref, nouveau_channel_del); | ||
342 | |||
343 | *pchan = chan; | ||
344 | } | ||
345 | |||
346 | void | ||
347 | nouveau_channel_idle(struct nouveau_channel *chan) | ||
348 | { | ||
349 | struct drm_device *dev = chan->dev; | ||
350 | struct nouveau_fence *fence = NULL; | ||
351 | int ret; | ||
352 | |||
353 | nouveau_fence_update(chan); | ||
354 | |||
355 | if (chan->fence.sequence != chan->fence.sequence_ack) { | ||
356 | ret = nouveau_fence_new(chan, &fence, true); | ||
357 | if (!ret) { | ||
358 | ret = nouveau_fence_wait(fence, false, false); | ||
359 | nouveau_fence_unref(&fence); | ||
360 | } | ||
361 | |||
362 | if (ret) | ||
363 | NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); | ||
364 | } | ||
365 | } | ||
366 | |||
/* cleans up all the fifos from file_priv
 *
 * Called on file close: drops the userspace use count of every channel
 * still owned by file_priv and unlinks it from the file's channel list.
 */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		/* only returns channels belonging to file_priv, with a
		 * reference held and the channel mutex locked
		 */
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		/* NOTE(review): list_del is not under fpriv->lock here,
		 * unlike list_add in nouveau_channel_alloc — presumably
		 * safe because the file is being torn down; confirm.
		 */
		list_del(&chan->list);
		atomic_dec(&chan->users);	/* drop userspace's use count */
		nouveau_channel_put(&chan);
	}
}
387 | |||
388 | |||
389 | /*********************************** | ||
390 | * ioctls wrapping the functions | ||
391 | ***********************************/ | ||
392 | |||
/* DRM_NOUVEAU_CHANNEL_ALLOC ioctl: create a new fifo for the calling
 * client and report the channel id, push buffer placement domains,
 * built-in subchannel bindings and a GEM handle for the notifier
 * memory back to userspace.
 */
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	/* no acceleration engine, no channels */
	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
		return -ENODEV;

	/* ~0 is not a valid ctxdma handle */
	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	/* tell userspace where its own push buffers may live */
	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	/* report the m2mf (and, pre-NV_C0, software) subchannel classes
	 * bound by the kernel
	 */
	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle = NvM2MF;
		if (dev_priv->card_type < NV_50)
			init->subchan[0].grclass = 0x0039;
		else
			init->subchan[0].grclass = 0x5039;
		init->subchan[1].handle = NvSw;
		init->subchan[1].grclass = NV_SW;
		init->nr_subchan = 2;
	} else {
		init->subchan[0].handle = 0x9039;
		init->subchan[0].grclass = 0x9039;
		init->nr_subchan = 1;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	/* only keep the userspace use count if the handle was created;
	 * otherwise the put below destroys the channel again
	 */
	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
447 | |||
448 | static int | ||
449 | nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | ||
450 | struct drm_file *file_priv) | ||
451 | { | ||
452 | struct drm_nouveau_channel_free *req = data; | ||
453 | struct nouveau_channel *chan; | ||
454 | |||
455 | chan = nouveau_channel_get(file_priv, req->channel); | ||
456 | if (IS_ERR(chan)) | ||
457 | return PTR_ERR(chan); | ||
458 | |||
459 | list_del(&chan->list); | ||
460 | atomic_dec(&chan->users); | ||
461 | nouveau_channel_put(&chan); | ||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | /*********************************** | ||
466 | * finally, the ioctl table | ||
467 | ***********************************/ | ||
468 | |||
/* nouveau-specific ioctl table.  Every entry is DRM_UNLOCKED and needs
 * an authenticated client; SETPARAM is additionally restricted to the
 * DRM master / root.
 */
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

/* number of entries above, exported to the DRM core */
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c new file mode 100644 index 00000000000..8e1592368cc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Red Hat <bskeggs@redhat.com> | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining | ||
5 | * a copy of this software and associated documentation files (the | ||
6 | * "Software"), to deal in the Software without restriction, including | ||
7 | * without limitation the rights to use, copy, modify, merge, publish, | ||
8 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
9 | * permit persons to whom the Software is furnished to do so, subject to | ||
10 | * the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the | ||
13 | * next paragraph) shall be included in all copies or substantial | ||
14 | * portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
19 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
20 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
21 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
22 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | /* | ||
27 | * Authors: | ||
28 | * Ben Skeggs <bskeggs@redhat.com> | ||
29 | */ | ||
30 | |||
31 | #include <linux/debugfs.h> | ||
32 | |||
33 | #include "drmP.h" | ||
34 | #include "nouveau_drv.h" | ||
35 | |||
36 | #include <ttm/ttm_page_alloc.h> | ||
37 | |||
38 | static int | ||
39 | nouveau_debugfs_channel_info(struct seq_file *m, void *data) | ||
40 | { | ||
41 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
42 | struct nouveau_channel *chan = node->info_ent->data; | ||
43 | |||
44 | seq_printf(m, "channel id : %d\n", chan->id); | ||
45 | |||
46 | seq_printf(m, "cpu fifo state:\n"); | ||
47 | seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base); | ||
48 | seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2); | ||
49 | seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); | ||
50 | seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); | ||
51 | seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); | ||
52 | if (chan->dma.ib_max) { | ||
53 | seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max); | ||
54 | seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put); | ||
55 | seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free); | ||
56 | } | ||
57 | |||
58 | seq_printf(m, "gpu fifo state:\n"); | ||
59 | seq_printf(m, " get: 0x%08x\n", | ||
60 | nvchan_rd32(chan, chan->user_get)); | ||
61 | seq_printf(m, " put: 0x%08x\n", | ||
62 | nvchan_rd32(chan, chan->user_put)); | ||
63 | if (chan->dma.ib_max) { | ||
64 | seq_printf(m, " ib get: 0x%08x\n", | ||
65 | nvchan_rd32(chan, 0x88)); | ||
66 | seq_printf(m, " ib put: 0x%08x\n", | ||
67 | nvchan_rd32(chan, 0x8c)); | ||
68 | } | ||
69 | |||
70 | seq_printf(m, "last fence : %d\n", chan->fence.sequence); | ||
71 | seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
/* Create a per-channel debugfs file (named after the channel id) under
 * the lazily-created "channel" directory.  Returns 0 on success or a
 * negative errno.
 */
int
nouveau_debugfs_channel_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_minor *minor = chan->dev->primary;
	int ret;

	/* create the shared "channel" directory on first use */
	if (!dev_priv->debugfs.channel_root) {
		dev_priv->debugfs.channel_root =
			debugfs_create_dir("channel", minor->debugfs_root);
		if (!dev_priv->debugfs.channel_root)
			return -ENOENT;
	}

	/* NOTE(review): the hard-coded 32 is presumably
	 * sizeof(chan->debugfs.name) — confirm against the struct
	 * definition in nouveau_drv.h.
	 */
	snprintf(chan->debugfs.name, 32, "%d", chan->id);
	chan->debugfs.info.name = chan->debugfs.name;
	chan->debugfs.info.show = nouveau_debugfs_channel_info;
	chan->debugfs.info.driver_features = 0;
	chan->debugfs.info.data = chan;

	ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
				       dev_priv->debugfs.channel_root,
				       chan->dev->primary);
	/* remember whether the file exists so fini only removes what
	 * init created
	 */
	if (ret == 0)
		chan->debugfs.active = true;
	return ret;
}
102 | |||
103 | void | ||
104 | nouveau_debugfs_channel_fini(struct nouveau_channel *chan) | ||
105 | { | ||
106 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
107 | |||
108 | if (!chan->debugfs.active) | ||
109 | return; | ||
110 | |||
111 | drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary); | ||
112 | chan->debugfs.active = false; | ||
113 | |||
114 | if (chan == dev_priv->channel) { | ||
115 | debugfs_remove(dev_priv->debugfs.channel_root); | ||
116 | dev_priv->debugfs.channel_root = NULL; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static int | ||
121 | nouveau_debugfs_chipset_info(struct seq_file *m, void *data) | ||
122 | { | ||
123 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
124 | struct drm_minor *minor = node->minor; | ||
125 | struct drm_device *dev = minor->dev; | ||
126 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
127 | uint32_t ppci_0; | ||
128 | |||
129 | ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800); | ||
130 | |||
131 | seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0)); | ||
132 | seq_printf(m, "PCI ID : 0x%04x:0x%04x\n", | ||
133 | ppci_0 & 0xffff, ppci_0 >> 16); | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | static int | ||
138 | nouveau_debugfs_memory_info(struct seq_file *m, void *data) | ||
139 | { | ||
140 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
141 | struct drm_minor *minor = node->minor; | ||
142 | struct drm_nouveau_private *dev_priv = minor->dev->dev_private; | ||
143 | |||
144 | seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10)); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static int | ||
149 | nouveau_debugfs_vbios_image(struct seq_file *m, void *data) | ||
150 | { | ||
151 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
152 | struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private; | ||
153 | int i; | ||
154 | |||
155 | for (i = 0; i < dev_priv->vbios.length; i++) | ||
156 | seq_printf(m, "%c", dev_priv->vbios.data[i]); | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static int | ||
161 | nouveau_debugfs_evict_vram(struct seq_file *m, void *data) | ||
162 | { | ||
163 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
164 | struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private; | ||
165 | int ret; | ||
166 | |||
167 | ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
168 | if (ret) | ||
169 | seq_printf(m, "failed: %d", ret); | ||
170 | else | ||
171 | seq_printf(m, "succeeded\n"); | ||
172 | return 0; | ||
173 | } | ||
174 | |||
/* Device-level debugfs entries (the per-channel files are created
 * separately by nouveau_debugfs_channel_init).
 */
static struct drm_info_list nouveau_debugfs_list[] = {
	{ "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
183 | |||
/* Register the device-level debugfs files; always reports success. */
int
nouveau_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
	return 0;
}
191 | |||
/* Unregister the device-level debugfs files added by
 * nouveau_debugfs_init().
 */
void
nouveau_debugfs_takedown(struct drm_minor *minor)
{
	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
				 minor);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c new file mode 100644 index 00000000000..b30ddd8d2e2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -0,0 +1,490 @@ | |||
1 | /* | ||
2 | * Copyright 2005 Stephane Marchesin. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #include <linux/console.h> | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "drm_crtc_helper.h" | ||
30 | #include "nouveau_drv.h" | ||
31 | #include "nouveau_hw.h" | ||
32 | #include "nouveau_fb.h" | ||
33 | #include "nouveau_fbcon.h" | ||
34 | #include "nouveau_pm.h" | ||
35 | #include "nv50_display.h" | ||
36 | |||
37 | #include "drm_pciids.h" | ||
38 | |||
39 | MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)"); | ||
40 | int nouveau_agpmode = -1; | ||
41 | module_param_named(agpmode, nouveau_agpmode, int, 0400); | ||
42 | |||
43 | MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); | ||
44 | static int nouveau_modeset = -1; /* kms */ | ||
45 | module_param_named(modeset, nouveau_modeset, int, 0400); | ||
46 | |||
47 | MODULE_PARM_DESC(vbios, "Override default VBIOS location"); | ||
48 | char *nouveau_vbios; | ||
49 | module_param_named(vbios, nouveau_vbios, charp, 0400); | ||
50 | |||
51 | MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM"); | ||
52 | int nouveau_vram_pushbuf; | ||
53 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); | ||
54 | |||
55 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); | ||
56 | int nouveau_vram_notify = 0; | ||
57 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); | ||
58 | |||
59 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); | ||
60 | int nouveau_duallink = 1; | ||
61 | module_param_named(duallink, nouveau_duallink, int, 0400); | ||
62 | |||
63 | MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)"); | ||
64 | int nouveau_uscript_lvds = -1; | ||
65 | module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400); | ||
66 | |||
67 | MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)"); | ||
68 | int nouveau_uscript_tmds = -1; | ||
69 | module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); | ||
70 | |||
71 | MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); | ||
72 | int nouveau_ignorelid = 0; | ||
73 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); | ||
74 | |||
75 | MODULE_PARM_DESC(noaccel, "Disable all acceleration"); | ||
76 | int nouveau_noaccel = -1; | ||
77 | module_param_named(noaccel, nouveau_noaccel, int, 0400); | ||
78 | |||
79 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | ||
80 | int nouveau_nofbaccel = 0; | ||
81 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | ||
82 | |||
83 | MODULE_PARM_DESC(force_post, "Force POST"); | ||
84 | int nouveau_force_post = 0; | ||
85 | module_param_named(force_post, nouveau_force_post, int, 0400); | ||
86 | |||
87 | MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); | ||
88 | int nouveau_override_conntype = 0; | ||
89 | module_param_named(override_conntype, nouveau_override_conntype, int, 0400); | ||
90 | |||
91 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n"); | ||
92 | int nouveau_tv_disable = 0; | ||
93 | module_param_named(tv_disable, nouveau_tv_disable, int, 0400); | ||
94 | |||
95 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | ||
96 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | ||
97 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | ||
98 | "\t\tDefault: PAL\n" | ||
99 | "\t\t*NOTE* Ignored for cards with external TV encoders."); | ||
100 | char *nouveau_tv_norm; | ||
101 | module_param_named(tv_norm, nouveau_tv_norm, charp, 0400); | ||
102 | |||
103 | MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n" | ||
104 | "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n" | ||
105 | "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n" | ||
106 | "\t\t0x100 vgaattr, 0x200 EVO (G80+). "); | ||
107 | int nouveau_reg_debug; | ||
108 | module_param_named(reg_debug, nouveau_reg_debug, int, 0600); | ||
109 | |||
110 | MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n"); | ||
111 | char *nouveau_perflvl; | ||
112 | module_param_named(perflvl, nouveau_perflvl, charp, 0400); | ||
113 | |||
114 | MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); | ||
115 | int nouveau_perflvl_wr; | ||
116 | module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); | ||
117 | |||
118 | MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); | ||
119 | int nouveau_msi; | ||
120 | module_param_named(msi, nouveau_msi, int, 0400); | ||
121 | |||
122 | MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n"); | ||
123 | int nouveau_ctxfw; | ||
124 | module_param_named(ctxfw, nouveau_ctxfw, int, 0400); | ||
125 | |||
126 | int nouveau_fbpercrtc; | ||
127 | #if 0 | ||
128 | module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); | ||
129 | #endif | ||
130 | |||
131 | static struct pci_device_id pciidlist[] = { | ||
132 | { | ||
133 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), | ||
134 | .class = PCI_BASE_CLASS_DISPLAY << 16, | ||
135 | .class_mask = 0xff << 16, | ||
136 | }, | ||
137 | { | ||
138 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), | ||
139 | .class = PCI_BASE_CLASS_DISPLAY << 16, | ||
140 | .class_mask = 0xff << 16, | ||
141 | }, | ||
142 | {} | ||
143 | }; | ||
144 | |||
145 | MODULE_DEVICE_TABLE(pci, pciidlist); | ||
146 | |||
/* Forward declaration; the driver structure is defined near the bottom
 * of this file but is needed by the PCI probe callback below. */
static struct drm_driver driver;

/* PCI probe: bind a matching device to the DRM core using 'driver'. */
static int __devinit
nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
154 | |||
/* PCI remove: tear down the DRM device bound to this PCI device. */
static void
nouveau_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
162 | |||
163 | int | ||
164 | nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | ||
165 | { | ||
166 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
167 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
168 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | ||
169 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
170 | struct nouveau_channel *chan; | ||
171 | struct drm_crtc *crtc; | ||
172 | int ret, i, e; | ||
173 | |||
174 | if (pm_state.event == PM_EVENT_PRETHAW) | ||
175 | return 0; | ||
176 | |||
177 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
178 | return 0; | ||
179 | |||
180 | NV_INFO(dev, "Disabling fbcon acceleration...\n"); | ||
181 | nouveau_fbcon_save_disable_accel(dev); | ||
182 | |||
183 | NV_INFO(dev, "Unpinning framebuffer(s)...\n"); | ||
184 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
185 | struct nouveau_framebuffer *nouveau_fb; | ||
186 | |||
187 | nouveau_fb = nouveau_framebuffer(crtc->fb); | ||
188 | if (!nouveau_fb || !nouveau_fb->nvbo) | ||
189 | continue; | ||
190 | |||
191 | nouveau_bo_unpin(nouveau_fb->nvbo); | ||
192 | } | ||
193 | |||
194 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
195 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
196 | |||
197 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | ||
198 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | ||
199 | } | ||
200 | |||
201 | NV_INFO(dev, "Evicting buffers...\n"); | ||
202 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
203 | |||
204 | NV_INFO(dev, "Idling channels...\n"); | ||
205 | for (i = 0; i < pfifo->channels; i++) { | ||
206 | chan = dev_priv->channels.ptr[i]; | ||
207 | |||
208 | if (chan && chan->pushbuf_bo) | ||
209 | nouveau_channel_idle(chan); | ||
210 | } | ||
211 | |||
212 | pfifo->reassign(dev, false); | ||
213 | pfifo->disable(dev); | ||
214 | pfifo->unload_context(dev); | ||
215 | |||
216 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { | ||
217 | if (!dev_priv->eng[e]) | ||
218 | continue; | ||
219 | |||
220 | ret = dev_priv->eng[e]->fini(dev, e, true); | ||
221 | if (ret) { | ||
222 | NV_ERROR(dev, "... engine %d failed: %d\n", i, ret); | ||
223 | goto out_abort; | ||
224 | } | ||
225 | } | ||
226 | |||
227 | ret = pinstmem->suspend(dev); | ||
228 | if (ret) { | ||
229 | NV_ERROR(dev, "... failed: %d\n", ret); | ||
230 | goto out_abort; | ||
231 | } | ||
232 | |||
233 | NV_INFO(dev, "Suspending GPU objects...\n"); | ||
234 | ret = nouveau_gpuobj_suspend(dev); | ||
235 | if (ret) { | ||
236 | NV_ERROR(dev, "... failed: %d\n", ret); | ||
237 | pinstmem->resume(dev); | ||
238 | goto out_abort; | ||
239 | } | ||
240 | |||
241 | NV_INFO(dev, "And we're gone!\n"); | ||
242 | pci_save_state(pdev); | ||
243 | if (pm_state.event == PM_EVENT_SUSPEND) { | ||
244 | pci_disable_device(pdev); | ||
245 | pci_set_power_state(pdev, PCI_D3hot); | ||
246 | } | ||
247 | |||
248 | console_lock(); | ||
249 | nouveau_fbcon_set_suspend(dev, 1); | ||
250 | console_unlock(); | ||
251 | nouveau_fbcon_restore_accel(dev); | ||
252 | return 0; | ||
253 | |||
254 | out_abort: | ||
255 | NV_INFO(dev, "Re-enabling acceleration..\n"); | ||
256 | for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) { | ||
257 | if (dev_priv->eng[e]) | ||
258 | dev_priv->eng[e]->init(dev, e); | ||
259 | } | ||
260 | pfifo->enable(dev); | ||
261 | pfifo->reassign(dev, true); | ||
262 | return ret; | ||
263 | } | ||
264 | |||
265 | int | ||
266 | nouveau_pci_resume(struct pci_dev *pdev) | ||
267 | { | ||
268 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
269 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
270 | struct nouveau_engine *engine = &dev_priv->engine; | ||
271 | struct drm_crtc *crtc; | ||
272 | int ret, i; | ||
273 | |||
274 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
275 | return 0; | ||
276 | |||
277 | nouveau_fbcon_save_disable_accel(dev); | ||
278 | |||
279 | NV_INFO(dev, "We're back, enabling device...\n"); | ||
280 | pci_set_power_state(pdev, PCI_D0); | ||
281 | pci_restore_state(pdev); | ||
282 | if (pci_enable_device(pdev)) | ||
283 | return -1; | ||
284 | pci_set_master(dev->pdev); | ||
285 | |||
286 | /* Make sure the AGP controller is in a consistent state */ | ||
287 | if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) | ||
288 | nouveau_mem_reset_agp(dev); | ||
289 | |||
290 | /* Make the CRTCs accessible */ | ||
291 | engine->display.early_init(dev); | ||
292 | |||
293 | NV_INFO(dev, "POSTing device...\n"); | ||
294 | ret = nouveau_run_vbios_init(dev); | ||
295 | if (ret) | ||
296 | return ret; | ||
297 | |||
298 | nouveau_pm_resume(dev); | ||
299 | |||
300 | if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { | ||
301 | ret = nouveau_mem_init_agp(dev); | ||
302 | if (ret) { | ||
303 | NV_ERROR(dev, "error reinitialising AGP: %d\n", ret); | ||
304 | return ret; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | NV_INFO(dev, "Restoring GPU objects...\n"); | ||
309 | nouveau_gpuobj_resume(dev); | ||
310 | |||
311 | NV_INFO(dev, "Reinitialising engines...\n"); | ||
312 | engine->instmem.resume(dev); | ||
313 | engine->mc.init(dev); | ||
314 | engine->timer.init(dev); | ||
315 | engine->fb.init(dev); | ||
316 | for (i = 0; i < NVOBJ_ENGINE_NR; i++) { | ||
317 | if (dev_priv->eng[i]) | ||
318 | dev_priv->eng[i]->init(dev, i); | ||
319 | } | ||
320 | engine->fifo.init(dev); | ||
321 | |||
322 | nouveau_irq_postinstall(dev); | ||
323 | |||
324 | /* Re-write SKIPS, they'll have been lost over the suspend */ | ||
325 | if (nouveau_vram_pushbuf) { | ||
326 | struct nouveau_channel *chan; | ||
327 | int j; | ||
328 | |||
329 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
330 | chan = dev_priv->channels.ptr[i]; | ||
331 | if (!chan || !chan->pushbuf_bo) | ||
332 | continue; | ||
333 | |||
334 | for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) | ||
335 | nouveau_bo_wr32(chan->pushbuf_bo, i, 0); | ||
336 | } | ||
337 | } | ||
338 | |||
339 | NV_INFO(dev, "Restoring mode...\n"); | ||
340 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
341 | struct nouveau_framebuffer *nouveau_fb; | ||
342 | |||
343 | nouveau_fb = nouveau_framebuffer(crtc->fb); | ||
344 | if (!nouveau_fb || !nouveau_fb->nvbo) | ||
345 | continue; | ||
346 | |||
347 | nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); | ||
348 | } | ||
349 | |||
350 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
351 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
352 | |||
353 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); | ||
354 | if (!ret) | ||
355 | ret = nouveau_bo_map(nv_crtc->cursor.nvbo); | ||
356 | if (ret) | ||
357 | NV_ERROR(dev, "Could not pin/map cursor.\n"); | ||
358 | } | ||
359 | |||
360 | engine->display.init(dev); | ||
361 | |||
362 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
363 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
364 | u32 offset = nv_crtc->cursor.nvbo->bo.offset; | ||
365 | |||
366 | nv_crtc->cursor.set_offset(nv_crtc, offset); | ||
367 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, | ||
368 | nv_crtc->cursor_saved_y); | ||
369 | } | ||
370 | |||
371 | /* Force CLUT to get re-loaded during modeset */ | ||
372 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
373 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
374 | |||
375 | nv_crtc->lut.depth = 0; | ||
376 | } | ||
377 | |||
378 | console_lock(); | ||
379 | nouveau_fbcon_set_suspend(dev, 0); | ||
380 | console_unlock(); | ||
381 | |||
382 | nouveau_fbcon_zfill_all(dev); | ||
383 | |||
384 | drm_helper_resume_force_mode(dev); | ||
385 | |||
386 | nouveau_fbcon_restore_accel(dev); | ||
387 | return 0; | ||
388 | } | ||
389 | |||
/* Main DRM driver description: feature flags, lifecycle hooks, IRQ and
 * vblank callbacks, file operations and GEM object hooks. */
static struct drm_driver driver = {
	.driver_features =
		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
		DRIVER_MODESET,
	.load = nouveau_load,
	.firstopen = nouveau_firstopen,
	.lastclose = nouveau_lastclose,
	.unload = nouveau_unload,
	.open = nouveau_open,
	.preclose = nouveau_preclose,
	.postclose = nouveau_postclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
	/* debugfs hooks are only compiled in with the debug Kconfig option */
	.debugfs_init = nouveau_debugfs_init,
	.debugfs_cleanup = nouveau_debugfs_takedown,
#endif
	.irq_preinstall = nouveau_irq_preinstall,
	.irq_postinstall = nouveau_irq_postinstall,
	.irq_uninstall = nouveau_irq_uninstall,
	.irq_handler = nouveau_irq_handler,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = nouveau_vblank_enable,
	.disable_vblank = nouveau_vblank_disable,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.ioctls = nouveau_ioctls,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = drm_ioctl,
		.mmap = nouveau_ttm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
		.read = drm_read,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = nouveau_compat_ioctl,
#endif
		.llseek = noop_llseek,
	},

	.gem_init_object = nouveau_gem_object_new,
	.gem_free_object = nouveau_gem_object_del,
	.gem_open_object = nouveau_gem_object_open,
	.gem_close_object = nouveau_gem_object_close,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	/* prefer the git revision as the "date" when built from git */
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
446 | |||
/* PCI driver glue: registered in nouveau_init() via drm_pci_init(). */
static struct pci_driver nouveau_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = nouveau_pci_probe,
	.remove = nouveau_pci_remove,
	.suspend = nouveau_pci_suspend,
	.resume = nouveau_pci_resume
};
455 | |||
456 | static int __init nouveau_init(void) | ||
457 | { | ||
458 | driver.num_ioctls = nouveau_max_ioctl; | ||
459 | |||
460 | if (nouveau_modeset == -1) { | ||
461 | #ifdef CONFIG_VGA_CONSOLE | ||
462 | if (vgacon_text_force()) | ||
463 | nouveau_modeset = 0; | ||
464 | else | ||
465 | #endif | ||
466 | nouveau_modeset = 1; | ||
467 | } | ||
468 | |||
469 | if (!nouveau_modeset) | ||
470 | return 0; | ||
471 | |||
472 | nouveau_register_dsm_handler(); | ||
473 | return drm_pci_init(&driver, &nouveau_pci_driver); | ||
474 | } | ||
475 | |||
476 | static void __exit nouveau_exit(void) | ||
477 | { | ||
478 | if (!nouveau_modeset) | ||
479 | return; | ||
480 | |||
481 | drm_pci_exit(&driver, &nouveau_pci_driver); | ||
482 | nouveau_unregister_dsm_handler(); | ||
483 | } | ||
484 | |||
/* Standard module entry/exit points and metadata. */
module_init(nouveau_init);
module_exit(nouveau_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h new file mode 100644 index 00000000000..d7d51deb34b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -0,0 +1,1613 @@ | |||
1 | /* | ||
2 | * Copyright 2005 Stephane Marchesin. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #ifndef __NOUVEAU_DRV_H__ | ||
26 | #define __NOUVEAU_DRV_H__ | ||
27 | |||
28 | #define DRIVER_AUTHOR "Stephane Marchesin" | ||
29 | #define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" | ||
30 | |||
31 | #define DRIVER_NAME "nouveau" | ||
32 | #define DRIVER_DESC "nVidia Riva/TNT/GeForce" | ||
33 | #define DRIVER_DATE "20090420" | ||
34 | |||
35 | #define DRIVER_MAJOR 0 | ||
36 | #define DRIVER_MINOR 0 | ||
37 | #define DRIVER_PATCHLEVEL 16 | ||
38 | |||
39 | #define NOUVEAU_FAMILY 0x0000FFFF | ||
40 | #define NOUVEAU_FLAGS 0xFFFF0000 | ||
41 | |||
42 | #include "ttm/ttm_bo_api.h" | ||
43 | #include "ttm/ttm_bo_driver.h" | ||
44 | #include "ttm/ttm_placement.h" | ||
45 | #include "ttm/ttm_memory.h" | ||
46 | #include "ttm/ttm_module.h" | ||
47 | |||
/* Per-open-file driver state (drm_file::driver_priv). */
struct nouveau_fpriv {
	spinlock_t lock; /* NOTE(review): presumably guards 'channels' -- confirm */
	struct list_head channels;
	struct nouveau_vm *vm;
};
53 | |||
54 | static inline struct nouveau_fpriv * | ||
55 | nouveau_fpriv(struct drm_file *file_priv) | ||
56 | { | ||
57 | return file_priv ? file_priv->driver_priv : NULL; | ||
58 | } | ||
59 | |||
60 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | ||
61 | |||
62 | #include "nouveau_drm.h" | ||
63 | #include "nouveau_reg.h" | ||
64 | #include "nouveau_bios.h" | ||
65 | #include "nouveau_util.h" | ||
66 | |||
67 | struct nouveau_grctx; | ||
68 | struct nouveau_mem; | ||
69 | #include "nouveau_vm.h" | ||
70 | |||
71 | #define MAX_NUM_DCB_ENTRIES 16 | ||
72 | |||
73 | #define NOUVEAU_MAX_CHANNEL_NR 128 | ||
74 | #define NOUVEAU_MAX_TILE_NR 15 | ||
75 | |||
/* A GPU memory allocation together with its VM mappings. */
struct nouveau_mem {
	struct drm_device *dev;

	struct nouveau_vma bar_vma;
	struct nouveau_vma vma[2];
	u8 page_shift; /* log2 of the mapping page size */

	struct drm_mm_node *tag;
	struct list_head regions;
	dma_addr_t *pages;
	u32 memtype;
	u64 offset;
	u64 size;
};

/* Software state of one hardware tiling region. */
struct nouveau_tile_reg {
	bool used;
	uint32_t addr;
	uint32_t limit;
	uint32_t pitch;
	uint32_t zcomp;
	struct drm_mm_node *tag_mem;
	struct nouveau_fence *fence; /* NOTE(review): presumably the last fence using this region -- confirm */
};
100 | |||
/* Driver-private buffer object wrapping a TTM buffer object. */
struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	u32 valid_domains;
	u32 placements[3];
	u32 busy_placements[3];
	struct ttm_bo_kmap_obj kmap;
	struct list_head head;

	/* protected by ttm_bo_reserve() */
	struct drm_file *reserved_by;
	struct list_head entry;
	int pbbo_index;
	bool validate_mapped;

	struct nouveau_channel *channel;

	struct list_head vma_list;
	unsigned page_shift;

	uint32_t tile_mode;
	uint32_t tile_flags;
	struct nouveau_tile_reg *tile;

	struct drm_gem_object *gem;
	int pin_refcnt; /* nesting count for pin/unpin calls */
};

/* Extract the tiling-layout bits from a bo's tile_flags. */
#define nouveau_bo_tile_layout(nvbo) \
	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
131 | |||
/* Upcast from the embedded TTM buffer object to its nouveau wrapper. */
static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct nouveau_bo, bo);
}
137 | |||
138 | static inline struct nouveau_bo * | ||
139 | nouveau_gem_object(struct drm_gem_object *gem) | ||
140 | { | ||
141 | return gem ? gem->driver_private : NULL; | ||
142 | } | ||
143 | |||
/* TODO: submit equivalent to TTM generic API upstream? */
/*
 * Return the kmap'd virtual address of a buffer object as an __iomem
 * pointer.  Warns (once) if a mapping exists but is not I/O memory,
 * since the caller will treat it as such.
 */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
						&nvbo->kmap, &is_iomem);
	WARN_ON_ONCE(ioptr && !is_iomem);
	return ioptr;
}
154 | |||
/* Flag bits stored in the high (NOUVEAU_FLAGS) half of the chipset id. */
enum nouveau_flags {
	NV_NFORCE = 0x10000000,
	NV_NFORCE2 = 0x20000000
};

/* Engine indices; NVOBJ_ENGINE_NR bounds per-channel engctx[] arrays. */
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
#define NVOBJ_ENGINE_CRYPT 2
#define NVOBJ_ENGINE_COPY0 3
#define NVOBJ_ENGINE_COPY1 4
#define NVOBJ_ENGINE_MPEG 5
#define NVOBJ_ENGINE_DISPLAY 15
#define NVOBJ_ENGINE_NR 16

/* nouveau_gpuobj::flags bits */
#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_VM (1 << 3)
#define NVOBJ_FLAG_VM_USER (1 << 4)

/* NOTE(review): sentinel cinst for objects with no per-channel instance -- confirm */
#define NVOBJ_CINST_GLOBAL 0xdeadbeef
176 | |||
/* A GPU object: a reference-counted block of instance memory. */
struct nouveau_gpuobj {
	struct drm_device *dev;
	struct kref refcount;
	struct list_head list;

	void *node;
	u32 *suspend; /* NOTE(review): presumably a CPU-side copy of contents across suspend -- confirm */

	uint32_t flags; /* NVOBJ_FLAG_* */

	u32 size;
	u32 pinst; /* PRAMIN BAR offset */
	u32 cinst; /* Channel offset */
	u64 vinst; /* VRAM address */
	u64 linst; /* VM address */

	uint32_t engine;
	uint32_t class;

	/* optional destructor called on final unref */
	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
	void *priv;
};

/* Bookkeeping for a pending page flip on one CRTC. */
struct nouveau_page_flip_state {
	struct list_head head;
	struct drm_pending_vblank_event *event;
	int crtc, bpp, pitch, x, y;
	uint64_t offset;
};

/* Lockdep classes for user vs. kernel channel mutexes. */
enum nouveau_channel_mutex_class {
	NOUVEAU_UCHANNEL_MUTEX,
	NOUVEAU_KCHANNEL_MUTEX
};
211 | |||
/* State of a single hardware FIFO channel. */
struct nouveau_channel {
	struct drm_device *dev;
	struct list_head list;
	int id;

	/* references to the channel data structure */
	struct kref ref;
	/* users of the hardware channel resources, the hardware
	 * context will be kicked off when it reaches zero. */
	atomic_t users;
	struct mutex mutex;

	/* owner of this fifo */
	struct drm_file *file_priv;
	/* mapping of the fifo itself */
	struct drm_local_map *map;

	/* mapping of the regs controlling the fifo */
	void __iomem *user;
	uint32_t user_get;
	uint32_t user_put;

	/* Fencing */
	struct {
		/* lock protects the pending list only */
		spinlock_t lock;
		struct list_head pending;
		uint32_t sequence;
		uint32_t sequence_ack;
		atomic_t last_sequence_irq;
		struct nouveau_vma vma;
	} fence;

	/* DMA push buffer */
	struct nouveau_gpuobj *pushbuf;
	struct nouveau_bo *pushbuf_bo;
	struct nouveau_vma pushbuf_vma;
	uint32_t pushbuf_base;

	/* Notifier memory */
	struct nouveau_bo *notifier_bo;
	struct nouveau_vma notifier_vma;
	struct drm_mm notifier_heap;

	/* PFIFO context */
	struct nouveau_gpuobj *ramfc;
	struct nouveau_gpuobj *cache;
	void *fifo_priv;

	/* Execution engine contexts */
	void *engctx[NVOBJ_ENGINE_NR];

	/* NV50 VM */
	struct nouveau_vm *vm;
	struct nouveau_gpuobj *vm_pd;

	/* Objects */
	struct nouveau_gpuobj *ramin; /* Private instmem */
	struct drm_mm ramin_heap; /* Private PRAMIN heap */
	struct nouveau_ramht *ramht; /* Hash table */

	/* GPU object info for stuff used in-kernel (mm_enabled) */
	uint32_t m2mf_ntfy;
	uint32_t vram_handle;
	uint32_t gart_handle;
	bool accel_done;

	/* Push buffer state (only for drm's channel on !mm_enabled) */
	struct {
		int max;
		int free;
		int cur;
		int put;
		/* access via pushbuf_bo */

		int ib_base;
		int ib_max;
		int ib_free;
		int ib_put;
	} dma;

	uint32_t sw_subchannel[8];

	/* software-methods / vblank-semaphore state */
	struct nouveau_vma dispc_vma[2];
	struct {
		struct nouveau_gpuobj *vblsem;
		uint32_t vblsem_head;
		uint32_t vblsem_offset;
		uint32_t vblsem_rval;
		struct list_head vbl_wait;
		struct list_head flip;
	} nvsw;

	/* per-channel debugfs entry */
	struct {
		bool active;
		char name[32];
		struct drm_info_list info;
	} debugfs;
};
311 | |||
/* Per-chipset hooks for one execution engine (GR/MPEG/crypt/copy/...). */
struct nouveau_exec_engine {
	void (*destroy)(struct drm_device *, int engine);
	int (*init)(struct drm_device *, int engine);
	int (*fini)(struct drm_device *, int engine, bool suspend);
	int (*context_new)(struct nouveau_channel *, int engine);
	void (*context_del)(struct nouveau_channel *, int engine);
	int (*object_new)(struct nouveau_channel *, int engine,
			  u32 handle, u16 class);
	void (*set_tile_region)(struct drm_device *dev, int i);
	void (*tlb_flush)(struct drm_device *, int engine);
};

/* Per-chipset hooks for the instance-memory (PRAMIN) subsystem. */
struct nouveau_instmem_engine {
	void *priv;

	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
	int (*suspend)(struct drm_device *dev);
	void (*resume)(struct drm_device *dev);

	int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
		   u32 size, u32 align);
	void (*put)(struct nouveau_gpuobj *);
	int (*map)(struct nouveau_gpuobj *);
	void (*unmap)(struct nouveau_gpuobj *);

	void (*flush)(struct drm_device *);
};
340 | |||
/* Per-chipset hooks for the master-control (MC) block. */
struct nouveau_mc_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
};

/* Per-chipset hooks for the timer block. */
struct nouveau_timer_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
	uint64_t (*read)(struct drm_device *dev);
};

/* Per-chipset hooks for the framebuffer/tiling (FB) block. */
struct nouveau_fb_engine {
	int num_tiles;
	struct drm_mm tag_heap;
	void *priv;

	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);

	void (*init_tile_region)(struct drm_device *dev, int i,
				 uint32_t addr, uint32_t size,
				 uint32_t pitch, uint32_t flags);
	void (*set_tile_region)(struct drm_device *dev, int i);
	void (*free_tile_region)(struct drm_device *dev, int i);
};
366 | |||
367 | struct nouveau_fifo_engine { | ||
368 | void *priv; | ||
369 | int channels; | ||
370 | |||
371 | struct nouveau_gpuobj *playlist[2]; | ||
372 | int cur_playlist; | ||
373 | |||
374 | int (*init)(struct drm_device *); | ||
375 | void (*takedown)(struct drm_device *); | ||
376 | |||
377 | void (*disable)(struct drm_device *); | ||
378 | void (*enable)(struct drm_device *); | ||
379 | bool (*reassign)(struct drm_device *, bool enable); | ||
380 | bool (*cache_pull)(struct drm_device *dev, bool enable); | ||
381 | |||
382 | int (*channel_id)(struct drm_device *); | ||
383 | |||
384 | int (*create_context)(struct nouveau_channel *); | ||
385 | void (*destroy_context)(struct nouveau_channel *); | ||
386 | int (*load_context)(struct nouveau_channel *); | ||
387 | int (*unload_context)(struct drm_device *); | ||
388 | void (*tlb_flush)(struct drm_device *dev); | ||
389 | }; | ||
390 | |||
/* Display engine: modesetting bring-up/teardown hooks, per generation. */
struct nouveau_display_engine {
	void *priv;
	int  (*early_init)(struct drm_device *);	/* before card_init */
	void (*late_takedown)(struct drm_device *);
	int  (*create)(struct drm_device *);
	int  (*init)(struct drm_device *);
	void (*destroy)(struct drm_device *);
};
399 | |||
/* GPIO engine: get/set lines by DCB tag, plus GPIO interrupt routing. */
struct nouveau_gpio_engine {
	void *priv;

	int  (*init)(struct drm_device *);
	void (*takedown)(struct drm_device *);

	int  (*get)(struct drm_device *, enum dcb_gpio_tag);
	int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);

	/* register/unregister a handler(data, state) for a GPIO interrupt */
	int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
			     void (*)(void *, int), void *);
	void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
			       void (*)(void *, int), void *);
	/* returns previous enable state */
	bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
415 | |||
/* One supported voltage step: value paired with its hardware VID code. */
struct nouveau_pm_voltage_level {
	u8 voltage;
	u8 vid;
};

/* Voltage control capability: the table of levels the board supports. */
struct nouveau_pm_voltage {
	bool supported;
	u8 vid_mask;	/* mask of VID bits actually wired up */

	struct nouveau_pm_voltage_level *level;
	int nr_level;
};
428 | |||
/*
 * One memory timing set: raw values for the PFB 0x100220..0x100240
 * timing registers, named after the register offsets they program.
 */
struct nouveau_pm_memtiming {
	int id;
	u32 reg_100220;
	u32 reg_100224;
	u32 reg_100228;
	u32 reg_10022c;
	u32 reg_100230;
	u32 reg_100234;
	u32 reg_100238;
	u32 reg_10023c;
	u32 reg_100240;
};
441 | |||
#define NOUVEAU_PM_MAX_LEVEL 8
/* One performance level: clocks, voltage, fan speed and memory timing. */
struct nouveau_pm_level {
	struct device_attribute dev_attr;	/* sysfs attribute for this level */
	char name[32];
	int id;

	u32 core;	/* clocks, in kHz —
			 * NOTE(review): confirm unit against clock_get users */
	u32 memory;
	u32 shader;
	u32 unk05;	/* unidentified clocks */
	u32 unk0a;

	u8 voltage;
	u8 fanspeed;

	u16 memscript;	/* VBIOS script to run on memory reclock */
	struct nouveau_pm_memtiming *timing;
};
460 | |||
/* Calibration constants for converting raw sensor readings to degrees. */
struct nouveau_pm_temp_sensor_constants {
	u16 offset_constant;
	s16 offset_mult;
	s16 offset_div;
	s16 slope_mult;
	s16 slope_div;
};

/* Temperature thresholds at which protective actions kick in. */
struct nouveau_pm_threshold_temp {
	s16 critical;
	s16 down_clock;
	s16 fan_boost;
};

/* Table of memory timing sets parsed from the VBIOS. */
struct nouveau_pm_memtimings {
	bool supported;
	struct nouveau_pm_memtiming *timing;
	int nr_timing;
};
480 | |||
/* Power management engine: perf levels, thermal data and reclock hooks. */
struct nouveau_pm_engine {
	struct nouveau_pm_voltage voltage;
	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
	int nr_perflvl;
	struct nouveau_pm_memtimings memtimings;
	struct nouveau_pm_temp_sensor_constants sensor_constants;
	struct nouveau_pm_threshold_temp threshold_temp;

	struct nouveau_pm_level boot;	/* level the card booted at */
	struct nouveau_pm_level *cur;	/* currently selected level */

	struct device *hwmon;		/* hwmon class device, if registered */
	struct notifier_block acpi_nb;	/* ACPI event notifier */

	int  (*clock_get)(struct drm_device *, u32 id);
	/* clock_pre returns opaque state consumed by clock_set */
	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
			   u32 id, int khz);
	void (*clock_set)(struct drm_device *, void *);
	int  (*voltage_get)(struct drm_device *);
	int  (*voltage_set)(struct drm_device *, int voltage);
	int  (*fanspeed_get)(struct drm_device *);
	int  (*fanspeed_set)(struct drm_device *, int fanspeed);
	int  (*temp_get)(struct drm_device *);
};
505 | |||
/* VRAM allocator engine: get/put memory nodes from the VRAM mm. */
struct nouveau_vram_engine {
	struct nouveau_mm *mm;

	int  (*init)(struct drm_device *);
	void (*takedown)(struct drm_device *dev);
	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
		    u32 type, struct nouveau_mem **);
	void (*put)(struct drm_device *, struct nouveau_mem **);

	/* validate user-supplied tile flags before allocating */
	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};
517 | |||
/* Aggregate of all per-chipset engine vtables, one instance per device. */
struct nouveau_engine {
	struct nouveau_instmem_engine instmem;
	struct nouveau_mc_engine      mc;
	struct nouveau_timer_engine   timer;
	struct nouveau_fb_engine      fb;
	struct nouveau_fifo_engine    fifo;
	struct nouveau_display_engine display;
	struct nouveau_gpio_engine    gpio;
	struct nouveau_pm_engine      pm;
	struct nouveau_vram_engine    vram;
};
529 | |||
/*
 * PLL coefficients: the union lets N/M pairs be accessed either as
 * individual bytes or as packed 16-bit NM words; the #ifdef keeps the
 * byte order of the two views consistent on both endiannesses.
 */
struct nouveau_pll_vals {
	union {
		struct {
#ifdef __BIG_ENDIAN
			uint8_t N1, M1, N2, M2;
#else
			uint8_t M1, N1, M2, N2;
#endif
		};
		struct {
			uint16_t NM1, NM2;
		} __attribute__((packed));
	};
	int log2P;	/* post-divider as a power of two */

	int refclk;	/* reference clock the coefficients were computed for */
};
547 | |||
/* Indices into nv04_crtc_reg::fp_horiz_regs[] / fp_vert_regs[]. */
enum nv04_fp_display_regs {
	FP_DISPLAY_END,
	FP_TOTAL,
	FP_CRTC,
	FP_SYNC_START,
	FP_SYNC_END,
	FP_VALID_START,
	FP_VALID_END
};
557 | |||
/*
 * Saved/constructed register state for one NV04-era CRTC: legacy VGA
 * registers followed by the PCRTC and PRAMDAC extensions.
 */
struct nv04_crtc_reg {
	/* VGA core regs */
	unsigned char MiscOutReg;
	uint8_t CRTC[0xa0];
	uint8_t CR58[0x10];
	uint8_t Sequencer[5];
	uint8_t Graphics[9];
	uint8_t Attribute[21];
	unsigned char DAC[768];		/* palette: 256 RGB triplets */

	/* PCRTC regs */
	uint32_t fb_start;
	uint32_t crtc_cfg;
	uint32_t cursor_cfg;
	uint32_t gpio_ext;
	uint32_t crtc_830;
	uint32_t crtc_834;
	uint32_t crtc_850;
	uint32_t crtc_eng_ctrl;

	/* PRAMDAC regs */
	uint32_t nv10_cursync;
	struct nouveau_pll_vals pllvals;
	uint32_t ramdac_gen_ctrl;
	uint32_t ramdac_630;
	uint32_t ramdac_634;
	uint32_t tv_setup;
	uint32_t tv_vtotal;
	uint32_t tv_vskew;
	uint32_t tv_vsync_delay;
	uint32_t tv_htotal;
	uint32_t tv_hskew;
	uint32_t tv_hsync_delay;
	uint32_t tv_hsync_delay2;
	uint32_t fp_horiz_regs[7];	/* indexed by enum nv04_fp_display_regs */
	uint32_t fp_vert_regs[7];	/* indexed by enum nv04_fp_display_regs */
	uint32_t dither;
	uint32_t fp_control;
	uint32_t dither_regs[6];
	uint32_t fp_debug_0;
	uint32_t fp_debug_1;
	uint32_t fp_debug_2;
	uint32_t fp_margin_color;
	uint32_t ramdac_8c0;
	uint32_t ramdac_a20;
	uint32_t ramdac_a24;
	uint32_t ramdac_a34;
	uint32_t ctv_regs[38];
};
606 | |||
/* Saved output routing: register value and which CRTC head drives it. */
struct nv04_output_reg {
	uint32_t output;
	int head;
};

/* Full NV04 modesetting state: both CRTC heads plus shared clock selects. */
struct nv04_mode_state {
	struct nv04_crtc_reg crtc_reg[2];
	uint32_t pllsel;
	uint32_t sel_clk;
};
617 | |||
/* Card generation, derived from the chipset id; compared ordinally. */
enum nouveau_card_type {
	NV_04      = 0x00,
	NV_10      = 0x10,
	NV_20      = 0x20,
	NV_30      = 0x30,
	NV_40      = 0x40,
	NV_50      = 0x50,
	NV_C0      = 0xc0,
};
627 | |||
/*
 * Per-device driver state, hung off drm_device::dev_private
 * (see nouveau_private() below).
 */
struct drm_nouveau_private {
	struct drm_device *dev;
	bool noaccel;		/* run without acceleration */

	/* the card type, takes NV_* as values */
	enum nouveau_card_type card_type;
	/* exact chipset, derived from NV_PMC_BOOT_0 */
	int chipset;
	int stepping;
	int flags;

	void __iomem *mmio;	/* register aperture mapping */

	/* PRAMIN (instance memory) aperture and allocator state */
	spinlock_t ramin_lock;
	void __iomem *ramin;
	u32 ramin_size;
	u32 ramin_base;
	bool ramin_available;
	struct drm_mm ramin_heap;
	struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
	struct list_head gpuobj_list;
	struct list_head classes;

	struct nouveau_bo *vga_ram;

	/* interrupt handling; handlers indexed by status bit */
	void (*irq_handler[32])(struct drm_device *);
	bool msi_enabled;

	struct list_head vbl_waiting;

	/* TTM memory manager state */
	struct {
		struct drm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
		atomic_t validate_sequence;
	} ttm;

	/* fence buffer and sub-allocator */
	struct {
		spinlock_t lock;
		struct drm_mm heap;
		struct nouveau_bo *bo;
	} fence;

	/* channel table, indexed by channel id */
	struct {
		spinlock_t lock;
		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
	} channels;

	struct nouveau_engine engine;
	struct nouveau_channel *channel;

	/* For PFIFO and PGRAPH. */
	spinlock_t context_switch_lock;

	/* VM/PRAMIN flush, legacy PRAMIN aperture */
	spinlock_t vm_lock;

	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
	struct nouveau_ramht  *ramht;
	struct nouveau_gpuobj *ramfc;
	struct nouveau_gpuobj *ramro;

	uint32_t ramin_rsvd_vram;

	/* GART (system memory) aperture configuration */
	struct {
		enum {
			NOUVEAU_GART_NONE = 0,
			NOUVEAU_GART_AGP,	/* AGP */
			NOUVEAU_GART_PDMA,	/* paged dma object */
			NOUVEAU_GART_HW		/* on-chip gart/vm */
		} type;
		uint64_t aper_base;
		uint64_t aper_size;
		uint64_t aper_free;

		struct ttm_backend_func *func;

		/* NOTE(review): presumably a placeholder page for unbound
		 * entries — confirm against nouveau_sgdma.c */
		struct {
			struct page *page;
			dma_addr_t addr;
		} dummy;

		struct nouveau_gpuobj *sg_ctxdma;
	} gart_info;

	/* nv10-nv40 tiling regions */
	struct {
		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
		spinlock_t lock;
	} tile;

	/* VRAM/fb configuration */
	uint64_t vram_size;
	uint64_t vram_sys_base;

	uint64_t fb_phys;
	uint64_t fb_available_size;
	uint64_t fb_mappable_pages;
	uint64_t fb_aper_free;
	int fb_mtrr;

	/* BAR control (NV50-) */
	struct nouveau_vm *bar1_vm;
	struct nouveau_vm *bar3_vm;

	/* G8x/G9x virtual address space */
	struct nouveau_vm *chan_vm;

	struct nvbios vbios;

	/* saved/constructed legacy modesetting state */
	struct nv04_mode_state mode_reg;
	struct nv04_mode_state saved_reg;
	uint32_t saved_vga_font[4][16384];
	uint32_t crtc_owner;
	uint32_t dac_users[4];

	struct backlight_device *backlight;

	struct {
		struct dentry *channel_root;
	} debugfs;

	struct nouveau_fbdev *nfbdev;
	struct apertures_struct *apertures;
};
754 | |||
755 | static inline struct drm_nouveau_private * | ||
756 | nouveau_private(struct drm_device *dev) | ||
757 | { | ||
758 | return dev->dev_private; | ||
759 | } | ||
760 | |||
/* Recover the nouveau-private state from its embedded TTM bo_device. */
static inline struct drm_nouveau_private *
nouveau_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
}
766 | |||
767 | static inline int | ||
768 | nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) | ||
769 | { | ||
770 | struct nouveau_bo *prev; | ||
771 | |||
772 | if (!pnvbo) | ||
773 | return -EINVAL; | ||
774 | prev = *pnvbo; | ||
775 | |||
776 | *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL; | ||
777 | if (prev) { | ||
778 | struct ttm_buffer_object *bo = &prev->bo; | ||
779 | |||
780 | ttm_bo_unref(&bo); | ||
781 | } | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | /* nouveau_drv.c */ | ||
787 | extern int nouveau_agpmode; | ||
788 | extern int nouveau_duallink; | ||
789 | extern int nouveau_uscript_lvds; | ||
790 | extern int nouveau_uscript_tmds; | ||
791 | extern int nouveau_vram_pushbuf; | ||
792 | extern int nouveau_vram_notify; | ||
793 | extern int nouveau_fbpercrtc; | ||
794 | extern int nouveau_tv_disable; | ||
795 | extern char *nouveau_tv_norm; | ||
796 | extern int nouveau_reg_debug; | ||
797 | extern char *nouveau_vbios; | ||
798 | extern int nouveau_ignorelid; | ||
799 | extern int nouveau_nofbaccel; | ||
800 | extern int nouveau_noaccel; | ||
801 | extern int nouveau_force_post; | ||
802 | extern int nouveau_override_conntype; | ||
803 | extern char *nouveau_perflvl; | ||
804 | extern int nouveau_perflvl_wr; | ||
805 | extern int nouveau_msi; | ||
806 | extern int nouveau_ctxfw; | ||
807 | |||
808 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); | ||
809 | extern int nouveau_pci_resume(struct pci_dev *pdev); | ||
810 | |||
811 | /* nouveau_state.c */ | ||
812 | extern int nouveau_open(struct drm_device *, struct drm_file *); | ||
813 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | ||
814 | extern void nouveau_postclose(struct drm_device *, struct drm_file *); | ||
815 | extern int nouveau_load(struct drm_device *, unsigned long flags); | ||
816 | extern int nouveau_firstopen(struct drm_device *); | ||
817 | extern void nouveau_lastclose(struct drm_device *); | ||
818 | extern int nouveau_unload(struct drm_device *); | ||
819 | extern int nouveau_ioctl_getparam(struct drm_device *, void *data, | ||
820 | struct drm_file *); | ||
821 | extern int nouveau_ioctl_setparam(struct drm_device *, void *data, | ||
822 | struct drm_file *); | ||
823 | extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, | ||
824 | uint32_t reg, uint32_t mask, uint32_t val); | ||
825 | extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, | ||
826 | uint32_t reg, uint32_t mask, uint32_t val); | ||
827 | extern bool nouveau_wait_for_idle(struct drm_device *); | ||
828 | extern int nouveau_card_init(struct drm_device *); | ||
829 | |||
830 | /* nouveau_mem.c */ | ||
831 | extern int nouveau_mem_vram_init(struct drm_device *); | ||
832 | extern void nouveau_mem_vram_fini(struct drm_device *); | ||
833 | extern int nouveau_mem_gart_init(struct drm_device *); | ||
834 | extern void nouveau_mem_gart_fini(struct drm_device *); | ||
835 | extern int nouveau_mem_init_agp(struct drm_device *); | ||
836 | extern int nouveau_mem_reset_agp(struct drm_device *); | ||
837 | extern void nouveau_mem_close(struct drm_device *); | ||
838 | extern int nouveau_mem_detect(struct drm_device *); | ||
839 | extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags); | ||
840 | extern struct nouveau_tile_reg *nv10_mem_set_tiling( | ||
841 | struct drm_device *dev, uint32_t addr, uint32_t size, | ||
842 | uint32_t pitch, uint32_t flags); | ||
843 | extern void nv10_mem_put_tile_region(struct drm_device *dev, | ||
844 | struct nouveau_tile_reg *tile, | ||
845 | struct nouveau_fence *fence); | ||
846 | extern const struct ttm_mem_type_manager_func nouveau_vram_manager; | ||
847 | extern const struct ttm_mem_type_manager_func nouveau_gart_manager; | ||
848 | |||
849 | /* nouveau_notifier.c */ | ||
850 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); | ||
851 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); | ||
852 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, | ||
853 | int cout, uint32_t start, uint32_t end, | ||
854 | uint32_t *offset); | ||
855 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); | ||
856 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, | ||
857 | struct drm_file *); | ||
858 | extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, | ||
859 | struct drm_file *); | ||
860 | |||
861 | /* nouveau_channel.c */ | ||
862 | extern struct drm_ioctl_desc nouveau_ioctls[]; | ||
863 | extern int nouveau_max_ioctl; | ||
864 | extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); | ||
865 | extern int nouveau_channel_alloc(struct drm_device *dev, | ||
866 | struct nouveau_channel **chan, | ||
867 | struct drm_file *file_priv, | ||
868 | uint32_t fb_ctxdma, uint32_t tt_ctxdma); | ||
869 | extern struct nouveau_channel * | ||
870 | nouveau_channel_get_unlocked(struct nouveau_channel *); | ||
871 | extern struct nouveau_channel * | ||
872 | nouveau_channel_get(struct drm_file *, int id); | ||
873 | extern void nouveau_channel_put_unlocked(struct nouveau_channel **); | ||
874 | extern void nouveau_channel_put(struct nouveau_channel **); | ||
875 | extern void nouveau_channel_ref(struct nouveau_channel *chan, | ||
876 | struct nouveau_channel **pchan); | ||
877 | extern void nouveau_channel_idle(struct nouveau_channel *chan); | ||
878 | |||
879 | /* nouveau_object.c */ | ||
/* Install engine object 'p' in dev_priv->eng[] under tag NVOBJ_ENGINE_<e>. */
#define NVOBJ_ENGINE_ADD(d, e, p) do {                                         \
	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
	dev_priv->eng[NVOBJ_ENGINE_##e] = (p);                                 \
} while (0)

/* Remove the engine object registered under tag NVOBJ_ENGINE_<e>. */
#define NVOBJ_ENGINE_DEL(d, e) do {                                            \
	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
	dev_priv->eng[NVOBJ_ENGINE_##e] = NULL;                                \
} while (0)

/* Register object class 'c' with engine <e>; returns from the caller on error. */
#define NVOBJ_CLASS(d, c, e) do {                                              \
	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e);        \
	if (ret)                                                               \
		return ret;                                                    \
} while (0)

/* Register method handler 'e' for (class, mthd); returns from the caller on error. */
#define NVOBJ_MTHD(d, c, m, e) do {                                            \
	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e));                 \
	if (ret)                                                               \
		return ret;                                                    \
} while (0)
901 | |||
902 | extern int nouveau_gpuobj_early_init(struct drm_device *); | ||
903 | extern int nouveau_gpuobj_init(struct drm_device *); | ||
904 | extern void nouveau_gpuobj_takedown(struct drm_device *); | ||
905 | extern int nouveau_gpuobj_suspend(struct drm_device *dev); | ||
906 | extern void nouveau_gpuobj_resume(struct drm_device *dev); | ||
907 | extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); | ||
908 | extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, | ||
909 | int (*exec)(struct nouveau_channel *, | ||
910 | u32 class, u32 mthd, u32 data)); | ||
911 | extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); | ||
912 | extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); | ||
913 | extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, | ||
914 | uint32_t vram_h, uint32_t tt_h); | ||
915 | extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); | ||
916 | extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, | ||
917 | uint32_t size, int align, uint32_t flags, | ||
918 | struct nouveau_gpuobj **); | ||
919 | extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *, | ||
920 | struct nouveau_gpuobj **); | ||
921 | extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, | ||
922 | u32 size, u32 flags, | ||
923 | struct nouveau_gpuobj **); | ||
924 | extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, | ||
925 | uint64_t offset, uint64_t size, int access, | ||
926 | int target, struct nouveau_gpuobj **); | ||
927 | extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class); | ||
928 | extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, | ||
929 | u64 size, int target, int access, u32 type, | ||
930 | u32 comp, struct nouveau_gpuobj **pobj); | ||
931 | extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset, | ||
932 | int class, u64 base, u64 size, int target, | ||
933 | int access, u32 type, u32 comp); | ||
934 | extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, | ||
935 | struct drm_file *); | ||
936 | extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, | ||
937 | struct drm_file *); | ||
938 | |||
939 | /* nouveau_irq.c */ | ||
940 | extern int nouveau_irq_init(struct drm_device *); | ||
941 | extern void nouveau_irq_fini(struct drm_device *); | ||
942 | extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); | ||
943 | extern void nouveau_irq_register(struct drm_device *, int status_bit, | ||
944 | void (*)(struct drm_device *)); | ||
945 | extern void nouveau_irq_unregister(struct drm_device *, int status_bit); | ||
946 | extern void nouveau_irq_preinstall(struct drm_device *); | ||
947 | extern int nouveau_irq_postinstall(struct drm_device *); | ||
948 | extern void nouveau_irq_uninstall(struct drm_device *); | ||
949 | |||
950 | /* nouveau_sgdma.c */ | ||
951 | extern int nouveau_sgdma_init(struct drm_device *); | ||
952 | extern void nouveau_sgdma_takedown(struct drm_device *); | ||
953 | extern uint32_t nouveau_sgdma_get_physical(struct drm_device *, | ||
954 | uint32_t offset); | ||
955 | extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); | ||
956 | |||
/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
extern int  nouveau_debugfs_init(struct drm_minor *);
extern void nouveau_debugfs_takedown(struct drm_minor *);
extern int  nouveau_debugfs_channel_init(struct nouveau_channel *);
extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
#else
/* no-op stubs so callers need no #ifdefs when debugfs support is off */
static inline int
nouveau_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
{
}

static inline int
nouveau_debugfs_channel_init(struct nouveau_channel *chan)
{
	return 0;
}

static inline void
nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
{
}
#endif
985 | |||
986 | /* nouveau_dma.c */ | ||
987 | extern void nouveau_dma_pre_init(struct nouveau_channel *); | ||
988 | extern int nouveau_dma_init(struct nouveau_channel *); | ||
989 | extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); | ||
990 | |||
/* nouveau_acpi.c */
#define ROM_BIOS_PAGE 4096	/* chunk size for ACPI ROM fetches */
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
int  nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
int  nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
/* without ACPI: report "unsupported" so callers fall back gracefully */
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
#endif
1006 | |||
/* nouveau_backlight.c */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int  nouveau_backlight_init(struct drm_connector *);
extern void nouveau_backlight_exit(struct drm_connector *);
#else
/* no-op stubs when backlight support is compiled out */
static inline int nouveau_backlight_init(struct drm_connector *dev)
{
	return 0;
}

static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
#endif
1019 | |||
1020 | /* nouveau_bios.c */ | ||
1021 | extern int nouveau_bios_init(struct drm_device *); | ||
1022 | extern void nouveau_bios_takedown(struct drm_device *dev); | ||
1023 | extern int nouveau_run_vbios_init(struct drm_device *); | ||
1024 | extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table, | ||
1025 | struct dcb_entry *); | ||
1026 | extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, | ||
1027 | enum dcb_gpio_tag); | ||
1028 | extern struct dcb_connector_table_entry * | ||
1029 | nouveau_bios_connector_entry(struct drm_device *, int index); | ||
1030 | extern u32 get_pll_register(struct drm_device *, enum pll_types); | ||
1031 | extern int get_pll_limits(struct drm_device *, uint32_t limit_match, | ||
1032 | struct pll_lims *); | ||
1033 | extern int nouveau_bios_run_display_table(struct drm_device *, | ||
1034 | struct dcb_entry *, | ||
1035 | uint32_t script, int pxclk); | ||
1036 | extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *, | ||
1037 | int *length); | ||
1038 | extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); | ||
1039 | extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *); | ||
1040 | extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, | ||
1041 | bool *dl, bool *if_is_24bit); | ||
1042 | extern int run_tmds_table(struct drm_device *, struct dcb_entry *, | ||
1043 | int head, int pxclk); | ||
1044 | extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head, | ||
1045 | enum LVDS_script, int pxclk); | ||
1046 | |||
1047 | /* nouveau_ttm.c */ | ||
1048 | int nouveau_ttm_global_init(struct drm_nouveau_private *); | ||
1049 | void nouveau_ttm_global_release(struct drm_nouveau_private *); | ||
1050 | int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); | ||
1051 | |||
1052 | /* nouveau_dp.c */ | ||
1053 | int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | ||
1054 | uint8_t *data, int data_nr); | ||
1055 | bool nouveau_dp_detect(struct drm_encoder *); | ||
1056 | bool nouveau_dp_link_train(struct drm_encoder *); | ||
1057 | |||
1058 | /* nv04_fb.c */ | ||
1059 | extern int nv04_fb_init(struct drm_device *); | ||
1060 | extern void nv04_fb_takedown(struct drm_device *); | ||
1061 | |||
1062 | /* nv10_fb.c */ | ||
1063 | extern int nv10_fb_init(struct drm_device *); | ||
1064 | extern void nv10_fb_takedown(struct drm_device *); | ||
1065 | extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, | ||
1066 | uint32_t addr, uint32_t size, | ||
1067 | uint32_t pitch, uint32_t flags); | ||
1068 | extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); | ||
1069 | extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); | ||
1070 | |||
1071 | /* nv30_fb.c */ | ||
1072 | extern int nv30_fb_init(struct drm_device *); | ||
1073 | extern void nv30_fb_takedown(struct drm_device *); | ||
1074 | extern void nv30_fb_init_tile_region(struct drm_device *dev, int i, | ||
1075 | uint32_t addr, uint32_t size, | ||
1076 | uint32_t pitch, uint32_t flags); | ||
1077 | extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); | ||
1078 | |||
1079 | /* nv40_fb.c */ | ||
1080 | extern int nv40_fb_init(struct drm_device *); | ||
1081 | extern void nv40_fb_takedown(struct drm_device *); | ||
1082 | extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); | ||
1083 | |||
1084 | /* nv50_fb.c */ | ||
1085 | extern int nv50_fb_init(struct drm_device *); | ||
1086 | extern void nv50_fb_takedown(struct drm_device *); | ||
1087 | extern void nv50_fb_vm_trap(struct drm_device *, int display); | ||
1088 | |||
1089 | /* nvc0_fb.c */ | ||
1090 | extern int nvc0_fb_init(struct drm_device *); | ||
1091 | extern void nvc0_fb_takedown(struct drm_device *); | ||
1092 | |||
1093 | /* nv04_fifo.c */ | ||
1094 | extern int nv04_fifo_init(struct drm_device *); | ||
1095 | extern void nv04_fifo_fini(struct drm_device *); | ||
1096 | extern void nv04_fifo_disable(struct drm_device *); | ||
1097 | extern void nv04_fifo_enable(struct drm_device *); | ||
1098 | extern bool nv04_fifo_reassign(struct drm_device *, bool); | ||
1099 | extern bool nv04_fifo_cache_pull(struct drm_device *, bool); | ||
1100 | extern int nv04_fifo_channel_id(struct drm_device *); | ||
1101 | extern int nv04_fifo_create_context(struct nouveau_channel *); | ||
1102 | extern void nv04_fifo_destroy_context(struct nouveau_channel *); | ||
1103 | extern int nv04_fifo_load_context(struct nouveau_channel *); | ||
1104 | extern int nv04_fifo_unload_context(struct drm_device *); | ||
1105 | extern void nv04_fifo_isr(struct drm_device *); | ||
1106 | |||
1107 | /* nv10_fifo.c */ | ||
1108 | extern int nv10_fifo_init(struct drm_device *); | ||
1109 | extern int nv10_fifo_channel_id(struct drm_device *); | ||
1110 | extern int nv10_fifo_create_context(struct nouveau_channel *); | ||
1111 | extern int nv10_fifo_load_context(struct nouveau_channel *); | ||
1112 | extern int nv10_fifo_unload_context(struct drm_device *); | ||
1113 | |||
1114 | /* nv40_fifo.c */ | ||
1115 | extern int nv40_fifo_init(struct drm_device *); | ||
1116 | extern int nv40_fifo_create_context(struct nouveau_channel *); | ||
1117 | extern int nv40_fifo_load_context(struct nouveau_channel *); | ||
1118 | extern int nv40_fifo_unload_context(struct drm_device *); | ||
1119 | |||
1120 | /* nv50_fifo.c */ | ||
1121 | extern int nv50_fifo_init(struct drm_device *); | ||
1122 | extern void nv50_fifo_takedown(struct drm_device *); | ||
1123 | extern int nv50_fifo_channel_id(struct drm_device *); | ||
1124 | extern int nv50_fifo_create_context(struct nouveau_channel *); | ||
1125 | extern void nv50_fifo_destroy_context(struct nouveau_channel *); | ||
1126 | extern int nv50_fifo_load_context(struct nouveau_channel *); | ||
1127 | extern int nv50_fifo_unload_context(struct drm_device *); | ||
1128 | extern void nv50_fifo_tlb_flush(struct drm_device *dev); | ||
1129 | |||
1130 | /* nvc0_fifo.c */ | ||
1131 | extern int nvc0_fifo_init(struct drm_device *); | ||
1132 | extern void nvc0_fifo_takedown(struct drm_device *); | ||
1133 | extern void nvc0_fifo_disable(struct drm_device *); | ||
1134 | extern void nvc0_fifo_enable(struct drm_device *); | ||
1135 | extern bool nvc0_fifo_reassign(struct drm_device *, bool); | ||
1136 | extern bool nvc0_fifo_cache_pull(struct drm_device *, bool); | ||
1137 | extern int nvc0_fifo_channel_id(struct drm_device *); | ||
1138 | extern int nvc0_fifo_create_context(struct nouveau_channel *); | ||
1139 | extern void nvc0_fifo_destroy_context(struct nouveau_channel *); | ||
1140 | extern int nvc0_fifo_load_context(struct nouveau_channel *); | ||
1141 | extern int nvc0_fifo_unload_context(struct drm_device *); | ||
1142 | |||
1143 | /* nv04_graph.c */ | ||
1144 | extern int nv04_graph_create(struct drm_device *); | ||
1145 | extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); | ||
1146 | extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, | ||
1147 | u32 class, u32 mthd, u32 data); | ||
1148 | extern struct nouveau_bitfield nv04_graph_nsource[]; | ||
1149 | |||
1150 | /* nv10_graph.c */ | ||
1151 | extern int nv10_graph_create(struct drm_device *); | ||
1152 | extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); | ||
1153 | extern struct nouveau_bitfield nv10_graph_intr[]; | ||
1154 | extern struct nouveau_bitfield nv10_graph_nstatus[]; | ||
1155 | |||
1156 | /* nv20_graph.c */ | ||
1157 | extern int nv20_graph_create(struct drm_device *); | ||
1158 | |||
1159 | /* nv40_graph.c */ | ||
1160 | extern int nv40_graph_create(struct drm_device *); | ||
1161 | extern void nv40_grctx_init(struct nouveau_grctx *); | ||
1162 | |||
1163 | /* nv50_graph.c */ | ||
1164 | extern int nv50_graph_create(struct drm_device *); | ||
1165 | extern int nv50_grctx_init(struct nouveau_grctx *); | ||
1166 | extern struct nouveau_enum nv50_data_error_names[]; | ||
1167 | extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst); | ||
1168 | |||
1169 | /* nvc0_graph.c */ | ||
1170 | extern int nvc0_graph_create(struct drm_device *); | ||
1171 | extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst); | ||
1172 | |||
1173 | /* nv84_crypt.c */ | ||
1174 | extern int nv84_crypt_create(struct drm_device *); | ||
1175 | |||
1176 | /* nva3_copy.c */ | ||
1177 | extern int nva3_copy_create(struct drm_device *dev); | ||
1178 | |||
1179 | /* nvc0_copy.c */ | ||
1180 | extern int nvc0_copy_create(struct drm_device *dev, int engine); | ||
1181 | |||
1182 | /* nv40_mpeg.c */ | ||
1183 | extern int nv40_mpeg_create(struct drm_device *dev); | ||
1184 | |||
1185 | /* nv50_mpeg.c */ | ||
1186 | extern int nv50_mpeg_create(struct drm_device *dev); | ||
1187 | |||
1188 | /* nv04_instmem.c */ | ||
1189 | extern int nv04_instmem_init(struct drm_device *); | ||
1190 | extern void nv04_instmem_takedown(struct drm_device *); | ||
1191 | extern int nv04_instmem_suspend(struct drm_device *); | ||
1192 | extern void nv04_instmem_resume(struct drm_device *); | ||
1193 | extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *, | ||
1194 | u32 size, u32 align); | ||
1195 | extern void nv04_instmem_put(struct nouveau_gpuobj *); | ||
1196 | extern int nv04_instmem_map(struct nouveau_gpuobj *); | ||
1197 | extern void nv04_instmem_unmap(struct nouveau_gpuobj *); | ||
1198 | extern void nv04_instmem_flush(struct drm_device *); | ||
1199 | |||
1200 | /* nv50_instmem.c */ | ||
1201 | extern int nv50_instmem_init(struct drm_device *); | ||
1202 | extern void nv50_instmem_takedown(struct drm_device *); | ||
1203 | extern int nv50_instmem_suspend(struct drm_device *); | ||
1204 | extern void nv50_instmem_resume(struct drm_device *); | ||
1205 | extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *, | ||
1206 | u32 size, u32 align); | ||
1207 | extern void nv50_instmem_put(struct nouveau_gpuobj *); | ||
1208 | extern int nv50_instmem_map(struct nouveau_gpuobj *); | ||
1209 | extern void nv50_instmem_unmap(struct nouveau_gpuobj *); | ||
1210 | extern void nv50_instmem_flush(struct drm_device *); | ||
1211 | extern void nv84_instmem_flush(struct drm_device *); | ||
1212 | |||
1213 | /* nvc0_instmem.c */ | ||
1214 | extern int nvc0_instmem_init(struct drm_device *); | ||
1215 | extern void nvc0_instmem_takedown(struct drm_device *); | ||
1216 | extern int nvc0_instmem_suspend(struct drm_device *); | ||
1217 | extern void nvc0_instmem_resume(struct drm_device *); | ||
1218 | |||
1219 | /* nv04_mc.c */ | ||
1220 | extern int nv04_mc_init(struct drm_device *); | ||
1221 | extern void nv04_mc_takedown(struct drm_device *); | ||
1222 | |||
1223 | /* nv40_mc.c */ | ||
1224 | extern int nv40_mc_init(struct drm_device *); | ||
1225 | extern void nv40_mc_takedown(struct drm_device *); | ||
1226 | |||
1227 | /* nv50_mc.c */ | ||
1228 | extern int nv50_mc_init(struct drm_device *); | ||
1229 | extern void nv50_mc_takedown(struct drm_device *); | ||
1230 | |||
1231 | /* nv04_timer.c */ | ||
1232 | extern int nv04_timer_init(struct drm_device *); | ||
1233 | extern uint64_t nv04_timer_read(struct drm_device *); | ||
1234 | extern void nv04_timer_takedown(struct drm_device *); | ||
1235 | |||
1236 | extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, | ||
1237 | unsigned long arg); | ||
1238 | |||
1239 | /* nv04_dac.c */ | ||
1240 | extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *); | ||
1241 | extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); | ||
1242 | extern int nv04_dac_output_offset(struct drm_encoder *encoder); | ||
1243 | extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); | ||
1244 | extern bool nv04_dac_in_use(struct drm_encoder *encoder); | ||
1245 | |||
1246 | /* nv04_dfp.c */ | ||
1247 | extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *); | ||
1248 | extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent); | ||
1249 | extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, | ||
1250 | int head, bool dl); | ||
1251 | extern void nv04_dfp_disable(struct drm_device *dev, int head); | ||
1252 | extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); | ||
1253 | |||
1254 | /* nv04_tv.c */ | ||
1255 | extern int nv04_tv_identify(struct drm_device *dev, int i2c_index); | ||
1256 | extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *); | ||
1257 | |||
1258 | /* nv17_tv.c */ | ||
1259 | extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *); | ||
1260 | |||
1261 | /* nv04_display.c */ | ||
1262 | extern int nv04_display_early_init(struct drm_device *); | ||
1263 | extern void nv04_display_late_takedown(struct drm_device *); | ||
1264 | extern int nv04_display_create(struct drm_device *); | ||
1265 | extern int nv04_display_init(struct drm_device *); | ||
1266 | extern void nv04_display_destroy(struct drm_device *); | ||
1267 | |||
1268 | /* nv04_crtc.c */ | ||
1269 | extern int nv04_crtc_create(struct drm_device *, int index); | ||
1270 | |||
1271 | /* nouveau_bo.c */ | ||
1272 | extern struct ttm_bo_driver nouveau_bo_driver; | ||
1273 | extern int nouveau_bo_new(struct drm_device *, int size, int align, | ||
1274 | uint32_t flags, uint32_t tile_mode, | ||
1275 | uint32_t tile_flags, struct nouveau_bo **); | ||
1276 | extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); | ||
1277 | extern int nouveau_bo_unpin(struct nouveau_bo *); | ||
1278 | extern int nouveau_bo_map(struct nouveau_bo *); | ||
1279 | extern void nouveau_bo_unmap(struct nouveau_bo *); | ||
1280 | extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type, | ||
1281 | uint32_t busy); | ||
1282 | extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); | ||
1283 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); | ||
1284 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); | ||
1285 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); | ||
1286 | extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); | ||
1287 | extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, | ||
1288 | bool no_wait_reserve, bool no_wait_gpu); | ||
1289 | |||
1290 | extern struct nouveau_vma * | ||
1291 | nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); | ||
1292 | extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *, | ||
1293 | struct nouveau_vma *); | ||
1294 | extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); | ||
1295 | |||
1296 | /* nouveau_fence.c */ | ||
1297 | struct nouveau_fence; | ||
1298 | extern int nouveau_fence_init(struct drm_device *); | ||
1299 | extern void nouveau_fence_fini(struct drm_device *); | ||
1300 | extern int nouveau_fence_channel_init(struct nouveau_channel *); | ||
1301 | extern void nouveau_fence_channel_fini(struct nouveau_channel *); | ||
1302 | extern void nouveau_fence_update(struct nouveau_channel *); | ||
1303 | extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **, | ||
1304 | bool emit); | ||
1305 | extern int nouveau_fence_emit(struct nouveau_fence *); | ||
1306 | extern void nouveau_fence_work(struct nouveau_fence *fence, | ||
1307 | void (*work)(void *priv, bool signalled), | ||
1308 | void *priv); | ||
1309 | struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); | ||
1310 | |||
1311 | extern bool __nouveau_fence_signalled(void *obj, void *arg); | ||
1312 | extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); | ||
1313 | extern int __nouveau_fence_flush(void *obj, void *arg); | ||
1314 | extern void __nouveau_fence_unref(void **obj); | ||
1315 | extern void *__nouveau_fence_ref(void *obj); | ||
1316 | |||
/* Typed convenience wrappers around the generic __nouveau_fence_* entry
 * points; each simply forwards with a NULL 'arg'. */
static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
{
	return __nouveau_fence_signalled(obj, NULL);
}
/* Wait for the fence to signal; 'lazy' and 'intr' are forwarded to
 * __nouveau_fence_wait() (presumably sleep-vs-spin polling and
 * interruptible wait — confirm in nouveau_fence.c). */
static inline int
nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
{
	return __nouveau_fence_wait(obj, NULL, lazy, intr);
}
extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
/* Flush outstanding work for the fence (see __nouveau_fence_flush()). */
static inline int nouveau_fence_flush(struct nouveau_fence *obj)
{
	return __nouveau_fence_flush(obj, NULL);
}
/* Release the caller's fence reference; the pointer is passed by address
 * so the callee can clear it — confirm in nouveau_fence.c. */
static inline void nouveau_fence_unref(struct nouveau_fence **obj)
{
	__nouveau_fence_unref((void **)obj);
}
/* Take an additional reference on the fence and return it. */
static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
{
	return __nouveau_fence_ref(obj);
}
1339 | |||
1340 | /* nouveau_gem.c */ | ||
1341 | extern int nouveau_gem_new(struct drm_device *, int size, int align, | ||
1342 | uint32_t domain, uint32_t tile_mode, | ||
1343 | uint32_t tile_flags, struct nouveau_bo **); | ||
1344 | extern int nouveau_gem_object_new(struct drm_gem_object *); | ||
1345 | extern void nouveau_gem_object_del(struct drm_gem_object *); | ||
1346 | extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); | ||
1347 | extern void nouveau_gem_object_close(struct drm_gem_object *, | ||
1348 | struct drm_file *); | ||
1349 | extern int nouveau_gem_ioctl_new(struct drm_device *, void *, | ||
1350 | struct drm_file *); | ||
1351 | extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, | ||
1352 | struct drm_file *); | ||
1353 | extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, | ||
1354 | struct drm_file *); | ||
1355 | extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, | ||
1356 | struct drm_file *); | ||
1357 | extern int nouveau_gem_ioctl_info(struct drm_device *, void *, | ||
1358 | struct drm_file *); | ||
1359 | |||
1360 | /* nouveau_display.c */ | ||
1361 | int nouveau_vblank_enable(struct drm_device *dev, int crtc); | ||
1362 | void nouveau_vblank_disable(struct drm_device *dev, int crtc); | ||
1363 | int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
1364 | struct drm_pending_vblank_event *event); | ||
1365 | int nouveau_finish_page_flip(struct nouveau_channel *, | ||
1366 | struct nouveau_page_flip_state *); | ||
1367 | |||
1368 | /* nv10_gpio.c */ | ||
1369 | int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | ||
1370 | int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | ||
1371 | |||
1372 | /* nv50_gpio.c */ | ||
1373 | int nv50_gpio_init(struct drm_device *dev); | ||
1374 | void nv50_gpio_fini(struct drm_device *dev); | ||
1375 | int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | ||
1376 | int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | ||
1377 | int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, | ||
1378 | void (*)(void *, int), void *); | ||
1379 | void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, | ||
1380 | void (*)(void *, int), void *); | ||
1381 | bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); | ||
1382 | |||
1383 | /* nv50_calc. */ | ||
1384 | int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, | ||
1385 | int *N1, int *M1, int *N2, int *M2, int *P); | ||
1386 | int nva3_calc_pll(struct drm_device *, struct pll_lims *, | ||
1387 | int clk, int *N, int *fN, int *M, int *P); | ||
1388 | |||
1389 | #ifndef ioread32_native | ||
1390 | #ifdef __BIG_ENDIAN | ||
1391 | #define ioread16_native ioread16be | ||
1392 | #define iowrite16_native iowrite16be | ||
1393 | #define ioread32_native ioread32be | ||
1394 | #define iowrite32_native iowrite32be | ||
1395 | #else /* def __BIG_ENDIAN */ | ||
1396 | #define ioread16_native ioread16 | ||
1397 | #define iowrite16_native iowrite16 | ||
1398 | #define ioread32_native ioread32 | ||
1399 | #define iowrite32_native iowrite32 | ||
1400 | #endif /* def __BIG_ENDIAN else */ | ||
1401 | #endif /* !ioread32_native */ | ||
1402 | |||
1403 | /* channel control reg access */ | ||
/* Read a 32-bit value (native endianness) from the channel's mapped
 * control area at byte offset 'reg'. */
static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
{
	return ioread32_native(chan->user + reg);
}

/* Write a 32-bit value (native endianness) to the channel's mapped
 * control area at byte offset 'reg'. */
static inline void nvchan_wr32(struct nouveau_channel *chan,
			       unsigned reg, u32 val)
{
	iowrite32_native(val, chan->user + reg);
}
1414 | |||
1415 | /* register access */ | ||
/* Read a 32-bit GPU MMIO register at byte offset 'reg'. */
static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return ioread32_native(dev_priv->mmio + reg);
}

/* Write a 32-bit GPU MMIO register at byte offset 'reg'. */
static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	iowrite32_native(val, dev_priv->mmio + reg);
}
1427 | |||
1428 | static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) | ||
1429 | { | ||
1430 | u32 tmp = nv_rd32(dev, reg); | ||
1431 | nv_wr32(dev, reg, (tmp & ~mask) | val); | ||
1432 | return tmp; | ||
1433 | } | ||
1434 | |||
/* Read an 8-bit GPU MMIO register at byte offset 'reg'. */
static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return ioread8(dev_priv->mmio + reg);
}

/* Write an 8-bit GPU MMIO register at byte offset 'reg'. */
static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	iowrite8(val, dev_priv->mmio + reg);
}
1446 | |||
/* Poll 'reg' until (value & mask) == val, with a 2000000000 timeout
 * (presumably nanoseconds, i.e. ~2s — see nouveau_wait_eq()). */
#define nv_wait(dev, reg, mask, val) \
	nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
/* Poll 'reg' until (value & mask) != val, same timeout as nv_wait(). */
#define nv_wait_ne(dev, reg, mask, val) \
	nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
1451 | |||
1452 | /* PRAMIN access */ | ||
/* Read a 32-bit word from the mapped PRAMIN aperture at 'offset'. */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return ioread32_native(dev_priv->ramin + offset);
}

/* Write a 32-bit word to the mapped PRAMIN aperture at 'offset'. */
static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	iowrite32_native(val, dev_priv->ramin + offset);
}
1464 | |||
1465 | /* object access */ | ||
1466 | extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset); | ||
1467 | extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val); | ||
1468 | |||
1469 | /* | ||
1470 | * Logging | ||
1471 | * Argument d is (struct drm_device *). | ||
1472 | */ | ||
/* Core log macro: prefixes every message with the DRM/driver name and the
 * PCI device name so logs from multiple GPUs stay attributable. */
#define NV_PRINTK(level, d, fmt, arg...) \
	printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
	       pci_name(d->pdev), ##arg)
#ifndef NV_DEBUG_NOTRACE
/* Debug variants gated on drm_debug; these include __func__ and
 * __LINE__ in every message. */
#define NV_DEBUG(d, fmt, arg...) do { \
	if (drm_debug & DRM_UT_DRIVER) { \
		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
			  __LINE__, ##arg); \
	} \
} while (0)
#define NV_DEBUG_KMS(d, fmt, arg...) do { \
	if (drm_debug & DRM_UT_KMS) { \
		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
			  __LINE__, ##arg); \
	} \
} while (0)
#else
/* Same gating, but without the function/line trace prefix. */
#define NV_DEBUG(d, fmt, arg...) do { \
	if (drm_debug & DRM_UT_DRIVER) \
		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
} while (0)
#define NV_DEBUG_KMS(d, fmt, arg...) do { \
	if (drm_debug & DRM_UT_KMS) \
		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
} while (0)
#endif
/* Unconditional log levels. */
#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
1504 | |||
1505 | /* nouveau_reg_debug bitmask */ | ||
1506 | enum { | ||
1507 | NOUVEAU_REG_DEBUG_MC = 0x1, | ||
1508 | NOUVEAU_REG_DEBUG_VIDEO = 0x2, | ||
1509 | NOUVEAU_REG_DEBUG_FB = 0x4, | ||
1510 | NOUVEAU_REG_DEBUG_EXTDEV = 0x8, | ||
1511 | NOUVEAU_REG_DEBUG_CRTC = 0x10, | ||
1512 | NOUVEAU_REG_DEBUG_RAMDAC = 0x20, | ||
1513 | NOUVEAU_REG_DEBUG_VGACRTC = 0x40, | ||
1514 | NOUVEAU_REG_DEBUG_RMVIO = 0x80, | ||
1515 | NOUVEAU_REG_DEBUG_VGAATTR = 0x100, | ||
1516 | NOUVEAU_REG_DEBUG_EVO = 0x200, | ||
1517 | }; | ||
1518 | |||
/* Log a register access of category NOUVEAU_REG_DEBUG_<type> when that
 * bit is set in the nouveau_reg_debug bitmask (enum above). */
#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
		NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
} while (0)
1523 | |||
1524 | static inline bool | ||
1525 | nv_two_heads(struct drm_device *dev) | ||
1526 | { | ||
1527 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1528 | const int impl = dev->pci_device & 0x0ff0; | ||
1529 | |||
1530 | if (dev_priv->card_type >= NV_10 && impl != 0x0100 && | ||
1531 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) | ||
1532 | return true; | ||
1533 | |||
1534 | return false; | ||
1535 | } | ||
1536 | |||
1537 | static inline bool | ||
1538 | nv_gf4_disp_arch(struct drm_device *dev) | ||
1539 | { | ||
1540 | return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110; | ||
1541 | } | ||
1542 | |||
1543 | static inline bool | ||
1544 | nv_two_reg_pll(struct drm_device *dev) | ||
1545 | { | ||
1546 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1547 | const int impl = dev->pci_device & 0x0ff0; | ||
1548 | |||
1549 | if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40) | ||
1550 | return true; | ||
1551 | return false; | ||
1552 | } | ||
1553 | |||
1554 | static inline bool | ||
1555 | nv_match_device(struct drm_device *dev, unsigned device, | ||
1556 | unsigned sub_vendor, unsigned sub_device) | ||
1557 | { | ||
1558 | return dev->pdev->device == device && | ||
1559 | dev->pdev->subsystem_vendor == sub_vendor && | ||
1560 | dev->pdev->subsystem_device == sub_device; | ||
1561 | } | ||
1562 | |||
/* Return the engine object registered in dev_priv->eng[] for 'engine';
 * cast to void * so callers can downcast to the concrete engine type. */
static inline void *
nv_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return (void *)dev_priv->eng[engine];
}
1569 | |||
1570 | /* returns 1 if device is one of the nv4x using the 0x4497 object class, | ||
1571 | * helpful to determine a number of other hardware features | ||
1572 | */ | ||
1573 | static inline int | ||
1574 | nv44_graph_class(struct drm_device *dev) | ||
1575 | { | ||
1576 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1577 | |||
1578 | if ((dev_priv->chipset & 0xf0) == 0x60) | ||
1579 | return 1; | ||
1580 | |||
1581 | return !(0x0baf & (1 << (dev_priv->chipset & 0x0f))); | ||
1582 | } | ||
1583 | |||
1584 | /* memory type/access flags, do not match hardware values */ | ||
1585 | #define NV_MEM_ACCESS_RO 1 | ||
1586 | #define NV_MEM_ACCESS_WO 2 | ||
1587 | #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) | ||
1588 | #define NV_MEM_ACCESS_SYS 4 | ||
1589 | #define NV_MEM_ACCESS_VM 8 | ||
1590 | |||
1591 | #define NV_MEM_TARGET_VRAM 0 | ||
1592 | #define NV_MEM_TARGET_PCI 1 | ||
1593 | #define NV_MEM_TARGET_PCI_NOSNOOP 2 | ||
1594 | #define NV_MEM_TARGET_VM 3 | ||
1595 | #define NV_MEM_TARGET_GART 4 | ||
1596 | |||
1597 | #define NV_MEM_TYPE_VM 0x7f | ||
1598 | #define NV_MEM_COMP_VM 0x03 | ||
1599 | |||
1600 | /* NV_SW object class */ | ||
1601 | #define NV_SW 0x0000506e | ||
1602 | #define NV_SW_DMA_SEMAPHORE 0x00000060 | ||
1603 | #define NV_SW_SEMAPHORE_OFFSET 0x00000064 | ||
1604 | #define NV_SW_SEMAPHORE_ACQUIRE 0x00000068 | ||
1605 | #define NV_SW_SEMAPHORE_RELEASE 0x0000006c | ||
1606 | #define NV_SW_YIELD 0x00000080 | ||
1607 | #define NV_SW_DMA_VBLSEM 0x0000018c | ||
1608 | #define NV_SW_VBLSEM_OFFSET 0x00000400 | ||
1609 | #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 | ||
1610 | #define NV_SW_VBLSEM_RELEASE 0x00000408 | ||
1611 | #define NV_SW_PAGE_FLIP 0x00000500 | ||
1612 | |||
1613 | #endif /* __NOUVEAU_DRV_H__ */ | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h new file mode 100644 index 00000000000..95c843e684b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fb.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Maarten Maathuis. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __NOUVEAU_FB_H__ | ||
28 | #define __NOUVEAU_FB_H__ | ||
29 | |||
/* DRM framebuffer backed by a nouveau buffer object. */
struct nouveau_framebuffer {
	struct drm_framebuffer base;	/* embedded base; see container_of below */
	struct nouveau_bo *nvbo;	/* backing storage */
	struct nouveau_vma vma;		/* GPU mapping of nvbo — TODO confirm */
	u32 r_dma;			/* presumably cached hw dma/format/pitch */
	u32 r_format;			/* values for display programming — */
	u32 r_pitch;			/* confirm against the display code */
};
38 | |||
/* Upcast from the embedded drm_framebuffer to its nouveau wrapper. */
static inline struct nouveau_framebuffer *
nouveau_framebuffer(struct drm_framebuffer *fb)
{
	return container_of(fb, struct nouveau_framebuffer, base);
}
44 | |||
45 | extern const struct drm_mode_config_funcs nouveau_mode_config_funcs; | ||
46 | |||
47 | int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, | ||
48 | struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo); | ||
49 | #endif /* __NOUVEAU_FB_H__ */ | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h new file mode 100644 index 00000000000..86c2e374e93 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h | |||
@@ -0,0 +1,133 @@ | |||
1 | #ifndef __NOUVEAU_GRCTX_H__ | ||
2 | #define __NOUVEAU_GRCTX_H__ | ||
3 | |||
/* State carried while either emitting a ctxprog (PROG mode: 'data' is
 * the instruction buffer) or filling in initial context values (VALS
 * mode: 'data' is the context gpuobj written via nv_wo32()). */
struct nouveau_grctx {
	struct drm_device *dev;

	enum {
		NOUVEAU_GRCTX_PROG,
		NOUVEAU_GRCTX_VALS
	} mode;
	void *data;		/* meaning depends on 'mode', see above */

	uint32_t ctxprog_max;	/* capacity of the ctxprog buffer, in words */
	uint32_t ctxprog_len;	/* instructions emitted so far */
	uint32_t ctxprog_reg;	/* register base of the current cp_ctx() range */
	int ctxprog_label[32];	/* label -> instruction index (see cp_name) */
	uint32_t ctxvals_pos;	/* current write position in the values image */
	uint32_t ctxvals_base;	/* start of the current cp_ctx() range */
};
20 | |||
21 | #ifdef CP_CTX | ||
/* Append one instruction to the ctxprog; no-op unless building the
 * program (PROG mode).  BUGs if the buffer is already full. */
static inline void
cp_out(struct nouveau_grctx *ctx, uint32_t inst)
{
	uint32_t *ctxprog = ctx->data;

	if (ctx->mode != NOUVEAU_GRCTX_PROG)
		return;

	BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
	ctxprog[ctx->ctxprog_len++] = inst;
}
33 | |||
/* Emit a CP_LOAD_SR instruction carrying the immediate 'val'. */
static inline void
cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
{
	cp_out(ctx, CP_LOAD_SR | val);
}
39 | |||
/* Open a context-transfer range: 'length' words starting at MMIO
 * register 'reg'.  Advances the ctxvals window accordingly; a length
 * too large for the CP_CTX count field is loaded via CP_LOAD_SR
 * first and the inline count set to 0. */
static inline void
cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
{
	/* registers are word offsets relative to MMIO base 0x00400000 */
	ctx->ctxprog_reg = (reg - 0x00400000) >> 2;

	ctx->ctxvals_base = ctx->ctxvals_pos;
	ctx->ctxvals_pos = ctx->ctxvals_base + length;

	if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
		cp_lsr(ctx, length);
		length = 0;
	}

	cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
}
55 | |||
/* Define label 'name' at the current program position, then backpatch
 * every earlier forward branch to it (instructions emitted by _cp_bra()
 * with the 0xff000000 placeholder in their ip field). */
static inline void
cp_name(struct nouveau_grctx *ctx, int name)
{
	uint32_t *ctxprog = ctx->data;
	int i;

	if (ctx->mode != NOUVEAU_GRCTX_PROG)
		return;

	ctx->ctxprog_label[name] = ctx->ctxprog_len;
	for (i = 0; i < ctx->ctxprog_len; i++) {
		/* only unresolved branch placeholders match 0xff4xxxxx */
		if ((ctxprog[i] & 0xfff00000) != 0xff400000)
			continue;
		if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
			continue;
		ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
			     (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
	}
}
75 | |||
/* Emit a conditional branch (mod 0), call (mod 1) or return (mod 2) on
 * 'flag' being in 'state'.  Branch/call targets resolve through the
 * label table; a not-yet-defined label gets the 0xff000000 placeholder
 * that cp_name() patches later.  Returns take no target. */
static inline void
_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
{
	int ip = 0;

	if (mod != 2) {
		ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
		if (ip == 0)
			ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
	}

	cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
		    (state ? 0 : CP_BRA_IF_CLEAR));
}
#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
#ifdef CP_BRA_MOD
#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
#endif
95 | |||
/* Emit a CP_WAIT instruction: stall until 'flag' reaches 'state'. */
static inline void
_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
{
	cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
}
#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
102 | |||
/* Emit a CP_SET instruction: set 'flag' to 'state' (0 or 1). */
static inline void
_cp_set(struct nouveau_grctx *ctx, int flag, int state)
{
	cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
}
#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
109 | |||
/* Reposition the ctxvals write pointer to 'offset' and emit the
 * matching CP_SET_CONTEXT_POINTER so the hardware pointer agrees. */
static inline void
cp_pos(struct nouveau_grctx *ctx, int offset)
{
	ctx->ctxvals_pos = offset;
	ctx->ctxvals_base = ctx->ctxvals_pos;

	cp_lsr(ctx, ctx->ctxvals_pos);
	cp_out(ctx, CP_SET_CONTEXT_POINTER);
}
119 | |||
/* Record the initial value 'val' for register 'reg' into the context
 * image; only meaningful in VALS mode, where ctx->data is the gpuobj
 * holding the context.  The destination offset is relative to the
 * range opened by the most recent cp_ctx(). */
static inline void
gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
{
	if (ctx->mode != NOUVEAU_GRCTX_VALS)
		return;

	/* word offset from MMIO base, rebased into the ctxvals image */
	reg = (reg - 0x00400000) / 4;
	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;

	nv_wo32(ctx->data, reg * 4, val);
}
131 | #endif | ||
132 | |||
133 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c new file mode 100644 index 00000000000..cb389d01432 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_i2c.h" | ||
28 | #include "nouveau_hw.h" | ||
29 | |||
30 | static void | ||
31 | nv04_i2c_setscl(void *data, int state) | ||
32 | { | ||
33 | struct nouveau_i2c_chan *i2c = data; | ||
34 | struct drm_device *dev = i2c->dev; | ||
35 | uint8_t val; | ||
36 | |||
37 | val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0); | ||
38 | NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); | ||
39 | } | ||
40 | |||
41 | static void | ||
42 | nv04_i2c_setsda(void *data, int state) | ||
43 | { | ||
44 | struct nouveau_i2c_chan *i2c = data; | ||
45 | struct drm_device *dev = i2c->dev; | ||
46 | uint8_t val; | ||
47 | |||
48 | val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0); | ||
49 | NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); | ||
50 | } | ||
51 | |||
52 | static int | ||
53 | nv04_i2c_getscl(void *data) | ||
54 | { | ||
55 | struct nouveau_i2c_chan *i2c = data; | ||
56 | struct drm_device *dev = i2c->dev; | ||
57 | |||
58 | return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4); | ||
59 | } | ||
60 | |||
61 | static int | ||
62 | nv04_i2c_getsda(void *data) | ||
63 | { | ||
64 | struct nouveau_i2c_chan *i2c = data; | ||
65 | struct drm_device *dev = i2c->dev; | ||
66 | |||
67 | return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8); | ||
68 | } | ||
69 | |||
70 | static void | ||
71 | nv4e_i2c_setscl(void *data, int state) | ||
72 | { | ||
73 | struct nouveau_i2c_chan *i2c = data; | ||
74 | struct drm_device *dev = i2c->dev; | ||
75 | uint8_t val; | ||
76 | |||
77 | val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0); | ||
78 | nv_wr32(dev, i2c->wr, val | 0x01); | ||
79 | } | ||
80 | |||
81 | static void | ||
82 | nv4e_i2c_setsda(void *data, int state) | ||
83 | { | ||
84 | struct nouveau_i2c_chan *i2c = data; | ||
85 | struct drm_device *dev = i2c->dev; | ||
86 | uint8_t val; | ||
87 | |||
88 | val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0); | ||
89 | nv_wr32(dev, i2c->wr, val | 0x01); | ||
90 | } | ||
91 | |||
92 | static int | ||
93 | nv4e_i2c_getscl(void *data) | ||
94 | { | ||
95 | struct nouveau_i2c_chan *i2c = data; | ||
96 | struct drm_device *dev = i2c->dev; | ||
97 | |||
98 | return !!((nv_rd32(dev, i2c->rd) >> 16) & 4); | ||
99 | } | ||
100 | |||
101 | static int | ||
102 | nv4e_i2c_getsda(void *data) | ||
103 | { | ||
104 | struct nouveau_i2c_chan *i2c = data; | ||
105 | struct drm_device *dev = i2c->dev; | ||
106 | |||
107 | return !!((nv_rd32(dev, i2c->rd) >> 16) & 8); | ||
108 | } | ||
109 | |||
110 | static int | ||
111 | nv50_i2c_getscl(void *data) | ||
112 | { | ||
113 | struct nouveau_i2c_chan *i2c = data; | ||
114 | struct drm_device *dev = i2c->dev; | ||
115 | |||
116 | return !!(nv_rd32(dev, i2c->rd) & 1); | ||
117 | } | ||
118 | |||
119 | |||
120 | static int | ||
121 | nv50_i2c_getsda(void *data) | ||
122 | { | ||
123 | struct nouveau_i2c_chan *i2c = data; | ||
124 | struct drm_device *dev = i2c->dev; | ||
125 | |||
126 | return !!(nv_rd32(dev, i2c->rd) & 2); | ||
127 | } | ||
128 | |||
129 | static void | ||
130 | nv50_i2c_setscl(void *data, int state) | ||
131 | { | ||
132 | struct nouveau_i2c_chan *i2c = data; | ||
133 | struct drm_device *dev = i2c->dev; | ||
134 | |||
135 | nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0)); | ||
136 | } | ||
137 | |||
138 | static void | ||
139 | nv50_i2c_setsda(void *data, int state) | ||
140 | { | ||
141 | struct nouveau_i2c_chan *i2c = data; | ||
142 | struct drm_device *dev = i2c->dev; | ||
143 | |||
144 | nv_wr32(dev, i2c->wr, | ||
145 | (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0)); | ||
146 | i2c->data = state; | ||
147 | } | ||
148 | |||
149 | static const uint32_t nv50_i2c_port[] = { | ||
150 | 0x00e138, 0x00e150, 0x00e168, 0x00e180, | ||
151 | 0x00e254, 0x00e274, 0x00e764, 0x00e780, | ||
152 | 0x00e79c, 0x00e7b8 | ||
153 | }; | ||
154 | #define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port) | ||
155 | |||
156 | int | ||
157 | nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) | ||
158 | { | ||
159 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
160 | struct nouveau_i2c_chan *i2c; | ||
161 | int ret; | ||
162 | |||
163 | if (entry->chan) | ||
164 | return -EEXIST; | ||
165 | |||
166 | if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) { | ||
167 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); | ||
172 | if (i2c == NULL) | ||
173 | return -ENOMEM; | ||
174 | |||
175 | switch (entry->port_type) { | ||
176 | case 0: | ||
177 | i2c->bit.setsda = nv04_i2c_setsda; | ||
178 | i2c->bit.setscl = nv04_i2c_setscl; | ||
179 | i2c->bit.getsda = nv04_i2c_getsda; | ||
180 | i2c->bit.getscl = nv04_i2c_getscl; | ||
181 | i2c->rd = entry->read; | ||
182 | i2c->wr = entry->write; | ||
183 | break; | ||
184 | case 4: | ||
185 | i2c->bit.setsda = nv4e_i2c_setsda; | ||
186 | i2c->bit.setscl = nv4e_i2c_setscl; | ||
187 | i2c->bit.getsda = nv4e_i2c_getsda; | ||
188 | i2c->bit.getscl = nv4e_i2c_getscl; | ||
189 | i2c->rd = 0x600800 + entry->read; | ||
190 | i2c->wr = 0x600800 + entry->write; | ||
191 | break; | ||
192 | case 5: | ||
193 | i2c->bit.setsda = nv50_i2c_setsda; | ||
194 | i2c->bit.setscl = nv50_i2c_setscl; | ||
195 | i2c->bit.getsda = nv50_i2c_getsda; | ||
196 | i2c->bit.getscl = nv50_i2c_getscl; | ||
197 | i2c->rd = nv50_i2c_port[entry->read]; | ||
198 | i2c->wr = i2c->rd; | ||
199 | break; | ||
200 | case 6: | ||
201 | i2c->rd = entry->read; | ||
202 | i2c->wr = entry->write; | ||
203 | break; | ||
204 | default: | ||
205 | NV_ERROR(dev, "DCB I2C port type %d unknown\n", | ||
206 | entry->port_type); | ||
207 | kfree(i2c); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
212 | "nouveau-%s-%d", pci_name(dev->pdev), index); | ||
213 | i2c->adapter.owner = THIS_MODULE; | ||
214 | i2c->adapter.dev.parent = &dev->pdev->dev; | ||
215 | i2c->dev = dev; | ||
216 | i2c_set_adapdata(&i2c->adapter, i2c); | ||
217 | |||
218 | if (entry->port_type < 6) { | ||
219 | i2c->adapter.algo_data = &i2c->bit; | ||
220 | i2c->bit.udelay = 40; | ||
221 | i2c->bit.timeout = usecs_to_jiffies(5000); | ||
222 | i2c->bit.data = i2c; | ||
223 | ret = i2c_bit_add_bus(&i2c->adapter); | ||
224 | } else { | ||
225 | i2c->adapter.algo = &nouveau_dp_i2c_algo; | ||
226 | ret = i2c_add_adapter(&i2c->adapter); | ||
227 | } | ||
228 | |||
229 | if (ret) { | ||
230 | NV_ERROR(dev, "Failed to register i2c %d\n", index); | ||
231 | kfree(i2c); | ||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | entry->chan = i2c; | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | void | ||
240 | nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry) | ||
241 | { | ||
242 | if (!entry->chan) | ||
243 | return; | ||
244 | |||
245 | i2c_del_adapter(&entry->chan->adapter); | ||
246 | kfree(entry->chan); | ||
247 | entry->chan = NULL; | ||
248 | } | ||
249 | |||
250 | struct nouveau_i2c_chan * | ||
251 | nouveau_i2c_find(struct drm_device *dev, int index) | ||
252 | { | ||
253 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
254 | struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index]; | ||
255 | |||
256 | if (index >= DCB_MAX_NUM_I2C_ENTRIES) | ||
257 | return NULL; | ||
258 | |||
259 | if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) { | ||
260 | uint32_t reg = 0xe500, val; | ||
261 | |||
262 | if (i2c->port_type == 6) { | ||
263 | reg += i2c->read * 0x50; | ||
264 | val = 0x2002; | ||
265 | } else { | ||
266 | reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; | ||
267 | val = 0xe001; | ||
268 | } | ||
269 | |||
270 | nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); | ||
271 | } | ||
272 | |||
273 | if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) | ||
274 | return NULL; | ||
275 | return i2c->chan; | ||
276 | } | ||
277 | |||
278 | bool | ||
279 | nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr) | ||
280 | { | ||
281 | uint8_t buf[] = { 0 }; | ||
282 | struct i2c_msg msgs[] = { | ||
283 | { | ||
284 | .addr = addr, | ||
285 | .flags = 0, | ||
286 | .len = 1, | ||
287 | .buf = buf, | ||
288 | }, | ||
289 | { | ||
290 | .addr = addr, | ||
291 | .flags = I2C_M_RD, | ||
292 | .len = 1, | ||
293 | .buf = buf, | ||
294 | } | ||
295 | }; | ||
296 | |||
297 | return i2c_transfer(&i2c->adapter, msgs, 2) == 2; | ||
298 | } | ||
299 | |||
300 | int | ||
301 | nouveau_i2c_identify(struct drm_device *dev, const char *what, | ||
302 | struct i2c_board_info *info, | ||
303 | bool (*match)(struct nouveau_i2c_chan *, | ||
304 | struct i2c_board_info *), | ||
305 | int index) | ||
306 | { | ||
307 | struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); | ||
308 | int i; | ||
309 | |||
310 | NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); | ||
311 | |||
312 | for (i = 0; info[i].addr; i++) { | ||
313 | if (nouveau_probe_i2c_addr(i2c, info[i].addr) && | ||
314 | (!match || match(i2c, &info[i]))) { | ||
315 | NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); | ||
316 | return i; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | NV_DEBUG(dev, "No devices found.\n"); | ||
321 | |||
322 | return -ENODEV; | ||
323 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h new file mode 100644 index 00000000000..422b62fd827 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef __NOUVEAU_I2C_H__ | ||
24 | #define __NOUVEAU_I2C_H__ | ||
25 | |||
26 | #include <linux/i2c.h> | ||
27 | #include <linux/i2c-algo-bit.h> | ||
28 | #include "drm_dp_helper.h" | ||
29 | |||
30 | struct dcb_i2c_entry; | ||
31 | |||
32 | struct nouveau_i2c_chan { | ||
33 | struct i2c_adapter adapter; | ||
34 | struct drm_device *dev; | ||
35 | struct i2c_algo_bit_data bit; | ||
36 | unsigned rd; | ||
37 | unsigned wr; | ||
38 | unsigned data; | ||
39 | }; | ||
40 | |||
41 | int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index); | ||
42 | void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *); | ||
43 | struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); | ||
44 | bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); | ||
45 | int nouveau_i2c_identify(struct drm_device *dev, const char *what, | ||
46 | struct i2c_board_info *info, | ||
47 | bool (*match)(struct nouveau_i2c_chan *, | ||
48 | struct i2c_board_info *), | ||
49 | int index); | ||
50 | |||
51 | extern const struct i2c_algorithm nouveau_dp_i2c_algo; | ||
52 | |||
53 | #endif /* __NOUVEAU_I2C_H__ */ | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c new file mode 100644 index 00000000000..1640dec3b82 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | |||
29 | static inline void | ||
30 | region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) | ||
31 | { | ||
32 | list_del(&a->nl_entry); | ||
33 | list_del(&a->fl_entry); | ||
34 | kfree(a); | ||
35 | } | ||
36 | |||
37 | static struct nouveau_mm_node * | ||
38 | region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) | ||
39 | { | ||
40 | struct nouveau_mm_node *b; | ||
41 | |||
42 | if (a->length == size) | ||
43 | return a; | ||
44 | |||
45 | b = kmalloc(sizeof(*b), GFP_KERNEL); | ||
46 | if (unlikely(b == NULL)) | ||
47 | return NULL; | ||
48 | |||
49 | b->offset = a->offset; | ||
50 | b->length = size; | ||
51 | b->type = a->type; | ||
52 | a->offset += size; | ||
53 | a->length -= size; | ||
54 | list_add_tail(&b->nl_entry, &a->nl_entry); | ||
55 | if (b->type == 0) | ||
56 | list_add_tail(&b->fl_entry, &a->fl_entry); | ||
57 | return b; | ||
58 | } | ||
59 | |||
/* Neighbour of @root in the address-ordered node list, or NULL at a list
 * end.  Relies on a local 'rmm' being in scope at the expansion site.
 * Fix: the whole expansion is now parenthesized — the original expanded
 * to a bare ternary, which is unsafe inside larger expressions. */
#define node(root, dir)							\
	(((root)->nl_entry.dir == &rmm->nodes) ? NULL :			\
	 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry))
62 | |||
63 | void | ||
64 | nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) | ||
65 | { | ||
66 | struct nouveau_mm_node *prev = node(this, prev); | ||
67 | struct nouveau_mm_node *next = node(this, next); | ||
68 | |||
69 | list_add(&this->fl_entry, &rmm->free); | ||
70 | this->type = 0; | ||
71 | |||
72 | if (prev && prev->type == 0) { | ||
73 | prev->length += this->length; | ||
74 | region_put(rmm, this); | ||
75 | this = prev; | ||
76 | } | ||
77 | |||
78 | if (next && next->type == 0) { | ||
79 | next->offset = this->offset; | ||
80 | next->length += this->length; | ||
81 | region_put(rmm, this); | ||
82 | } | ||
83 | } | ||
84 | |||
85 | int | ||
86 | nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | ||
87 | u32 align, struct nouveau_mm_node **pnode) | ||
88 | { | ||
89 | struct nouveau_mm_node *prev, *this, *next; | ||
90 | u32 min = size_nc ? size_nc : size; | ||
91 | u32 align_mask = align - 1; | ||
92 | u32 splitoff; | ||
93 | u32 s, e; | ||
94 | |||
95 | list_for_each_entry(this, &rmm->free, fl_entry) { | ||
96 | e = this->offset + this->length; | ||
97 | s = this->offset; | ||
98 | |||
99 | prev = node(this, prev); | ||
100 | if (prev && prev->type != type) | ||
101 | s = roundup(s, rmm->block_size); | ||
102 | |||
103 | next = node(this, next); | ||
104 | if (next && next->type != type) | ||
105 | e = rounddown(e, rmm->block_size); | ||
106 | |||
107 | s = (s + align_mask) & ~align_mask; | ||
108 | e &= ~align_mask; | ||
109 | if (s > e || e - s < min) | ||
110 | continue; | ||
111 | |||
112 | splitoff = s - this->offset; | ||
113 | if (splitoff && !region_split(rmm, this, splitoff)) | ||
114 | return -ENOMEM; | ||
115 | |||
116 | this = region_split(rmm, this, min(size, e - s)); | ||
117 | if (!this) | ||
118 | return -ENOMEM; | ||
119 | |||
120 | this->type = type; | ||
121 | list_del(&this->fl_entry); | ||
122 | *pnode = this; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | return -ENOSPC; | ||
127 | } | ||
128 | |||
129 | int | ||
130 | nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block) | ||
131 | { | ||
132 | struct nouveau_mm *rmm; | ||
133 | struct nouveau_mm_node *heap; | ||
134 | |||
135 | heap = kzalloc(sizeof(*heap), GFP_KERNEL); | ||
136 | if (!heap) | ||
137 | return -ENOMEM; | ||
138 | heap->offset = roundup(offset, block); | ||
139 | heap->length = rounddown(offset + length, block) - heap->offset; | ||
140 | |||
141 | rmm = kzalloc(sizeof(*rmm), GFP_KERNEL); | ||
142 | if (!rmm) { | ||
143 | kfree(heap); | ||
144 | return -ENOMEM; | ||
145 | } | ||
146 | rmm->block_size = block; | ||
147 | mutex_init(&rmm->mutex); | ||
148 | INIT_LIST_HEAD(&rmm->nodes); | ||
149 | INIT_LIST_HEAD(&rmm->free); | ||
150 | list_add(&heap->nl_entry, &rmm->nodes); | ||
151 | list_add(&heap->fl_entry, &rmm->free); | ||
152 | |||
153 | *prmm = rmm; | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | int | ||
158 | nouveau_mm_fini(struct nouveau_mm **prmm) | ||
159 | { | ||
160 | struct nouveau_mm *rmm = *prmm; | ||
161 | struct nouveau_mm_node *node, *heap = | ||
162 | list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); | ||
163 | |||
164 | if (!list_is_singular(&rmm->nodes)) { | ||
165 | printk(KERN_ERR "nouveau_mm not empty at destroy time!\n"); | ||
166 | list_for_each_entry(node, &rmm->nodes, nl_entry) { | ||
167 | printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n", | ||
168 | node->type, node->offset, node->length); | ||
169 | } | ||
170 | WARN_ON(1); | ||
171 | return -EBUSY; | ||
172 | } | ||
173 | |||
174 | kfree(heap); | ||
175 | kfree(rmm); | ||
176 | *prmm = NULL; | ||
177 | return 0; | ||
178 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h new file mode 100644 index 00000000000..b9c016d2155 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #ifndef __NOUVEAU_REGION_H__ | ||
26 | #define __NOUVEAU_REGION_H__ | ||
27 | |||
28 | struct nouveau_mm_node { | ||
29 | struct list_head nl_entry; | ||
30 | struct list_head fl_entry; | ||
31 | struct list_head rl_entry; | ||
32 | |||
33 | u8 type; | ||
34 | u32 offset; | ||
35 | u32 length; | ||
36 | }; | ||
37 | |||
38 | struct nouveau_mm { | ||
39 | struct list_head nodes; | ||
40 | struct list_head free; | ||
41 | |||
42 | struct mutex mutex; | ||
43 | |||
44 | u32 block_size; | ||
45 | }; | ||
46 | |||
47 | int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block); | ||
48 | int nouveau_mm_fini(struct nouveau_mm **); | ||
49 | int nouveau_mm_pre(struct nouveau_mm *); | ||
50 | int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, | ||
51 | u32 align, struct nouveau_mm_node **); | ||
52 | void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); | ||
53 | |||
54 | int nv50_vram_init(struct drm_device *); | ||
55 | void nv50_vram_fini(struct drm_device *); | ||
56 | int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, | ||
57 | u32 memtype, struct nouveau_mem **); | ||
58 | void nv50_vram_del(struct drm_device *, struct nouveau_mem **); | ||
59 | bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); | ||
60 | |||
61 | int nvc0_vram_init(struct drm_device *); | ||
62 | int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin, | ||
63 | u32 memtype, struct nouveau_mem **); | ||
64 | bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags); | ||
65 | |||
66 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c new file mode 100644 index 00000000000..6abdbe6530a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -0,0 +1,204 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | #include "nouveau_drv.h" | ||
31 | #include "nouveau_ramht.h" | ||
32 | |||
33 | int | ||
34 | nouveau_notifier_init_channel(struct nouveau_channel *chan) | ||
35 | { | ||
36 | struct drm_device *dev = chan->dev; | ||
37 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
38 | struct nouveau_bo *ntfy = NULL; | ||
39 | uint32_t flags, ttmpl; | ||
40 | int ret; | ||
41 | |||
42 | if (nouveau_vram_notify) { | ||
43 | flags = NOUVEAU_GEM_DOMAIN_VRAM; | ||
44 | ttmpl = TTM_PL_FLAG_VRAM; | ||
45 | } else { | ||
46 | flags = NOUVEAU_GEM_DOMAIN_GART; | ||
47 | ttmpl = TTM_PL_FLAG_TT; | ||
48 | } | ||
49 | |||
50 | ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy); | ||
51 | if (ret) | ||
52 | return ret; | ||
53 | |||
54 | ret = nouveau_bo_pin(ntfy, ttmpl); | ||
55 | if (ret) | ||
56 | goto out_err; | ||
57 | |||
58 | ret = nouveau_bo_map(ntfy); | ||
59 | if (ret) | ||
60 | goto out_err; | ||
61 | |||
62 | if (dev_priv->card_type >= NV_50) { | ||
63 | ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma); | ||
64 | if (ret) | ||
65 | goto out_err; | ||
66 | } | ||
67 | |||
68 | ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); | ||
69 | if (ret) | ||
70 | goto out_err; | ||
71 | |||
72 | chan->notifier_bo = ntfy; | ||
73 | out_err: | ||
74 | if (ret) { | ||
75 | nouveau_bo_vma_del(ntfy, &chan->notifier_vma); | ||
76 | drm_gem_object_unreference_unlocked(ntfy->gem); | ||
77 | } | ||
78 | |||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | void | ||
83 | nouveau_notifier_takedown_channel(struct nouveau_channel *chan) | ||
84 | { | ||
85 | struct drm_device *dev = chan->dev; | ||
86 | |||
87 | if (!chan->notifier_bo) | ||
88 | return; | ||
89 | |||
90 | nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma); | ||
91 | nouveau_bo_unmap(chan->notifier_bo); | ||
92 | mutex_lock(&dev->struct_mutex); | ||
93 | nouveau_bo_unpin(chan->notifier_bo); | ||
94 | mutex_unlock(&dev->struct_mutex); | ||
95 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); | ||
96 | drm_mm_takedown(&chan->notifier_heap); | ||
97 | } | ||
98 | |||
99 | static void | ||
100 | nouveau_notifier_gpuobj_dtor(struct drm_device *dev, | ||
101 | struct nouveau_gpuobj *gpuobj) | ||
102 | { | ||
103 | NV_DEBUG(dev, "\n"); | ||
104 | |||
105 | if (gpuobj->priv) | ||
106 | drm_mm_put_block(gpuobj->priv); | ||
107 | } | ||
108 | |||
109 | int | ||
110 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | ||
111 | int size, uint32_t start, uint32_t end, | ||
112 | uint32_t *b_offset) | ||
113 | { | ||
114 | struct drm_device *dev = chan->dev; | ||
115 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
116 | struct nouveau_gpuobj *nobj = NULL; | ||
117 | struct drm_mm_node *mem; | ||
118 | uint32_t offset; | ||
119 | int target, ret; | ||
120 | |||
121 | mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, | ||
122 | start, end, 0); | ||
123 | if (mem) | ||
124 | mem = drm_mm_get_block_range(mem, size, 0, start, end); | ||
125 | if (!mem) { | ||
126 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); | ||
127 | return -ENOMEM; | ||
128 | } | ||
129 | |||
130 | if (dev_priv->card_type < NV_50) { | ||
131 | if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) | ||
132 | target = NV_MEM_TARGET_VRAM; | ||
133 | else | ||
134 | target = NV_MEM_TARGET_GART; | ||
135 | offset = chan->notifier_bo->bo.offset; | ||
136 | } else { | ||
137 | target = NV_MEM_TARGET_VM; | ||
138 | offset = chan->notifier_vma.offset; | ||
139 | } | ||
140 | offset += mem->start; | ||
141 | |||
142 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, | ||
143 | mem->size, NV_MEM_ACCESS_RW, target, | ||
144 | &nobj); | ||
145 | if (ret) { | ||
146 | drm_mm_put_block(mem); | ||
147 | NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); | ||
148 | return ret; | ||
149 | } | ||
150 | nobj->dtor = nouveau_notifier_gpuobj_dtor; | ||
151 | nobj->priv = mem; | ||
152 | |||
153 | ret = nouveau_ramht_insert(chan, handle, nobj); | ||
154 | nouveau_gpuobj_ref(NULL, &nobj); | ||
155 | if (ret) { | ||
156 | drm_mm_put_block(mem); | ||
157 | NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret); | ||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | *b_offset = mem->start; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | int | ||
166 | nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) | ||
167 | { | ||
168 | if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor) | ||
169 | return -EINVAL; | ||
170 | |||
171 | if (poffset) { | ||
172 | struct drm_mm_node *mem = nobj->priv; | ||
173 | |||
174 | if (*poffset >= mem->size) | ||
175 | return false; | ||
176 | |||
177 | *poffset += mem->start; | ||
178 | } | ||
179 | |||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | int | ||
184 | nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | ||
185 | struct drm_file *file_priv) | ||
186 | { | ||
187 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
188 | struct drm_nouveau_notifierobj_alloc *na = data; | ||
189 | struct nouveau_channel *chan; | ||
190 | int ret; | ||
191 | |||
192 | /* completely unnecessary for these chipsets... */ | ||
193 | if (unlikely(dev_priv->card_type >= NV_C0)) | ||
194 | return -EINVAL; | ||
195 | |||
196 | chan = nouveau_channel_get(file_priv, na->channel); | ||
197 | if (IS_ERR(chan)) | ||
198 | return PTR_ERR(chan); | ||
199 | |||
200 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, | ||
201 | &na->offset); | ||
202 | nouveau_channel_put(&chan); | ||
203 | return ret; | ||
204 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c new file mode 100644 index 00000000000..159b7c437d3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -0,0 +1,1036 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Ben Skeggs. | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Authors: | ||
30 | * Ben Skeggs <darktama@iinet.net.au> | ||
31 | */ | ||
32 | |||
33 | #include "drmP.h" | ||
34 | #include "drm.h" | ||
35 | #include "nouveau_drv.h" | ||
36 | #include "nouveau_drm.h" | ||
37 | #include "nouveau_ramht.h" | ||
38 | #include "nouveau_vm.h" | ||
39 | #include "nv50_display.h" | ||
40 | |||
/* A software method bound to an object class; exec() is invoked by
 * nouveau_gpuobj_mthd_call() when a matching class/method pair is
 * dispatched. */
struct nouveau_gpuobj_method {
	struct list_head head;	/* entry in nouveau_gpuobj_class.methods */
	u32 mthd;		/* method offset this handler services */
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

/* An object class registered with the driver, and the engine that
 * implements it. */
struct nouveau_gpuobj_class {
	struct list_head head;		/* entry in dev_priv->classes */
	struct list_head methods;	/* software methods for this class */
	u32 id;				/* object class identifier */
	u32 engine;			/* NVOBJ_ENGINE_* index — see users below */
};
53 | |||
54 | int | ||
55 | nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) | ||
56 | { | ||
57 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
58 | struct nouveau_gpuobj_class *oc; | ||
59 | |||
60 | oc = kzalloc(sizeof(*oc), GFP_KERNEL); | ||
61 | if (!oc) | ||
62 | return -ENOMEM; | ||
63 | |||
64 | INIT_LIST_HEAD(&oc->methods); | ||
65 | oc->id = class; | ||
66 | oc->engine = engine; | ||
67 | list_add(&oc->head, &dev_priv->classes); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | int | ||
72 | nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, | ||
73 | int (*exec)(struct nouveau_channel *, u32, u32, u32)) | ||
74 | { | ||
75 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
76 | struct nouveau_gpuobj_method *om; | ||
77 | struct nouveau_gpuobj_class *oc; | ||
78 | |||
79 | list_for_each_entry(oc, &dev_priv->classes, head) { | ||
80 | if (oc->id == class) | ||
81 | goto found; | ||
82 | } | ||
83 | |||
84 | return -EINVAL; | ||
85 | |||
86 | found: | ||
87 | om = kzalloc(sizeof(*om), GFP_KERNEL); | ||
88 | if (!om) | ||
89 | return -ENOMEM; | ||
90 | |||
91 | om->mthd = mthd; | ||
92 | om->exec = exec; | ||
93 | list_add(&om->head, &oc->methods); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | int | ||
98 | nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, | ||
99 | u32 class, u32 mthd, u32 data) | ||
100 | { | ||
101 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
102 | struct nouveau_gpuobj_method *om; | ||
103 | struct nouveau_gpuobj_class *oc; | ||
104 | |||
105 | list_for_each_entry(oc, &dev_priv->classes, head) { | ||
106 | if (oc->id != class) | ||
107 | continue; | ||
108 | |||
109 | list_for_each_entry(om, &oc->methods, head) { | ||
110 | if (om->mthd == mthd) | ||
111 | return om->exec(chan, class, mthd, data); | ||
112 | } | ||
113 | } | ||
114 | |||
115 | return -ENOENT; | ||
116 | } | ||
117 | |||
/* Dispatch a software method given only a channel id rather than a
 * channel pointer.
 *
 * Returns the handler's result, or -EINVAL if chid is out of range or
 * no channel currently occupies that slot.
 */
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	/* hold the channel-table lock across the call so the channel
	 * can't disappear while the handler runs */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}
135 | |||
136 | /* NVidia uses context objects to drive drawing operations. | ||
137 | |||
138 | Context objects can be selected into 8 subchannels in the FIFO, | ||
139 | and then used via DMA command buffers. | ||
140 | |||
141 | A context object is referenced by a user defined handle (CARD32). The HW | ||
142 | looks up graphics objects in a hash table in the instance RAM. | ||
143 | |||
144 | An entry in the hash table consists of 2 CARD32. The first CARD32 contains | ||
145 | the handle, the second one a bitfield, that contains the address of the | ||
146 | object in instance RAM. | ||
147 | |||
148 | The format of the second CARD32 seems to be: | ||
149 | |||
150 | NV4 to NV30: | ||
151 | |||
152 | 15: 0 instance_addr >> 4 | ||
153 | 17:16 engine (here uses 1 = graphics) | ||
154 | 28:24 channel id (here uses 0) | ||
155 | 31 valid (use 1) | ||
156 | |||
157 | NV40: | ||
158 | |||
159 | 15: 0 instance_addr >> 4 (maybe 19-0) | ||
160 | 21:20 engine (here uses 1 = graphics) | ||
161 | I'm unsure about the other bits, but using 0 seems to work. | ||
162 | |||
163 | The key into the hash table depends on the object handle and channel id and | ||
164 | is given as: | ||
165 | */ | ||
166 | |||
/* Allocate a new gpuobj of 'size' bytes with the given alignment.
 *
 * If 'chan' is given (and NVOBJ_FLAG_VM isn't set), the object is
 * carved out of the channel's private PRAMIN heap; otherwise it is
 * allocated through the global instmem engine and (unless
 * NVOBJ_FLAG_DONT_MAP) mapped so it can be accessed via pinst.
 *
 * The new object starts with one reference; release it with
 * nouveau_gpuobj_ref(NULL, &obj).  Returns 0 or a negative errno.
 */
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	/* put the object on the global list up front; the error paths
	 * below rely on nouveau_gpuobj_ref(NULL, ...) -> _del to take
	 * it back off */
	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		/* per-channel object: suballocate from the channel's
		 * PRAMIN heap */
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		/* pinst of ~0 means the channel block itself has no
		 * PRAMIN mapping; propagate that */
		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		/* global object: allocate through the instmem engine */
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;	/* unmapped: vinst access only */

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}


	*gpuobj_ret = gpuobj;
	return 0;
}
236 | |||
237 | int | ||
238 | nouveau_gpuobj_init(struct drm_device *dev) | ||
239 | { | ||
240 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
241 | |||
242 | NV_DEBUG(dev, "\n"); | ||
243 | |||
244 | INIT_LIST_HEAD(&dev_priv->gpuobj_list); | ||
245 | INIT_LIST_HEAD(&dev_priv->classes); | ||
246 | spin_lock_init(&dev_priv->ramin_lock); | ||
247 | dev_priv->ramin_base = ~0; | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
/* Tear down gpuobj bookkeeping: free every registered class and its
 * software methods.  All gpuobjs must already have been released —
 * anything left on gpuobj_list at this point is a refcounting bug. */
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	/* _safe variants: entries are freed while walking */
	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
272 | |||
273 | |||
274 | static void | ||
275 | nouveau_gpuobj_del(struct kref *ref) | ||
276 | { | ||
277 | struct nouveau_gpuobj *gpuobj = | ||
278 | container_of(ref, struct nouveau_gpuobj, refcount); | ||
279 | struct drm_device *dev = gpuobj->dev; | ||
280 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
281 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
282 | int i; | ||
283 | |||
284 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | ||
285 | |||
286 | if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { | ||
287 | for (i = 0; i < gpuobj->size; i += 4) | ||
288 | nv_wo32(gpuobj, i, 0); | ||
289 | instmem->flush(dev); | ||
290 | } | ||
291 | |||
292 | if (gpuobj->dtor) | ||
293 | gpuobj->dtor(dev, gpuobj); | ||
294 | |||
295 | if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { | ||
296 | if (gpuobj->node) { | ||
297 | instmem->unmap(gpuobj); | ||
298 | instmem->put(gpuobj); | ||
299 | } | ||
300 | } else { | ||
301 | if (gpuobj->node) { | ||
302 | spin_lock(&dev_priv->ramin_lock); | ||
303 | drm_mm_put_block(gpuobj->node); | ||
304 | spin_unlock(&dev_priv->ramin_lock); | ||
305 | } | ||
306 | } | ||
307 | |||
308 | spin_lock(&dev_priv->ramin_lock); | ||
309 | list_del(&gpuobj->list); | ||
310 | spin_unlock(&dev_priv->ramin_lock); | ||
311 | |||
312 | kfree(gpuobj); | ||
313 | } | ||
314 | |||
/* Make '*ptr' reference 'ref', adjusting reference counts.  Either may
 * be NULL: nouveau_gpuobj_ref(NULL, &obj) drops a reference.
 * The new reference is taken before the old one is dropped, so the call
 * is safe even when 'ref' and '*ptr' are the same object. */
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
326 | |||
/* Create a "fake" gpuobj wrapping an existing region of instance memory
 * (given by its PRAMIN offset 'pinst' and VRAM address 'vinst') instead
 * of allocating new storage.  The region is not owned by the object and
 * is not returned to instmem on destruction.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;	/* not a channel suballocation */
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}
363 | |||
364 | /* | ||
365 | DMA objects are used to reference a piece of memory in the | ||
366 | framebuffer, PCI or AGP address space. Each object is 16 bytes big | ||
367 | and looks as follows: | ||
368 | |||
369 | entry[0] | ||
370 | 11:0 class (seems like I can always use 0 here) | ||
371 | 12 page table present? | ||
372 | 13 page entry linear? | ||
373 | 15:14 access: 0 rw, 1 ro, 2 wo | ||
374 | 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP | ||
375 | 31:20 dma adjust (bits 0-11 of the address) | ||
376 | entry[1] | ||
377 | dma limit (size of transfer) | ||
378 | entry[X] | ||
379 | 1 0 readonly, 1 readwrite | ||
380 | 31:12 dma frame address of the page (bits 12-31 of the address) | ||
381 | entry[N] | ||
382 | page table terminator, same value as the first pte, as does nvidia | ||
383 | rivatv uses 0xffffffff | ||
384 | |||
385 | Non linear page tables need a list of frame addresses afterwards, | ||
386 | the rivatv project has some info on this. | ||
387 | |||
388 | The method below creates a DMA object in instance RAM and returns a handle | ||
389 | to it that can be used to set up context objects. | ||
390 | */ | ||
391 | |||
/* Write an NV50-layout DMA object (6 words) into 'obj' at 'offset'.
 * The hardware bitfield layout below (access bits 19:18, target bits
 * 17:16, comp/type in the top word) is NV50-specific; see the format
 * notes above for the pre-NV50 layout. */
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0 = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;	/* cleared again below for GART/VM targets */

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		/* GART addresses are offsets into the aperture */
		base += dev_priv->gart_info.aper_base;
		/* fallthrough - GART and VM targets both clear the bit */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}
442 | |||
443 | int | ||
444 | nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, | ||
445 | int target, int access, u32 type, u32 comp, | ||
446 | struct nouveau_gpuobj **pobj) | ||
447 | { | ||
448 | struct drm_device *dev = chan->dev; | ||
449 | int ret; | ||
450 | |||
451 | ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj); | ||
452 | if (ret) | ||
453 | return ret; | ||
454 | |||
455 | nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target, | ||
456 | access, type, comp); | ||
457 | return 0; | ||
458 | } | ||
459 | |||
/* Create a DMA object describing [base, base+size) in the given memory
 * target.  On NV50+ this defers to the NV50-layout helper; on earlier
 * chips the pre-NV50 16-byte layout is written directly.
 *
 * For GART targets on pre-NV50, the request is rewritten into an
 * equivalent PCI target (or, for page-based GART with base == 0, the
 * pre-built sg ctxdma is simply referenced).
 */
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			/* base == 0 covers the whole aperture; reuse the
			 * pre-built sg ctxdma instead of building one */
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fallthrough - WO and RW both set the PTE rw bit */
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;	/* dma adjust */
	flags2 |= (base & 0xfffff000);		/* page frame address */

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);		/* limit */
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);		/* pte repeated as terminator */

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}
542 | |||
543 | /* Context objects in the instance RAM have the following structure. | ||
544 | * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. | ||
545 | |||
546 | NV4 - NV30: | ||
547 | |||
548 | entry[0] | ||
549 | 11:0 class | ||
550 | 12 chroma key enable | ||
551 | 13 user clip enable | ||
552 | 14 swizzle enable | ||
553 | 17:15 patch config: | ||
554 | scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre | ||
555 | 18 synchronize enable | ||
556 | 19 endian: 1 big, 0 little | ||
557 | 21:20 dither mode | ||
558 | 23 single step enable | ||
559 | 24 patch status: 0 invalid, 1 valid | ||
560 | 25 context_surface 0: 1 valid | ||
561 | 26 context surface 1: 1 valid | ||
562 | 27 context pattern: 1 valid | ||
563 | 28 context rop: 1 valid | ||
564 | 29,30 context beta, beta4 | ||
565 | entry[1] | ||
566 | 7:0 mono format | ||
567 | 15:8 color format | ||
568 | 31:16 notify instance address | ||
569 | entry[2] | ||
570 | 15:0 dma 0 instance address | ||
571 | 31:16 dma 1 instance address | ||
572 | entry[3] | ||
573 | dma method traps | ||
574 | |||
575 | NV40: | ||
576 | No idea what the exact format is. Here's what can be deducted: | ||
577 | |||
578 | entry[0]: | ||
579 | 11:0 class (maybe uses more bits here?) | ||
580 | 17 user clip enable | ||
581 | 21:19 patch config | ||
582 | 25 patch status valid ? | ||
583 | entry[1]: | ||
584 | 15:0 DMA notifier (maybe 20:0) | ||
585 | entry[2]: | ||
586 | 15:0 DMA 0 instance (maybe 20:0) | ||
587 | 24 big endian | ||
588 | entry[3]: | ||
589 | 15:0 DMA 1 instance (maybe 20:0) | ||
590 | entry[4]: | ||
591 | entry[5]: | ||
592 | set to 0? | ||
593 | */ | ||
/* Create a software (driver-implemented) object: it occupies no
 * instance memory, existing only as a RAMHT entry so method dispatch
 * can find it.  The RAMHT holds the only long-lived reference. */
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;	/* NOTE(review): marker value, not a real
				 * PRAMIN offset — confirm against RAMHT code */

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	/* RAMHT takes its own reference; drop ours either way */
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}
618 | |||
/* Create a graphics/engine object of 'class' on 'chan' under 'handle'.
 * Looks up which engine registered the class, lazily creates that
 * engine's per-channel context if needed, then asks the engine to
 * create the object.  Software classes are handled separately.
 *
 * Returns -EINVAL for an unregistered class.
 */
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (oc->engine == NVOBJ_ENGINE_SW)
			return nouveau_gpuobj_sw_new(chan, handle, class);

		/* first object of this engine on the channel: create the
		 * engine's channel context */
		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;
}
650 | |||
/* Allocate the channel's private PRAMIN block and set up a drm_mm heap
 * over it for suballocating per-channel objects.
 * On NV50 the first 0x6000 bytes are reserved for fixed structures
 * (page directory etc.), so the heap starts at 'base' 0x6000. */
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}
692 | |||
/* NVC0 (Fermi) channel instance setup: allocate the channel's instance
 * block, attach it to the shared VM's page directory, and map the
 * display semaphore buffers into the channel's address space.
 *
 * NOTE(review): on the error paths below chan->ramin (and the vm ref)
 * are not released here — presumably the caller's channel-teardown path
 * cleans them up; confirm against nouveau_gpuobj_channel_takedown.
 */
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret, i;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	/* map display semaphore buffers into channel's vm */
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];

		ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
					 &chan->dispc_vma[i]);
		if (ret)
			return ret;
	}

	return 0;
}
736 | |||
/* Per-channel object setup: allocate the channel's PRAMIN block, hook
 * up the VM page directory (NV50+), create/reference the RAMHT, and
 * install the VRAM and TT (GART) ctxdma objects under the handles
 * userspace asked for (vram_h / tt_h).
 *
 * NVC0 takes a completely different path (nvc0_gpuobj_channel_init).
 * On error, intermediate allocations are left for the channel teardown
 * path to release.
 */
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
	if (dev_priv->card_type == NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm) {
		/* the PD lives at a fixed offset inside the channel's
		 * PRAMIN block; wrap it as a fake gpuobj */
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		/* pre-NV50: one global RAMHT shared by all channels */
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		/* NV50+: per-channel RAMHT inside the channel's PRAMIN */
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;

		/* dma objects for display sync channel semaphore blocks */
		for (i = 0; i < 2; i++) {
			struct nouveau_gpuobj *sem = NULL;
			struct nv50_display_crtc *dispc =
				&nv50_display(dev)->crtc[i];
			u64 offset = dispc->sem.bo->bo.offset;

			ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
						     NV_MEM_ACCESS_RW,
						     NV_MEM_TARGET_VRAM, &sem);
			if (ret)
				return ret;

			ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
			nouveau_gpuobj_ref(NULL, &sem);
			if (ret)
				return ret;
		}
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		/* NV50+ goes through the VM, so cover the whole 40-bit
		 * virtual address space */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}
868 | |||
/* Release everything nouveau_gpuobj_channel_init() set up for 'chan':
 * display semaphore mappings and the VM link (NV50+), the PRAMIN heap,
 * and the channel's instance block. */
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (dev_priv->card_type >= NV_50) {
		struct nv50_display *disp = nv50_display(dev);

		/* unmap the display semaphore buffers from the channel vm */
		for (i = 0; i < 2; i++) {
			struct nv50_display_crtc *dispc = &disp->crtc[i];
			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
		}

		nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
		nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	}

	/* heap may never have been initialised if init failed early */
	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}
894 | |||
895 | int | ||
896 | nouveau_gpuobj_suspend(struct drm_device *dev) | ||
897 | { | ||
898 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
899 | struct nouveau_gpuobj *gpuobj; | ||
900 | int i; | ||
901 | |||
902 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | ||
903 | if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) | ||
904 | continue; | ||
905 | |||
906 | gpuobj->suspend = vmalloc(gpuobj->size); | ||
907 | if (!gpuobj->suspend) { | ||
908 | nouveau_gpuobj_resume(dev); | ||
909 | return -ENOMEM; | ||
910 | } | ||
911 | |||
912 | for (i = 0; i < gpuobj->size; i += 4) | ||
913 | gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); | ||
914 | } | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | void | ||
920 | nouveau_gpuobj_resume(struct drm_device *dev) | ||
921 | { | ||
922 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
923 | struct nouveau_gpuobj *gpuobj; | ||
924 | int i; | ||
925 | |||
926 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | ||
927 | if (!gpuobj->suspend) | ||
928 | continue; | ||
929 | |||
930 | for (i = 0; i < gpuobj->size; i += 4) | ||
931 | nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); | ||
932 | |||
933 | vfree(gpuobj->suspend); | ||
934 | gpuobj->suspend = NULL; | ||
935 | } | ||
936 | |||
937 | dev_priv->engine.instmem.flush(dev); | ||
938 | } | ||
939 | |||
/* DRM_NOUVEAU_GROBJ_ALLOC ioctl: create an engine object of the
 * requested class under the requested handle on the caller's channel.
 * Fails with -EEXIST if the handle is already in use. */
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	/* ~0 is reserved (see e.g. gpuobj pinst sentinel usage) */
	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}
969 | |||
970 | int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | ||
971 | struct drm_file *file_priv) | ||
972 | { | ||
973 | struct drm_nouveau_gpuobj_free *objfree = data; | ||
974 | struct nouveau_channel *chan; | ||
975 | int ret; | ||
976 | |||
977 | chan = nouveau_channel_get(file_priv, objfree->channel); | ||
978 | if (IS_ERR(chan)) | ||
979 | return PTR_ERR(chan); | ||
980 | |||
981 | /* Synchronize with the user channel */ | ||
982 | nouveau_channel_idle(chan); | ||
983 | |||
984 | ret = nouveau_ramht_remove(chan, objfree->handle); | ||
985 | nouveau_channel_put(&chan); | ||
986 | return ret; | ||
987 | } | ||
988 | |||
/* Read a 32-bit word from a gpuobj at byte 'offset'.
 *
 * Objects with a PRAMIN mapping are read through it (nv_ri32).
 * Otherwise the read goes through the sliding 64KiB window at BAR
 * offset 0x700000, repositioned by writing the 64KiB-aligned base to
 * register 0x001700; dev_priv->ramin_base caches the current window
 * position, protected by vm_lock. */
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;	/* 64KiB window index */
		u32 val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}
1013 | |||
/* Write a 32-bit word to a gpuobj at byte 'offset'.
 * Mirror of nv_ro32(): uses the PRAMIN mapping when available,
 * otherwise the sliding window at 0x700000 (see nv_ro32 for details). */
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;	/* 64KiB window index */

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c new file mode 100644 index 00000000000..a24a81f5a89 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_ramht.h" | ||
29 | |||
30 | static u32 | ||
31 | nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle) | ||
32 | { | ||
33 | struct drm_device *dev = chan->dev; | ||
34 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
35 | struct nouveau_ramht *ramht = chan->ramht; | ||
36 | u32 hash = 0; | ||
37 | int i; | ||
38 | |||
39 | NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle); | ||
40 | |||
41 | for (i = 32; i > 0; i -= ramht->bits) { | ||
42 | hash ^= (handle & ((1 << ramht->bits) - 1)); | ||
43 | handle >>= ramht->bits; | ||
44 | } | ||
45 | |||
46 | if (dev_priv->card_type < NV_50) | ||
47 | hash ^= chan->id << (ramht->bits - 4); | ||
48 | hash <<= 3; | ||
49 | |||
50 | NV_DEBUG(dev, "hash=0x%08x\n", hash); | ||
51 | return hash; | ||
52 | } | ||
53 | |||
54 | static int | ||
55 | nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, | ||
56 | u32 offset) | ||
57 | { | ||
58 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
59 | u32 ctx = nv_ro32(ramht, offset + 4); | ||
60 | |||
61 | if (dev_priv->card_type < NV_40) | ||
62 | return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); | ||
63 | return (ctx != 0); | ||
64 | } | ||
65 | |||
66 | static int | ||
67 | nouveau_ramht_entry_same_channel(struct nouveau_channel *chan, | ||
68 | struct nouveau_gpuobj *ramht, u32 offset) | ||
69 | { | ||
70 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
71 | u32 ctx = nv_ro32(ramht, offset + 4); | ||
72 | |||
73 | if (dev_priv->card_type >= NV_50) | ||
74 | return true; | ||
75 | else if (dev_priv->card_type >= NV_40) | ||
76 | return chan->id == | ||
77 | ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f); | ||
78 | else | ||
79 | return chan->id == | ||
80 | ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f); | ||
81 | } | ||
82 | |||
83 | int | ||
84 | nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, | ||
85 | struct nouveau_gpuobj *gpuobj) | ||
86 | { | ||
87 | struct drm_device *dev = chan->dev; | ||
88 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
89 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
90 | struct nouveau_ramht_entry *entry; | ||
91 | struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; | ||
92 | unsigned long flags; | ||
93 | u32 ctx, co, ho; | ||
94 | |||
95 | if (nouveau_ramht_find(chan, handle)) | ||
96 | return -EEXIST; | ||
97 | |||
98 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
99 | if (!entry) | ||
100 | return -ENOMEM; | ||
101 | entry->channel = chan; | ||
102 | entry->gpuobj = NULL; | ||
103 | entry->handle = handle; | ||
104 | nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); | ||
105 | |||
106 | if (dev_priv->card_type < NV_40) { | ||
107 | ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) | | ||
108 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | | ||
109 | (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); | ||
110 | } else | ||
111 | if (dev_priv->card_type < NV_50) { | ||
112 | ctx = (gpuobj->pinst >> 4) | | ||
113 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | | ||
114 | (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); | ||
115 | } else { | ||
116 | if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { | ||
117 | ctx = (gpuobj->cinst << 10) | | ||
118 | (chan->id << 28) | | ||
119 | chan->id; /* HASH_TAG */ | ||
120 | } else { | ||
121 | ctx = (gpuobj->cinst >> 4) | | ||
122 | ((gpuobj->engine << | ||
123 | NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); | ||
124 | } | ||
125 | } | ||
126 | |||
127 | spin_lock_irqsave(&chan->ramht->lock, flags); | ||
128 | list_add(&entry->head, &chan->ramht->entries); | ||
129 | |||
130 | co = ho = nouveau_ramht_hash_handle(chan, handle); | ||
131 | do { | ||
132 | if (!nouveau_ramht_entry_valid(dev, ramht, co)) { | ||
133 | NV_DEBUG(dev, | ||
134 | "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | ||
135 | chan->id, co, handle, ctx); | ||
136 | nv_wo32(ramht, co + 0, handle); | ||
137 | nv_wo32(ramht, co + 4, ctx); | ||
138 | |||
139 | spin_unlock_irqrestore(&chan->ramht->lock, flags); | ||
140 | instmem->flush(dev); | ||
141 | return 0; | ||
142 | } | ||
143 | NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", | ||
144 | chan->id, co, nv_ro32(ramht, co)); | ||
145 | |||
146 | co += 8; | ||
147 | if (co >= ramht->size) | ||
148 | co = 0; | ||
149 | } while (co != ho); | ||
150 | |||
151 | NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); | ||
152 | list_del(&entry->head); | ||
153 | spin_unlock_irqrestore(&chan->ramht->lock, flags); | ||
154 | kfree(entry); | ||
155 | return -ENOMEM; | ||
156 | } | ||
157 | |||
158 | static struct nouveau_ramht_entry * | ||
159 | nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle) | ||
160 | { | ||
161 | struct nouveau_ramht *ramht = chan ? chan->ramht : NULL; | ||
162 | struct nouveau_ramht_entry *entry; | ||
163 | unsigned long flags; | ||
164 | |||
165 | if (!ramht) | ||
166 | return NULL; | ||
167 | |||
168 | spin_lock_irqsave(&ramht->lock, flags); | ||
169 | list_for_each_entry(entry, &ramht->entries, head) { | ||
170 | if (entry->channel == chan && | ||
171 | (!handle || entry->handle == handle)) { | ||
172 | list_del(&entry->head); | ||
173 | spin_unlock_irqrestore(&ramht->lock, flags); | ||
174 | |||
175 | return entry; | ||
176 | } | ||
177 | } | ||
178 | spin_unlock_irqrestore(&ramht->lock, flags); | ||
179 | |||
180 | return NULL; | ||
181 | } | ||
182 | |||
/* Clear the hardware RAMHT slot matching 'handle' for this channel.
 *
 * Probes linearly from the handle's home slot, exactly mirroring the
 * probe sequence used by nouveau_ramht_insert(), and zeroes the first
 * slot that is valid, owned by this channel and stores 'handle'.
 * Logs an error if no matching slot is found.  The software shadow
 * entry must be removed separately (nouveau_ramht_remove_entry()).
 */
static void
nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
	unsigned long flags;
	u32 co, ho;

	spin_lock_irqsave(&chan->ramht->lock, flags);
	co = ho = nouveau_ramht_hash_handle(chan, handle);
	do {
		/* Slot matches if occupied, owned by this channel and
		 * keyed by the handle we are removing. */
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    nouveau_ramht_entry_same_channel(chan, ramht, co) &&
		    (handle == nv_ro32(ramht, co))) {
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, handle, nv_ro32(ramht, co + 4));
			nv_wo32(ramht, co + 0, 0x00000000);
			nv_wo32(ramht, co + 4, 0x00000000);
			instmem->flush(dev);
			goto out;
		}

		/* Advance one 8-byte slot, wrapping at the table end. */
		co += 8;
		if (co >= ramht->size)
			co = 0;
	} while (co != ho);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, handle);
out:
	spin_unlock_irqrestore(&chan->ramht->lock, flags);
}
218 | |||
219 | int | ||
220 | nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) | ||
221 | { | ||
222 | struct nouveau_ramht_entry *entry; | ||
223 | |||
224 | entry = nouveau_ramht_remove_entry(chan, handle); | ||
225 | if (!entry) | ||
226 | return -ENOENT; | ||
227 | |||
228 | nouveau_ramht_remove_hash(chan, entry->handle); | ||
229 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); | ||
230 | kfree(entry); | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | struct nouveau_gpuobj * | ||
235 | nouveau_ramht_find(struct nouveau_channel *chan, u32 handle) | ||
236 | { | ||
237 | struct nouveau_ramht *ramht = chan->ramht; | ||
238 | struct nouveau_ramht_entry *entry; | ||
239 | struct nouveau_gpuobj *gpuobj = NULL; | ||
240 | unsigned long flags; | ||
241 | |||
242 | if (unlikely(!chan->ramht)) | ||
243 | return NULL; | ||
244 | |||
245 | spin_lock_irqsave(&ramht->lock, flags); | ||
246 | list_for_each_entry(entry, &chan->ramht->entries, head) { | ||
247 | if (entry->channel == chan && entry->handle == handle) { | ||
248 | gpuobj = entry->gpuobj; | ||
249 | break; | ||
250 | } | ||
251 | } | ||
252 | spin_unlock_irqrestore(&ramht->lock, flags); | ||
253 | |||
254 | return gpuobj; | ||
255 | } | ||
256 | |||
257 | int | ||
258 | nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, | ||
259 | struct nouveau_ramht **pramht) | ||
260 | { | ||
261 | struct nouveau_ramht *ramht; | ||
262 | |||
263 | ramht = kzalloc(sizeof(*ramht), GFP_KERNEL); | ||
264 | if (!ramht) | ||
265 | return -ENOMEM; | ||
266 | |||
267 | ramht->dev = dev; | ||
268 | kref_init(&ramht->refcount); | ||
269 | ramht->bits = drm_order(gpuobj->size / 8); | ||
270 | INIT_LIST_HEAD(&ramht->entries); | ||
271 | spin_lock_init(&ramht->lock); | ||
272 | nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj); | ||
273 | |||
274 | *pramht = ramht; | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static void | ||
279 | nouveau_ramht_del(struct kref *ref) | ||
280 | { | ||
281 | struct nouveau_ramht *ramht = | ||
282 | container_of(ref, struct nouveau_ramht, refcount); | ||
283 | |||
284 | nouveau_gpuobj_ref(NULL, &ramht->gpuobj); | ||
285 | kfree(ramht); | ||
286 | } | ||
287 | |||
288 | void | ||
289 | nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, | ||
290 | struct nouveau_channel *chan) | ||
291 | { | ||
292 | struct nouveau_ramht_entry *entry; | ||
293 | struct nouveau_ramht *ramht; | ||
294 | |||
295 | if (ref) | ||
296 | kref_get(&ref->refcount); | ||
297 | |||
298 | ramht = *ptr; | ||
299 | if (ramht) { | ||
300 | while ((entry = nouveau_ramht_remove_entry(chan, 0))) { | ||
301 | nouveau_ramht_remove_hash(chan, entry->handle); | ||
302 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); | ||
303 | kfree(entry); | ||
304 | } | ||
305 | |||
306 | kref_put(&ramht->refcount, nouveau_ramht_del); | ||
307 | } | ||
308 | *ptr = ref; | ||
309 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h new file mode 100644 index 00000000000..c82de98fee0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
#ifndef __NOUVEAU_RAMHT_H__
#define __NOUVEAU_RAMHT_H__

/* Software shadow record for one object bound in a channel's RAMHT
 * (the hash table the hardware uses to resolve object handles). */
struct nouveau_ramht_entry {
	struct list_head head;		/* link in nouveau_ramht.entries */
	struct nouveau_channel *channel; /* channel the binding belongs to */
	struct nouveau_gpuobj *gpuobj;	/* bound object (referenced) */
	u32 handle;			/* handle it is bound under */
};

/* Refcounted RAMHT wrapper: the backing gpuobj plus a shadow list of
 * the entries currently stored in it. */
struct nouveau_ramht {
	struct drm_device *dev;
	struct kref refcount;		/* released via nouveau_ramht_ref() */
	spinlock_t lock;		/* guards entries and hw slot updates */
	struct nouveau_gpuobj *gpuobj;	/* hash-table backing memory */
	struct list_head entries;	/* nouveau_ramht_entry list */
	int bits;			/* log2 of slot count (8B per slot) */
};

extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
			     struct nouveau_ramht **);
extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
			      struct nouveau_channel *unref_channel);

extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
				struct nouveau_gpuobj *);
extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);

#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c new file mode 100644 index 00000000000..10656e430b4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -0,0 +1,1218 @@ | |||
1 | /* | ||
2 | * Copyright 2005 Stephane Marchesin | ||
3 | * Copyright 2008 Stuart Bennett | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #include <linux/swab.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | #include "drm_sarea.h" | ||
31 | #include "drm_crtc_helper.h" | ||
32 | #include <linux/vgaarb.h> | ||
33 | #include <linux/vga_switcheroo.h> | ||
34 | |||
35 | #include "nouveau_drv.h" | ||
36 | #include "nouveau_drm.h" | ||
37 | #include "nouveau_fbcon.h" | ||
38 | #include "nouveau_ramht.h" | ||
39 | #include "nouveau_pm.h" | ||
40 | #include "nv50_display.h" | ||
41 | |||
/* No-op init/takedown hooks for engines a given chipset lacks. */
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
44 | |||
45 | static int nouveau_init_engine_ptrs(struct drm_device *dev) | ||
46 | { | ||
47 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
48 | struct nouveau_engine *engine = &dev_priv->engine; | ||
49 | |||
50 | switch (dev_priv->chipset & 0xf0) { | ||
51 | case 0x00: | ||
52 | engine->instmem.init = nv04_instmem_init; | ||
53 | engine->instmem.takedown = nv04_instmem_takedown; | ||
54 | engine->instmem.suspend = nv04_instmem_suspend; | ||
55 | engine->instmem.resume = nv04_instmem_resume; | ||
56 | engine->instmem.get = nv04_instmem_get; | ||
57 | engine->instmem.put = nv04_instmem_put; | ||
58 | engine->instmem.map = nv04_instmem_map; | ||
59 | engine->instmem.unmap = nv04_instmem_unmap; | ||
60 | engine->instmem.flush = nv04_instmem_flush; | ||
61 | engine->mc.init = nv04_mc_init; | ||
62 | engine->mc.takedown = nv04_mc_takedown; | ||
63 | engine->timer.init = nv04_timer_init; | ||
64 | engine->timer.read = nv04_timer_read; | ||
65 | engine->timer.takedown = nv04_timer_takedown; | ||
66 | engine->fb.init = nv04_fb_init; | ||
67 | engine->fb.takedown = nv04_fb_takedown; | ||
68 | engine->fifo.channels = 16; | ||
69 | engine->fifo.init = nv04_fifo_init; | ||
70 | engine->fifo.takedown = nv04_fifo_fini; | ||
71 | engine->fifo.disable = nv04_fifo_disable; | ||
72 | engine->fifo.enable = nv04_fifo_enable; | ||
73 | engine->fifo.reassign = nv04_fifo_reassign; | ||
74 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
75 | engine->fifo.channel_id = nv04_fifo_channel_id; | ||
76 | engine->fifo.create_context = nv04_fifo_create_context; | ||
77 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
78 | engine->fifo.load_context = nv04_fifo_load_context; | ||
79 | engine->fifo.unload_context = nv04_fifo_unload_context; | ||
80 | engine->display.early_init = nv04_display_early_init; | ||
81 | engine->display.late_takedown = nv04_display_late_takedown; | ||
82 | engine->display.create = nv04_display_create; | ||
83 | engine->display.init = nv04_display_init; | ||
84 | engine->display.destroy = nv04_display_destroy; | ||
85 | engine->gpio.init = nouveau_stub_init; | ||
86 | engine->gpio.takedown = nouveau_stub_takedown; | ||
87 | engine->gpio.get = NULL; | ||
88 | engine->gpio.set = NULL; | ||
89 | engine->gpio.irq_enable = NULL; | ||
90 | engine->pm.clock_get = nv04_pm_clock_get; | ||
91 | engine->pm.clock_pre = nv04_pm_clock_pre; | ||
92 | engine->pm.clock_set = nv04_pm_clock_set; | ||
93 | engine->vram.init = nouveau_mem_detect; | ||
94 | engine->vram.takedown = nouveau_stub_takedown; | ||
95 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
96 | break; | ||
97 | case 0x10: | ||
98 | engine->instmem.init = nv04_instmem_init; | ||
99 | engine->instmem.takedown = nv04_instmem_takedown; | ||
100 | engine->instmem.suspend = nv04_instmem_suspend; | ||
101 | engine->instmem.resume = nv04_instmem_resume; | ||
102 | engine->instmem.get = nv04_instmem_get; | ||
103 | engine->instmem.put = nv04_instmem_put; | ||
104 | engine->instmem.map = nv04_instmem_map; | ||
105 | engine->instmem.unmap = nv04_instmem_unmap; | ||
106 | engine->instmem.flush = nv04_instmem_flush; | ||
107 | engine->mc.init = nv04_mc_init; | ||
108 | engine->mc.takedown = nv04_mc_takedown; | ||
109 | engine->timer.init = nv04_timer_init; | ||
110 | engine->timer.read = nv04_timer_read; | ||
111 | engine->timer.takedown = nv04_timer_takedown; | ||
112 | engine->fb.init = nv10_fb_init; | ||
113 | engine->fb.takedown = nv10_fb_takedown; | ||
114 | engine->fb.init_tile_region = nv10_fb_init_tile_region; | ||
115 | engine->fb.set_tile_region = nv10_fb_set_tile_region; | ||
116 | engine->fb.free_tile_region = nv10_fb_free_tile_region; | ||
117 | engine->fifo.channels = 32; | ||
118 | engine->fifo.init = nv10_fifo_init; | ||
119 | engine->fifo.takedown = nv04_fifo_fini; | ||
120 | engine->fifo.disable = nv04_fifo_disable; | ||
121 | engine->fifo.enable = nv04_fifo_enable; | ||
122 | engine->fifo.reassign = nv04_fifo_reassign; | ||
123 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
124 | engine->fifo.channel_id = nv10_fifo_channel_id; | ||
125 | engine->fifo.create_context = nv10_fifo_create_context; | ||
126 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
127 | engine->fifo.load_context = nv10_fifo_load_context; | ||
128 | engine->fifo.unload_context = nv10_fifo_unload_context; | ||
129 | engine->display.early_init = nv04_display_early_init; | ||
130 | engine->display.late_takedown = nv04_display_late_takedown; | ||
131 | engine->display.create = nv04_display_create; | ||
132 | engine->display.init = nv04_display_init; | ||
133 | engine->display.destroy = nv04_display_destroy; | ||
134 | engine->gpio.init = nouveau_stub_init; | ||
135 | engine->gpio.takedown = nouveau_stub_takedown; | ||
136 | engine->gpio.get = nv10_gpio_get; | ||
137 | engine->gpio.set = nv10_gpio_set; | ||
138 | engine->gpio.irq_enable = NULL; | ||
139 | engine->pm.clock_get = nv04_pm_clock_get; | ||
140 | engine->pm.clock_pre = nv04_pm_clock_pre; | ||
141 | engine->pm.clock_set = nv04_pm_clock_set; | ||
142 | engine->vram.init = nouveau_mem_detect; | ||
143 | engine->vram.takedown = nouveau_stub_takedown; | ||
144 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
145 | break; | ||
146 | case 0x20: | ||
147 | engine->instmem.init = nv04_instmem_init; | ||
148 | engine->instmem.takedown = nv04_instmem_takedown; | ||
149 | engine->instmem.suspend = nv04_instmem_suspend; | ||
150 | engine->instmem.resume = nv04_instmem_resume; | ||
151 | engine->instmem.get = nv04_instmem_get; | ||
152 | engine->instmem.put = nv04_instmem_put; | ||
153 | engine->instmem.map = nv04_instmem_map; | ||
154 | engine->instmem.unmap = nv04_instmem_unmap; | ||
155 | engine->instmem.flush = nv04_instmem_flush; | ||
156 | engine->mc.init = nv04_mc_init; | ||
157 | engine->mc.takedown = nv04_mc_takedown; | ||
158 | engine->timer.init = nv04_timer_init; | ||
159 | engine->timer.read = nv04_timer_read; | ||
160 | engine->timer.takedown = nv04_timer_takedown; | ||
161 | engine->fb.init = nv10_fb_init; | ||
162 | engine->fb.takedown = nv10_fb_takedown; | ||
163 | engine->fb.init_tile_region = nv10_fb_init_tile_region; | ||
164 | engine->fb.set_tile_region = nv10_fb_set_tile_region; | ||
165 | engine->fb.free_tile_region = nv10_fb_free_tile_region; | ||
166 | engine->fifo.channels = 32; | ||
167 | engine->fifo.init = nv10_fifo_init; | ||
168 | engine->fifo.takedown = nv04_fifo_fini; | ||
169 | engine->fifo.disable = nv04_fifo_disable; | ||
170 | engine->fifo.enable = nv04_fifo_enable; | ||
171 | engine->fifo.reassign = nv04_fifo_reassign; | ||
172 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
173 | engine->fifo.channel_id = nv10_fifo_channel_id; | ||
174 | engine->fifo.create_context = nv10_fifo_create_context; | ||
175 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
176 | engine->fifo.load_context = nv10_fifo_load_context; | ||
177 | engine->fifo.unload_context = nv10_fifo_unload_context; | ||
178 | engine->display.early_init = nv04_display_early_init; | ||
179 | engine->display.late_takedown = nv04_display_late_takedown; | ||
180 | engine->display.create = nv04_display_create; | ||
181 | engine->display.init = nv04_display_init; | ||
182 | engine->display.destroy = nv04_display_destroy; | ||
183 | engine->gpio.init = nouveau_stub_init; | ||
184 | engine->gpio.takedown = nouveau_stub_takedown; | ||
185 | engine->gpio.get = nv10_gpio_get; | ||
186 | engine->gpio.set = nv10_gpio_set; | ||
187 | engine->gpio.irq_enable = NULL; | ||
188 | engine->pm.clock_get = nv04_pm_clock_get; | ||
189 | engine->pm.clock_pre = nv04_pm_clock_pre; | ||
190 | engine->pm.clock_set = nv04_pm_clock_set; | ||
191 | engine->vram.init = nouveau_mem_detect; | ||
192 | engine->vram.takedown = nouveau_stub_takedown; | ||
193 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
194 | break; | ||
195 | case 0x30: | ||
196 | engine->instmem.init = nv04_instmem_init; | ||
197 | engine->instmem.takedown = nv04_instmem_takedown; | ||
198 | engine->instmem.suspend = nv04_instmem_suspend; | ||
199 | engine->instmem.resume = nv04_instmem_resume; | ||
200 | engine->instmem.get = nv04_instmem_get; | ||
201 | engine->instmem.put = nv04_instmem_put; | ||
202 | engine->instmem.map = nv04_instmem_map; | ||
203 | engine->instmem.unmap = nv04_instmem_unmap; | ||
204 | engine->instmem.flush = nv04_instmem_flush; | ||
205 | engine->mc.init = nv04_mc_init; | ||
206 | engine->mc.takedown = nv04_mc_takedown; | ||
207 | engine->timer.init = nv04_timer_init; | ||
208 | engine->timer.read = nv04_timer_read; | ||
209 | engine->timer.takedown = nv04_timer_takedown; | ||
210 | engine->fb.init = nv30_fb_init; | ||
211 | engine->fb.takedown = nv30_fb_takedown; | ||
212 | engine->fb.init_tile_region = nv30_fb_init_tile_region; | ||
213 | engine->fb.set_tile_region = nv10_fb_set_tile_region; | ||
214 | engine->fb.free_tile_region = nv30_fb_free_tile_region; | ||
215 | engine->fifo.channels = 32; | ||
216 | engine->fifo.init = nv10_fifo_init; | ||
217 | engine->fifo.takedown = nv04_fifo_fini; | ||
218 | engine->fifo.disable = nv04_fifo_disable; | ||
219 | engine->fifo.enable = nv04_fifo_enable; | ||
220 | engine->fifo.reassign = nv04_fifo_reassign; | ||
221 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
222 | engine->fifo.channel_id = nv10_fifo_channel_id; | ||
223 | engine->fifo.create_context = nv10_fifo_create_context; | ||
224 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
225 | engine->fifo.load_context = nv10_fifo_load_context; | ||
226 | engine->fifo.unload_context = nv10_fifo_unload_context; | ||
227 | engine->display.early_init = nv04_display_early_init; | ||
228 | engine->display.late_takedown = nv04_display_late_takedown; | ||
229 | engine->display.create = nv04_display_create; | ||
230 | engine->display.init = nv04_display_init; | ||
231 | engine->display.destroy = nv04_display_destroy; | ||
232 | engine->gpio.init = nouveau_stub_init; | ||
233 | engine->gpio.takedown = nouveau_stub_takedown; | ||
234 | engine->gpio.get = nv10_gpio_get; | ||
235 | engine->gpio.set = nv10_gpio_set; | ||
236 | engine->gpio.irq_enable = NULL; | ||
237 | engine->pm.clock_get = nv04_pm_clock_get; | ||
238 | engine->pm.clock_pre = nv04_pm_clock_pre; | ||
239 | engine->pm.clock_set = nv04_pm_clock_set; | ||
240 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | ||
241 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | ||
242 | engine->vram.init = nouveau_mem_detect; | ||
243 | engine->vram.takedown = nouveau_stub_takedown; | ||
244 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
245 | break; | ||
246 | case 0x40: | ||
247 | case 0x60: | ||
248 | engine->instmem.init = nv04_instmem_init; | ||
249 | engine->instmem.takedown = nv04_instmem_takedown; | ||
250 | engine->instmem.suspend = nv04_instmem_suspend; | ||
251 | engine->instmem.resume = nv04_instmem_resume; | ||
252 | engine->instmem.get = nv04_instmem_get; | ||
253 | engine->instmem.put = nv04_instmem_put; | ||
254 | engine->instmem.map = nv04_instmem_map; | ||
255 | engine->instmem.unmap = nv04_instmem_unmap; | ||
256 | engine->instmem.flush = nv04_instmem_flush; | ||
257 | engine->mc.init = nv40_mc_init; | ||
258 | engine->mc.takedown = nv40_mc_takedown; | ||
259 | engine->timer.init = nv04_timer_init; | ||
260 | engine->timer.read = nv04_timer_read; | ||
261 | engine->timer.takedown = nv04_timer_takedown; | ||
262 | engine->fb.init = nv40_fb_init; | ||
263 | engine->fb.takedown = nv40_fb_takedown; | ||
264 | engine->fb.init_tile_region = nv30_fb_init_tile_region; | ||
265 | engine->fb.set_tile_region = nv40_fb_set_tile_region; | ||
266 | engine->fb.free_tile_region = nv30_fb_free_tile_region; | ||
267 | engine->fifo.channels = 32; | ||
268 | engine->fifo.init = nv40_fifo_init; | ||
269 | engine->fifo.takedown = nv04_fifo_fini; | ||
270 | engine->fifo.disable = nv04_fifo_disable; | ||
271 | engine->fifo.enable = nv04_fifo_enable; | ||
272 | engine->fifo.reassign = nv04_fifo_reassign; | ||
273 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
274 | engine->fifo.channel_id = nv10_fifo_channel_id; | ||
275 | engine->fifo.create_context = nv40_fifo_create_context; | ||
276 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
277 | engine->fifo.load_context = nv40_fifo_load_context; | ||
278 | engine->fifo.unload_context = nv40_fifo_unload_context; | ||
279 | engine->display.early_init = nv04_display_early_init; | ||
280 | engine->display.late_takedown = nv04_display_late_takedown; | ||
281 | engine->display.create = nv04_display_create; | ||
282 | engine->display.init = nv04_display_init; | ||
283 | engine->display.destroy = nv04_display_destroy; | ||
284 | engine->gpio.init = nouveau_stub_init; | ||
285 | engine->gpio.takedown = nouveau_stub_takedown; | ||
286 | engine->gpio.get = nv10_gpio_get; | ||
287 | engine->gpio.set = nv10_gpio_set; | ||
288 | engine->gpio.irq_enable = NULL; | ||
289 | engine->pm.clock_get = nv04_pm_clock_get; | ||
290 | engine->pm.clock_pre = nv04_pm_clock_pre; | ||
291 | engine->pm.clock_set = nv04_pm_clock_set; | ||
292 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | ||
293 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | ||
294 | engine->pm.temp_get = nv40_temp_get; | ||
295 | engine->vram.init = nouveau_mem_detect; | ||
296 | engine->vram.takedown = nouveau_stub_takedown; | ||
297 | engine->vram.flags_valid = nouveau_mem_flags_valid; | ||
298 | break; | ||
299 | case 0x50: | ||
300 | case 0x80: /* gotta love NVIDIA's consistency.. */ | ||
301 | case 0x90: | ||
302 | case 0xA0: | ||
303 | engine->instmem.init = nv50_instmem_init; | ||
304 | engine->instmem.takedown = nv50_instmem_takedown; | ||
305 | engine->instmem.suspend = nv50_instmem_suspend; | ||
306 | engine->instmem.resume = nv50_instmem_resume; | ||
307 | engine->instmem.get = nv50_instmem_get; | ||
308 | engine->instmem.put = nv50_instmem_put; | ||
309 | engine->instmem.map = nv50_instmem_map; | ||
310 | engine->instmem.unmap = nv50_instmem_unmap; | ||
311 | if (dev_priv->chipset == 0x50) | ||
312 | engine->instmem.flush = nv50_instmem_flush; | ||
313 | else | ||
314 | engine->instmem.flush = nv84_instmem_flush; | ||
315 | engine->mc.init = nv50_mc_init; | ||
316 | engine->mc.takedown = nv50_mc_takedown; | ||
317 | engine->timer.init = nv04_timer_init; | ||
318 | engine->timer.read = nv04_timer_read; | ||
319 | engine->timer.takedown = nv04_timer_takedown; | ||
320 | engine->fb.init = nv50_fb_init; | ||
321 | engine->fb.takedown = nv50_fb_takedown; | ||
322 | engine->fifo.channels = 128; | ||
323 | engine->fifo.init = nv50_fifo_init; | ||
324 | engine->fifo.takedown = nv50_fifo_takedown; | ||
325 | engine->fifo.disable = nv04_fifo_disable; | ||
326 | engine->fifo.enable = nv04_fifo_enable; | ||
327 | engine->fifo.reassign = nv04_fifo_reassign; | ||
328 | engine->fifo.channel_id = nv50_fifo_channel_id; | ||
329 | engine->fifo.create_context = nv50_fifo_create_context; | ||
330 | engine->fifo.destroy_context = nv50_fifo_destroy_context; | ||
331 | engine->fifo.load_context = nv50_fifo_load_context; | ||
332 | engine->fifo.unload_context = nv50_fifo_unload_context; | ||
333 | engine->fifo.tlb_flush = nv50_fifo_tlb_flush; | ||
334 | engine->display.early_init = nv50_display_early_init; | ||
335 | engine->display.late_takedown = nv50_display_late_takedown; | ||
336 | engine->display.create = nv50_display_create; | ||
337 | engine->display.init = nv50_display_init; | ||
338 | engine->display.destroy = nv50_display_destroy; | ||
339 | engine->gpio.init = nv50_gpio_init; | ||
340 | engine->gpio.takedown = nv50_gpio_fini; | ||
341 | engine->gpio.get = nv50_gpio_get; | ||
342 | engine->gpio.set = nv50_gpio_set; | ||
343 | engine->gpio.irq_register = nv50_gpio_irq_register; | ||
344 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; | ||
345 | engine->gpio.irq_enable = nv50_gpio_irq_enable; | ||
346 | switch (dev_priv->chipset) { | ||
347 | case 0x84: | ||
348 | case 0x86: | ||
349 | case 0x92: | ||
350 | case 0x94: | ||
351 | case 0x96: | ||
352 | case 0x98: | ||
353 | case 0xa0: | ||
354 | case 0xaa: | ||
355 | case 0xac: | ||
356 | case 0x50: | ||
357 | engine->pm.clock_get = nv50_pm_clock_get; | ||
358 | engine->pm.clock_pre = nv50_pm_clock_pre; | ||
359 | engine->pm.clock_set = nv50_pm_clock_set; | ||
360 | break; | ||
361 | default: | ||
362 | engine->pm.clock_get = nva3_pm_clock_get; | ||
363 | engine->pm.clock_pre = nva3_pm_clock_pre; | ||
364 | engine->pm.clock_set = nva3_pm_clock_set; | ||
365 | break; | ||
366 | } | ||
367 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | ||
368 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | ||
369 | if (dev_priv->chipset >= 0x84) | ||
370 | engine->pm.temp_get = nv84_temp_get; | ||
371 | else | ||
372 | engine->pm.temp_get = nv40_temp_get; | ||
373 | engine->vram.init = nv50_vram_init; | ||
374 | engine->vram.takedown = nv50_vram_fini; | ||
375 | engine->vram.get = nv50_vram_new; | ||
376 | engine->vram.put = nv50_vram_del; | ||
377 | engine->vram.flags_valid = nv50_vram_flags_valid; | ||
378 | break; | ||
379 | case 0xC0: | ||
380 | engine->instmem.init = nvc0_instmem_init; | ||
381 | engine->instmem.takedown = nvc0_instmem_takedown; | ||
382 | engine->instmem.suspend = nvc0_instmem_suspend; | ||
383 | engine->instmem.resume = nvc0_instmem_resume; | ||
384 | engine->instmem.get = nv50_instmem_get; | ||
385 | engine->instmem.put = nv50_instmem_put; | ||
386 | engine->instmem.map = nv50_instmem_map; | ||
387 | engine->instmem.unmap = nv50_instmem_unmap; | ||
388 | engine->instmem.flush = nv84_instmem_flush; | ||
389 | engine->mc.init = nv50_mc_init; | ||
390 | engine->mc.takedown = nv50_mc_takedown; | ||
391 | engine->timer.init = nv04_timer_init; | ||
392 | engine->timer.read = nv04_timer_read; | ||
393 | engine->timer.takedown = nv04_timer_takedown; | ||
394 | engine->fb.init = nvc0_fb_init; | ||
395 | engine->fb.takedown = nvc0_fb_takedown; | ||
396 | engine->fifo.channels = 128; | ||
397 | engine->fifo.init = nvc0_fifo_init; | ||
398 | engine->fifo.takedown = nvc0_fifo_takedown; | ||
399 | engine->fifo.disable = nvc0_fifo_disable; | ||
400 | engine->fifo.enable = nvc0_fifo_enable; | ||
401 | engine->fifo.reassign = nvc0_fifo_reassign; | ||
402 | engine->fifo.channel_id = nvc0_fifo_channel_id; | ||
403 | engine->fifo.create_context = nvc0_fifo_create_context; | ||
404 | engine->fifo.destroy_context = nvc0_fifo_destroy_context; | ||
405 | engine->fifo.load_context = nvc0_fifo_load_context; | ||
406 | engine->fifo.unload_context = nvc0_fifo_unload_context; | ||
407 | engine->display.early_init = nv50_display_early_init; | ||
408 | engine->display.late_takedown = nv50_display_late_takedown; | ||
409 | engine->display.create = nv50_display_create; | ||
410 | engine->display.init = nv50_display_init; | ||
411 | engine->display.destroy = nv50_display_destroy; | ||
412 | engine->gpio.init = nv50_gpio_init; | ||
413 | engine->gpio.takedown = nouveau_stub_takedown; | ||
414 | engine->gpio.get = nv50_gpio_get; | ||
415 | engine->gpio.set = nv50_gpio_set; | ||
416 | engine->gpio.irq_register = nv50_gpio_irq_register; | ||
417 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; | ||
418 | engine->gpio.irq_enable = nv50_gpio_irq_enable; | ||
419 | engine->vram.init = nvc0_vram_init; | ||
420 | engine->vram.takedown = nv50_vram_fini; | ||
421 | engine->vram.get = nvc0_vram_new; | ||
422 | engine->vram.put = nv50_vram_del; | ||
423 | engine->vram.flags_valid = nvc0_vram_flags_valid; | ||
424 | engine->pm.temp_get = nv84_temp_get; | ||
425 | break; | ||
426 | default: | ||
427 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); | ||
428 | return 1; | ||
429 | } | ||
430 | |||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | static unsigned int | ||
435 | nouveau_vga_set_decode(void *priv, bool state) | ||
436 | { | ||
437 | struct drm_device *dev = priv; | ||
438 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
439 | |||
440 | if (dev_priv->chipset >= 0x40) | ||
441 | nv_wr32(dev, 0x88054, state); | ||
442 | else | ||
443 | nv_wr32(dev, 0x1854, state); | ||
444 | |||
445 | if (state) | ||
446 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | ||
447 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | ||
448 | else | ||
449 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | ||
450 | } | ||
451 | |||
452 | static int | ||
453 | nouveau_card_init_channel(struct drm_device *dev) | ||
454 | { | ||
455 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
456 | int ret; | ||
457 | |||
458 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, | ||
459 | NvDmaFB, NvDmaTT); | ||
460 | if (ret) | ||
461 | return ret; | ||
462 | |||
463 | mutex_unlock(&dev_priv->channel->mutex); | ||
464 | return 0; | ||
465 | } | ||
466 | |||
/* vga_switcheroo state-change callback: power the nouveau device on or off.
 * The regular PCI suspend/resume paths do the actual work;
 * dev->switch_power_state brackets the transition so the DRM core knows a
 * switch is in progress, and output polling is only active while on. */
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
					 enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pci_resume(pdev);
		/* Poll for outputs only once the hardware is awake again. */
		drm_kms_helper_poll_enable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* Stop polling before suspending, in the reverse order of
		 * the power-on path. */
		drm_kms_helper_poll_disable(dev);
		nouveau_pci_suspend(pdev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
486 | |||
/* vga_switcheroo reprobe callback: re-check display outputs for fbcon
 * after a GPU switch. */
static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	nouveau_fbcon_output_poll_changed(pci_get_drvdata(pdev));
}
492 | |||
493 | static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) | ||
494 | { | ||
495 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
496 | bool can_switch; | ||
497 | |||
498 | spin_lock(&dev->count_lock); | ||
499 | can_switch = (dev->open_count == 0); | ||
500 | spin_unlock(&dev->count_lock); | ||
501 | return can_switch; | ||
502 | } | ||
503 | |||
504 | int | ||
505 | nouveau_card_init(struct drm_device *dev) | ||
506 | { | ||
507 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
508 | struct nouveau_engine *engine; | ||
509 | int ret, e = 0; | ||
510 | |||
511 | vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); | ||
512 | vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, | ||
513 | nouveau_switcheroo_reprobe, | ||
514 | nouveau_switcheroo_can_switch); | ||
515 | |||
516 | /* Initialise internal driver API hooks */ | ||
517 | ret = nouveau_init_engine_ptrs(dev); | ||
518 | if (ret) | ||
519 | goto out; | ||
520 | engine = &dev_priv->engine; | ||
521 | spin_lock_init(&dev_priv->channels.lock); | ||
522 | spin_lock_init(&dev_priv->tile.lock); | ||
523 | spin_lock_init(&dev_priv->context_switch_lock); | ||
524 | spin_lock_init(&dev_priv->vm_lock); | ||
525 | |||
526 | /* Make the CRTCs and I2C buses accessible */ | ||
527 | ret = engine->display.early_init(dev); | ||
528 | if (ret) | ||
529 | goto out; | ||
530 | |||
531 | /* Parse BIOS tables / Run init tables if card not POSTed */ | ||
532 | ret = nouveau_bios_init(dev); | ||
533 | if (ret) | ||
534 | goto out_display_early; | ||
535 | |||
536 | nouveau_pm_init(dev); | ||
537 | |||
538 | ret = engine->vram.init(dev); | ||
539 | if (ret) | ||
540 | goto out_bios; | ||
541 | |||
542 | ret = nouveau_gpuobj_init(dev); | ||
543 | if (ret) | ||
544 | goto out_vram; | ||
545 | |||
546 | ret = engine->instmem.init(dev); | ||
547 | if (ret) | ||
548 | goto out_gpuobj; | ||
549 | |||
550 | ret = nouveau_mem_vram_init(dev); | ||
551 | if (ret) | ||
552 | goto out_instmem; | ||
553 | |||
554 | ret = nouveau_mem_gart_init(dev); | ||
555 | if (ret) | ||
556 | goto out_ttmvram; | ||
557 | |||
558 | /* PMC */ | ||
559 | ret = engine->mc.init(dev); | ||
560 | if (ret) | ||
561 | goto out_gart; | ||
562 | |||
563 | /* PGPIO */ | ||
564 | ret = engine->gpio.init(dev); | ||
565 | if (ret) | ||
566 | goto out_mc; | ||
567 | |||
568 | /* PTIMER */ | ||
569 | ret = engine->timer.init(dev); | ||
570 | if (ret) | ||
571 | goto out_gpio; | ||
572 | |||
573 | /* PFB */ | ||
574 | ret = engine->fb.init(dev); | ||
575 | if (ret) | ||
576 | goto out_timer; | ||
577 | |||
578 | if (!dev_priv->noaccel) { | ||
579 | switch (dev_priv->card_type) { | ||
580 | case NV_04: | ||
581 | nv04_graph_create(dev); | ||
582 | break; | ||
583 | case NV_10: | ||
584 | nv10_graph_create(dev); | ||
585 | break; | ||
586 | case NV_20: | ||
587 | case NV_30: | ||
588 | nv20_graph_create(dev); | ||
589 | break; | ||
590 | case NV_40: | ||
591 | nv40_graph_create(dev); | ||
592 | break; | ||
593 | case NV_50: | ||
594 | nv50_graph_create(dev); | ||
595 | break; | ||
596 | case NV_C0: | ||
597 | nvc0_graph_create(dev); | ||
598 | break; | ||
599 | default: | ||
600 | break; | ||
601 | } | ||
602 | |||
603 | switch (dev_priv->chipset) { | ||
604 | case 0x84: | ||
605 | case 0x86: | ||
606 | case 0x92: | ||
607 | case 0x94: | ||
608 | case 0x96: | ||
609 | case 0xa0: | ||
610 | nv84_crypt_create(dev); | ||
611 | break; | ||
612 | } | ||
613 | |||
614 | switch (dev_priv->card_type) { | ||
615 | case NV_50: | ||
616 | switch (dev_priv->chipset) { | ||
617 | case 0xa3: | ||
618 | case 0xa5: | ||
619 | case 0xa8: | ||
620 | case 0xaf: | ||
621 | nva3_copy_create(dev); | ||
622 | break; | ||
623 | } | ||
624 | break; | ||
625 | case NV_C0: | ||
626 | nvc0_copy_create(dev, 0); | ||
627 | nvc0_copy_create(dev, 1); | ||
628 | break; | ||
629 | default: | ||
630 | break; | ||
631 | } | ||
632 | |||
633 | if (dev_priv->card_type == NV_40) | ||
634 | nv40_mpeg_create(dev); | ||
635 | else | ||
636 | if (dev_priv->card_type == NV_50 && | ||
637 | (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) | ||
638 | nv50_mpeg_create(dev); | ||
639 | |||
640 | for (e = 0; e < NVOBJ_ENGINE_NR; e++) { | ||
641 | if (dev_priv->eng[e]) { | ||
642 | ret = dev_priv->eng[e]->init(dev, e); | ||
643 | if (ret) | ||
644 | goto out_engine; | ||
645 | } | ||
646 | } | ||
647 | |||
648 | /* PFIFO */ | ||
649 | ret = engine->fifo.init(dev); | ||
650 | if (ret) | ||
651 | goto out_engine; | ||
652 | } | ||
653 | |||
654 | ret = engine->display.create(dev); | ||
655 | if (ret) | ||
656 | goto out_fifo; | ||
657 | |||
658 | ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1); | ||
659 | if (ret) | ||
660 | goto out_vblank; | ||
661 | |||
662 | ret = nouveau_irq_init(dev); | ||
663 | if (ret) | ||
664 | goto out_vblank; | ||
665 | |||
666 | /* what about PVIDEO/PCRTC/PRAMDAC etc? */ | ||
667 | |||
668 | if (dev_priv->eng[NVOBJ_ENGINE_GR]) { | ||
669 | ret = nouveau_fence_init(dev); | ||
670 | if (ret) | ||
671 | goto out_irq; | ||
672 | |||
673 | ret = nouveau_card_init_channel(dev); | ||
674 | if (ret) | ||
675 | goto out_fence; | ||
676 | } | ||
677 | |||
678 | nouveau_fbcon_init(dev); | ||
679 | drm_kms_helper_poll_init(dev); | ||
680 | return 0; | ||
681 | |||
682 | out_fence: | ||
683 | nouveau_fence_fini(dev); | ||
684 | out_irq: | ||
685 | nouveau_irq_fini(dev); | ||
686 | out_vblank: | ||
687 | drm_vblank_cleanup(dev); | ||
688 | engine->display.destroy(dev); | ||
689 | out_fifo: | ||
690 | if (!dev_priv->noaccel) | ||
691 | engine->fifo.takedown(dev); | ||
692 | out_engine: | ||
693 | if (!dev_priv->noaccel) { | ||
694 | for (e = e - 1; e >= 0; e--) { | ||
695 | if (!dev_priv->eng[e]) | ||
696 | continue; | ||
697 | dev_priv->eng[e]->fini(dev, e, false); | ||
698 | dev_priv->eng[e]->destroy(dev,e ); | ||
699 | } | ||
700 | } | ||
701 | |||
702 | engine->fb.takedown(dev); | ||
703 | out_timer: | ||
704 | engine->timer.takedown(dev); | ||
705 | out_gpio: | ||
706 | engine->gpio.takedown(dev); | ||
707 | out_mc: | ||
708 | engine->mc.takedown(dev); | ||
709 | out_gart: | ||
710 | nouveau_mem_gart_fini(dev); | ||
711 | out_ttmvram: | ||
712 | nouveau_mem_vram_fini(dev); | ||
713 | out_instmem: | ||
714 | engine->instmem.takedown(dev); | ||
715 | out_gpuobj: | ||
716 | nouveau_gpuobj_takedown(dev); | ||
717 | out_vram: | ||
718 | engine->vram.takedown(dev); | ||
719 | out_bios: | ||
720 | nouveau_pm_fini(dev); | ||
721 | nouveau_bios_takedown(dev); | ||
722 | out_display_early: | ||
723 | engine->display.late_takedown(dev); | ||
724 | out: | ||
725 | vga_client_register(dev->pdev, NULL, NULL, NULL); | ||
726 | return ret; | ||
727 | } | ||
728 | |||
729 | static void nouveau_card_takedown(struct drm_device *dev) | ||
730 | { | ||
731 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
732 | struct nouveau_engine *engine = &dev_priv->engine; | ||
733 | int e; | ||
734 | |||
735 | drm_kms_helper_poll_fini(dev); | ||
736 | nouveau_fbcon_fini(dev); | ||
737 | |||
738 | if (dev_priv->channel) { | ||
739 | nouveau_channel_put_unlocked(&dev_priv->channel); | ||
740 | nouveau_fence_fini(dev); | ||
741 | } | ||
742 | |||
743 | engine->display.destroy(dev); | ||
744 | |||
745 | if (!dev_priv->noaccel) { | ||
746 | engine->fifo.takedown(dev); | ||
747 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { | ||
748 | if (dev_priv->eng[e]) { | ||
749 | dev_priv->eng[e]->fini(dev, e, false); | ||
750 | dev_priv->eng[e]->destroy(dev,e ); | ||
751 | } | ||
752 | } | ||
753 | } | ||
754 | engine->fb.takedown(dev); | ||
755 | engine->timer.takedown(dev); | ||
756 | engine->gpio.takedown(dev); | ||
757 | engine->mc.takedown(dev); | ||
758 | engine->display.late_takedown(dev); | ||
759 | |||
760 | if (dev_priv->vga_ram) { | ||
761 | nouveau_bo_unpin(dev_priv->vga_ram); | ||
762 | nouveau_bo_ref(NULL, &dev_priv->vga_ram); | ||
763 | } | ||
764 | |||
765 | mutex_lock(&dev->struct_mutex); | ||
766 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
767 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); | ||
768 | mutex_unlock(&dev->struct_mutex); | ||
769 | nouveau_mem_gart_fini(dev); | ||
770 | nouveau_mem_vram_fini(dev); | ||
771 | |||
772 | engine->instmem.takedown(dev); | ||
773 | nouveau_gpuobj_takedown(dev); | ||
774 | engine->vram.takedown(dev); | ||
775 | |||
776 | nouveau_irq_fini(dev); | ||
777 | drm_vblank_cleanup(dev); | ||
778 | |||
779 | nouveau_pm_fini(dev); | ||
780 | nouveau_bios_takedown(dev); | ||
781 | |||
782 | vga_client_register(dev->pdev, NULL, NULL, NULL); | ||
783 | } | ||
784 | |||
785 | int | ||
786 | nouveau_open(struct drm_device *dev, struct drm_file *file_priv) | ||
787 | { | ||
788 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
789 | struct nouveau_fpriv *fpriv; | ||
790 | int ret; | ||
791 | |||
792 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); | ||
793 | if (unlikely(!fpriv)) | ||
794 | return -ENOMEM; | ||
795 | |||
796 | spin_lock_init(&fpriv->lock); | ||
797 | INIT_LIST_HEAD(&fpriv->channels); | ||
798 | |||
799 | if (dev_priv->card_type == NV_50) { | ||
800 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, | ||
801 | &fpriv->vm); | ||
802 | if (ret) { | ||
803 | kfree(fpriv); | ||
804 | return ret; | ||
805 | } | ||
806 | } else | ||
807 | if (dev_priv->card_type >= NV_C0) { | ||
808 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, | ||
809 | &fpriv->vm); | ||
810 | if (ret) { | ||
811 | kfree(fpriv); | ||
812 | return ret; | ||
813 | } | ||
814 | } | ||
815 | |||
816 | file_priv->driver_priv = fpriv; | ||
817 | return 0; | ||
818 | } | ||
819 | |||
/* here a client dies, release the stuff that was allocated for its
 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	/* Tear down any channels the client still holds. */
	nouveau_channel_cleanup(dev, file_priv);
}
826 | |||
827 | void | ||
828 | nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv) | ||
829 | { | ||
830 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
831 | nouveau_vm_ref(NULL, &fpriv->vm, NULL); | ||
832 | kfree(fpriv); | ||
833 | } | ||
834 | |||
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
{
	/* All setup already happened in nouveau_load(); nothing to do here. */
	return 0;
}
841 | |||
/* if we have an OF card, copy vbios to RAMIN */
static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
{
#if defined(__powerpc__)
	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
	const uint32_t *bios;
	int size, i;

	if (!dn) {
		NV_INFO(dev, "Unable to get the OF node\n");
		return;
	}

	bios = of_get_property(dn, "NVDA,BMP", &size);
	if (!bios) {
		NV_INFO(dev, "Unable to get the OF bios\n");
		return;
	}

	/* Copy the image into RAMIN one 32-bit word at a time. */
	for (i = 0; i < size; i += 4)
		nv_wi32(dev, i, bios[i / 4]);
	NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
#endif
}
864 | |||
865 | static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev) | ||
866 | { | ||
867 | struct pci_dev *pdev = dev->pdev; | ||
868 | struct apertures_struct *aper = alloc_apertures(3); | ||
869 | if (!aper) | ||
870 | return NULL; | ||
871 | |||
872 | aper->ranges[0].base = pci_resource_start(pdev, 1); | ||
873 | aper->ranges[0].size = pci_resource_len(pdev, 1); | ||
874 | aper->count = 1; | ||
875 | |||
876 | if (pci_resource_len(pdev, 2)) { | ||
877 | aper->ranges[aper->count].base = pci_resource_start(pdev, 2); | ||
878 | aper->ranges[aper->count].size = pci_resource_len(pdev, 2); | ||
879 | aper->count++; | ||
880 | } | ||
881 | |||
882 | if (pci_resource_len(pdev, 3)) { | ||
883 | aper->ranges[aper->count].base = pci_resource_start(pdev, 3); | ||
884 | aper->ranges[aper->count].size = pci_resource_len(pdev, 3); | ||
885 | aper->count++; | ||
886 | } | ||
887 | |||
888 | return aper; | ||
889 | } | ||
890 | |||
/* Evict any generic/firmware framebuffer driver sitting on our apertures
 * before nouveau takes over the hardware. */
static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	bool primary = false;
	dev_priv->apertures = nouveau_get_apertures(dev);
	if (!dev_priv->apertures)
		return -ENOMEM;

#ifdef CONFIG_X86
	/* A shadowed PCI ROM indicates this was the boot VGA device. */
	primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif

	remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
	return 0;
}
906 | |||
907 | int nouveau_load(struct drm_device *dev, unsigned long flags) | ||
908 | { | ||
909 | struct drm_nouveau_private *dev_priv; | ||
910 | uint32_t reg0; | ||
911 | resource_size_t mmio_start_offs; | ||
912 | int ret; | ||
913 | |||
914 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | ||
915 | if (!dev_priv) { | ||
916 | ret = -ENOMEM; | ||
917 | goto err_out; | ||
918 | } | ||
919 | dev->dev_private = dev_priv; | ||
920 | dev_priv->dev = dev; | ||
921 | |||
922 | dev_priv->flags = flags & NOUVEAU_FLAGS; | ||
923 | |||
924 | NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", | ||
925 | dev->pci_vendor, dev->pci_device, dev->pdev->class); | ||
926 | |||
927 | /* resource 0 is mmio regs */ | ||
928 | /* resource 1 is linear FB */ | ||
929 | /* resource 2 is RAMIN (mmio regs + 0x1000000) */ | ||
930 | /* resource 6 is bios */ | ||
931 | |||
932 | /* map the mmio regs */ | ||
933 | mmio_start_offs = pci_resource_start(dev->pdev, 0); | ||
934 | dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000); | ||
935 | if (!dev_priv->mmio) { | ||
936 | NV_ERROR(dev, "Unable to initialize the mmio mapping. " | ||
937 | "Please report your setup to " DRIVER_EMAIL "\n"); | ||
938 | ret = -EINVAL; | ||
939 | goto err_priv; | ||
940 | } | ||
941 | NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", | ||
942 | (unsigned long long)mmio_start_offs); | ||
943 | |||
944 | #ifdef __BIG_ENDIAN | ||
945 | /* Put the card in BE mode if it's not */ | ||
946 | if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001) | ||
947 | nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001); | ||
948 | |||
949 | DRM_MEMORYBARRIER(); | ||
950 | #endif | ||
951 | |||
952 | /* Time to determine the card architecture */ | ||
953 | reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); | ||
954 | dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */ | ||
955 | |||
956 | /* We're dealing with >=NV10 */ | ||
957 | if ((reg0 & 0x0f000000) > 0) { | ||
958 | /* Bit 27-20 contain the architecture in hex */ | ||
959 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; | ||
960 | dev_priv->stepping = (reg0 & 0xff); | ||
961 | /* NV04 or NV05 */ | ||
962 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { | ||
963 | if (reg0 & 0x00f00000) | ||
964 | dev_priv->chipset = 0x05; | ||
965 | else | ||
966 | dev_priv->chipset = 0x04; | ||
967 | } else | ||
968 | dev_priv->chipset = 0xff; | ||
969 | |||
970 | switch (dev_priv->chipset & 0xf0) { | ||
971 | case 0x00: | ||
972 | case 0x10: | ||
973 | case 0x20: | ||
974 | case 0x30: | ||
975 | dev_priv->card_type = dev_priv->chipset & 0xf0; | ||
976 | break; | ||
977 | case 0x40: | ||
978 | case 0x60: | ||
979 | dev_priv->card_type = NV_40; | ||
980 | break; | ||
981 | case 0x50: | ||
982 | case 0x80: | ||
983 | case 0x90: | ||
984 | case 0xa0: | ||
985 | dev_priv->card_type = NV_50; | ||
986 | break; | ||
987 | case 0xc0: | ||
988 | dev_priv->card_type = NV_C0; | ||
989 | break; | ||
990 | default: | ||
991 | NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0); | ||
992 | ret = -EINVAL; | ||
993 | goto err_mmio; | ||
994 | } | ||
995 | |||
996 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", | ||
997 | dev_priv->card_type, reg0); | ||
998 | |||
999 | /* Determine whether we'll attempt acceleration or not, some | ||
1000 | * cards are disabled by default here due to them being known | ||
1001 | * non-functional, or never been tested due to lack of hw. | ||
1002 | */ | ||
1003 | dev_priv->noaccel = !!nouveau_noaccel; | ||
1004 | if (nouveau_noaccel == -1) { | ||
1005 | switch (dev_priv->chipset) { | ||
1006 | case 0xc1: /* known broken */ | ||
1007 | case 0xc8: /* never tested */ | ||
1008 | NV_INFO(dev, "acceleration disabled by default, pass " | ||
1009 | "noaccel=0 to force enable\n"); | ||
1010 | dev_priv->noaccel = true; | ||
1011 | break; | ||
1012 | default: | ||
1013 | dev_priv->noaccel = false; | ||
1014 | break; | ||
1015 | } | ||
1016 | } | ||
1017 | |||
1018 | ret = nouveau_remove_conflicting_drivers(dev); | ||
1019 | if (ret) | ||
1020 | goto err_mmio; | ||
1021 | |||
1022 | /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */ | ||
1023 | if (dev_priv->card_type >= NV_40) { | ||
1024 | int ramin_bar = 2; | ||
1025 | if (pci_resource_len(dev->pdev, ramin_bar) == 0) | ||
1026 | ramin_bar = 3; | ||
1027 | |||
1028 | dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); | ||
1029 | dev_priv->ramin = | ||
1030 | ioremap(pci_resource_start(dev->pdev, ramin_bar), | ||
1031 | dev_priv->ramin_size); | ||
1032 | if (!dev_priv->ramin) { | ||
1033 | NV_ERROR(dev, "Failed to PRAMIN BAR"); | ||
1034 | ret = -ENOMEM; | ||
1035 | goto err_mmio; | ||
1036 | } | ||
1037 | } else { | ||
1038 | dev_priv->ramin_size = 1 * 1024 * 1024; | ||
1039 | dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, | ||
1040 | dev_priv->ramin_size); | ||
1041 | if (!dev_priv->ramin) { | ||
1042 | NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); | ||
1043 | ret = -ENOMEM; | ||
1044 | goto err_mmio; | ||
1045 | } | ||
1046 | } | ||
1047 | |||
1048 | nouveau_OF_copy_vbios_to_ramin(dev); | ||
1049 | |||
1050 | /* Special flags */ | ||
1051 | if (dev->pci_device == 0x01a0) | ||
1052 | dev_priv->flags |= NV_NFORCE; | ||
1053 | else if (dev->pci_device == 0x01f0) | ||
1054 | dev_priv->flags |= NV_NFORCE2; | ||
1055 | |||
1056 | /* For kernel modesetting, init card now and bring up fbcon */ | ||
1057 | ret = nouveau_card_init(dev); | ||
1058 | if (ret) | ||
1059 | goto err_ramin; | ||
1060 | |||
1061 | return 0; | ||
1062 | |||
1063 | err_ramin: | ||
1064 | iounmap(dev_priv->ramin); | ||
1065 | err_mmio: | ||
1066 | iounmap(dev_priv->mmio); | ||
1067 | err_priv: | ||
1068 | kfree(dev_priv); | ||
1069 | dev->dev_private = NULL; | ||
1070 | err_out: | ||
1071 | return ret; | ||
1072 | } | ||
1073 | |||
void nouveau_lastclose(struct drm_device *dev)
{
	/* Complete any vga_switcheroo switch deferred while a client still
	 * had the device open. */
	vga_switcheroo_process_delayed_switch();
}
1078 | |||
/* DRM unload hook: undo nouveau_load().  Order matters — the hardware must
 * be quiesced by nouveau_card_takedown() before the register windows are
 * unmapped. */
int nouveau_unload(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_card_takedown(dev);

	iounmap(dev_priv->mmio);
	iounmap(dev_priv->ramin);

	kfree(dev_priv);
	dev->dev_private = NULL;
	return 0;
}
1092 | |||
/* GETPARAM ioctl: report read-only device/driver properties to userspace.
 * Unknown parameters return -EINVAL. */
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_getparam *getparam = data;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = dev_priv->chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		getparam->value = dev->pci_vendor;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		getparam->value = dev->pci_device;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		if (drm_pci_device_is_agp(dev))
			getparam->value = NV_AGP;
		else if (pci_is_pcie(dev->pdev))
			getparam->value = NV_PCIE;
		else
			getparam->value = NV_PCI;
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = dev_priv->fb_available_size;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = dev_priv->gart_info.aper_size;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = dev_priv->engine.timer.read(dev);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		/* NV40 and NV50 versions are quite different, but register
		 * address is the same. User is supposed to know the card
		 * family anyway... */
		if (dev_priv->chipset >= 0x40) {
			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
			break;
		}
		/* FALLTHRU */
	default:
		NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}
1151 | |||
1152 | int | ||
1153 | nouveau_ioctl_setparam(struct drm_device *dev, void *data, | ||
1154 | struct drm_file *file_priv) | ||
1155 | { | ||
1156 | struct drm_nouveau_setparam *setparam = data; | ||
1157 | |||
1158 | switch (setparam->param) { | ||
1159 | default: | ||
1160 | NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); | ||
1161 | return -EINVAL; | ||
1162 | } | ||
1163 | |||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1167 | /* Wait until (value(reg) & mask) == val, up until timeout has hit */ | ||
1168 | bool | ||
1169 | nouveau_wait_eq(struct drm_device *dev, uint64_t timeout, | ||
1170 | uint32_t reg, uint32_t mask, uint32_t val) | ||
1171 | { | ||
1172 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1173 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
1174 | uint64_t start = ptimer->read(dev); | ||
1175 | |||
1176 | do { | ||
1177 | if ((nv_rd32(dev, reg) & mask) == val) | ||
1178 | return true; | ||
1179 | } while (ptimer->read(dev) - start < timeout); | ||
1180 | |||
1181 | return false; | ||
1182 | } | ||
1183 | |||
1184 | /* Wait until (value(reg) & mask) != val, up until timeout has hit */ | ||
1185 | bool | ||
1186 | nouveau_wait_ne(struct drm_device *dev, uint64_t timeout, | ||
1187 | uint32_t reg, uint32_t mask, uint32_t val) | ||
1188 | { | ||
1189 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1190 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
1191 | uint64_t start = ptimer->read(dev); | ||
1192 | |||
1193 | do { | ||
1194 | if ((nv_rd32(dev, reg) & mask) != val) | ||
1195 | return true; | ||
1196 | } while (ptimer->read(dev) - start < timeout); | ||
1197 | |||
1198 | return false; | ||
1199 | } | ||
1200 | |||
1201 | /* Waits for PGRAPH to go completely idle */ | ||
1202 | bool nouveau_wait_for_idle(struct drm_device *dev) | ||
1203 | { | ||
1204 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1205 | uint32_t mask = ~0; | ||
1206 | |||
1207 | if (dev_priv->card_type == NV_40) | ||
1208 | mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; | ||
1209 | |||
1210 | if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) { | ||
1211 | NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", | ||
1212 | nv_rd32(dev, NV04_PGRAPH_STATUS)); | ||
1213 | return false; | ||
1214 | } | ||
1215 | |||
1216 | return true; | ||
1217 | } | ||
1218 | |||
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c new file mode 100644 index 00000000000..081ca7b03e8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /* | ||
2 | * Copyright 2010 PathScale inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Martin Peres | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_pm.h" | ||
29 | |||
/*
 * Parse the VBIOS temperature table at 'temp' to obtain the sensor
 * calibration constants and the fan-boost/downclock/critical thresholds.
 * Defaults (including known per-chipset calibration values) are installed
 * first, so a missing or sparse table still leaves usable settings.
 */
static void
nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
	struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
	int i, headerlen, recordlen, entries;

	if (!temp) {
		NV_DEBUG(dev, "temperature table pointer invalid\n");
		return;
	}

	/* Set the default sensor's constants (identity transform) */
	sensor->offset_constant = 0;
	sensor->offset_mult = 0;
	sensor->offset_div = 1;
	sensor->slope_mult = 1;
	sensor->slope_div = 1;

	/* Set the default temperature thresholds */
	temps->critical = 110;
	temps->down_clock = 100;
	temps->fan_boost = 90;

	/* Known per-chipset calibration defaults for the internal sensor */
	if (dev_priv->card_type >= NV_40) {
		switch (dev_priv->chipset) {
		case 0x43:
			sensor->offset_mult = 32060;
			sensor->offset_div = 1000;
			sensor->slope_mult = 792;
			sensor->slope_div = 1000;
			break;

		case 0x44:
		case 0x47:
		case 0x4a:
			sensor->offset_mult = 27839;
			sensor->offset_div = 1000;
			sensor->slope_mult = 780;
			sensor->slope_div = 1000;
			break;

		case 0x46:
			sensor->offset_mult = -24775;
			sensor->offset_div = 100;
			sensor->slope_mult = 467;
			sensor->slope_div = 10000;
			break;

		case 0x49:
			sensor->offset_mult = -25051;
			sensor->offset_div = 100;
			sensor->slope_mult = 458;
			sensor->slope_div = 10000;
			break;

		case 0x4b:
			sensor->offset_mult = -24088;
			sensor->offset_div = 100;
			sensor->slope_mult = 442;
			sensor->slope_div = 10000;
			break;

		case 0x50:
			sensor->offset_mult = -22749;
			sensor->offset_div = 100;
			sensor->slope_mult = 431;
			sensor->slope_div = 10000;
			break;

		case 0x67:
			sensor->offset_mult = -26149;
			sensor->offset_div = 100;
			sensor->slope_mult = 484;
			sensor->slope_div = 10000;
			break;
		}
	}

	/* Table layout: byte 1 = header length, 2 = record length, 3 = count */
	headerlen = temp[1];
	recordlen = temp[2];
	entries = temp[3];
	temp = temp + headerlen;

	/* Read the entries from the table; byte 0 is the record id,
	 * bytes 1-2 the record value. */
	for (i = 0; i < entries; i++) {
		s16 value = ROM16(temp[1]);

		switch (temp[0]) {
		case 0x01:
			if ((value & 0x8f) == 0)
				sensor->offset_constant = (value >> 9) & 0x7f;
			break;

		case 0x04:
			if ((value & 0xf00f) == 0xa000) /* core */
				temps->critical = (value&0x0ff0) >> 4;
			break;

		case 0x07:
			if ((value & 0xf00f) == 0xa000) /* core */
				temps->down_clock = (value&0x0ff0) >> 4;
			break;

		case 0x08:
			if ((value & 0xf00f) == 0xa000) /* core */
				temps->fan_boost = (value&0x0ff0) >> 4;
			break;

		case 0x10:
			sensor->offset_mult = value;
			break;

		case 0x11:
			sensor->offset_div = value;
			break;

		case 0x12:
			sensor->slope_mult = value;
			break;

		case 0x13:
			sensor->slope_div = value;
			break;
		}
		temp += recordlen;
	}

	/* Clamp whatever the table supplied to sane limits. */
	nouveau_temp_safety_checks(dev);
}
163 | |||
164 | static int | ||
165 | nv40_sensor_setup(struct drm_device *dev) | ||
166 | { | ||
167 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
168 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | ||
169 | struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; | ||
170 | s32 offset = sensor->offset_mult / sensor->offset_div; | ||
171 | s32 sensor_calibration; | ||
172 | |||
173 | /* set up the sensors */ | ||
174 | sensor_calibration = 120 - offset - sensor->offset_constant; | ||
175 | sensor_calibration = sensor_calibration * sensor->slope_div / | ||
176 | sensor->slope_mult; | ||
177 | |||
178 | if (dev_priv->chipset >= 0x46) | ||
179 | sensor_calibration |= 0x80000000; | ||
180 | else | ||
181 | sensor_calibration |= 0x10000000; | ||
182 | |||
183 | nv_wr32(dev, 0x0015b0, sensor_calibration); | ||
184 | |||
185 | /* Wait for the sensor to update */ | ||
186 | msleep(5); | ||
187 | |||
188 | /* read */ | ||
189 | return nv_rd32(dev, 0x0015b4) & 0x1fff; | ||
190 | } | ||
191 | |||
192 | int | ||
193 | nv40_temp_get(struct drm_device *dev) | ||
194 | { | ||
195 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
196 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | ||
197 | struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; | ||
198 | int offset = sensor->offset_mult / sensor->offset_div; | ||
199 | int core_temp; | ||
200 | |||
201 | if (dev_priv->card_type >= NV_50) { | ||
202 | core_temp = nv_rd32(dev, 0x20008); | ||
203 | } else { | ||
204 | core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; | ||
205 | /* Setup the sensor if the temperature is 0 */ | ||
206 | if (core_temp == 0) | ||
207 | core_temp = nv40_sensor_setup(dev); | ||
208 | } | ||
209 | |||
210 | core_temp = core_temp * sensor->slope_mult / sensor->slope_div; | ||
211 | core_temp = core_temp + offset + sensor->offset_constant; | ||
212 | |||
213 | return core_temp; | ||
214 | } | ||
215 | |||
/* Read the NV84+ core temperature register directly (no calibration). */
int
nv84_temp_get(struct drm_device *dev)
{
	return nv_rd32(dev, 0x20400);
}
221 | |||
222 | void | ||
223 | nouveau_temp_safety_checks(struct drm_device *dev) | ||
224 | { | ||
225 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
226 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | ||
227 | struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp; | ||
228 | |||
229 | if (temps->critical > 120) | ||
230 | temps->critical = 120; | ||
231 | else if (temps->critical < 80) | ||
232 | temps->critical = 80; | ||
233 | |||
234 | if (temps->down_clock > 110) | ||
235 | temps->down_clock = 110; | ||
236 | else if (temps->down_clock < 60) | ||
237 | temps->down_clock = 60; | ||
238 | |||
239 | if (temps->fan_boost > 100) | ||
240 | temps->fan_boost = 100; | ||
241 | else if (temps->fan_boost < 40) | ||
242 | temps->fan_boost = 40; | ||
243 | } | ||
244 | |||
245 | static bool | ||
246 | probe_monitoring_device(struct nouveau_i2c_chan *i2c, | ||
247 | struct i2c_board_info *info) | ||
248 | { | ||
249 | struct i2c_client *client; | ||
250 | |||
251 | request_module("%s%s", I2C_MODULE_PREFIX, info->type); | ||
252 | |||
253 | client = i2c_new_device(&i2c->adapter, info); | ||
254 | if (!client) | ||
255 | return false; | ||
256 | |||
257 | if (!client->driver || client->driver->detect(client, info)) { | ||
258 | i2c_unregister_device(client); | ||
259 | return false; | ||
260 | } | ||
261 | |||
262 | return true; | ||
263 | } | ||
264 | |||
265 | static void | ||
266 | nouveau_temp_probe_i2c(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
269 | struct dcb_table *dcb = &dev_priv->vbios.dcb; | ||
270 | struct i2c_board_info info[] = { | ||
271 | { I2C_BOARD_INFO("w83l785ts", 0x2d) }, | ||
272 | { I2C_BOARD_INFO("w83781d", 0x2d) }, | ||
273 | { I2C_BOARD_INFO("adt7473", 0x2e) }, | ||
274 | { I2C_BOARD_INFO("f75375", 0x2e) }, | ||
275 | { I2C_BOARD_INFO("lm99", 0x4c) }, | ||
276 | { } | ||
277 | }; | ||
278 | int idx = (dcb->version >= 0x40 ? | ||
279 | dcb->i2c_default_indices & 0xf : 2); | ||
280 | |||
281 | nouveau_i2c_identify(dev, "monitoring device", info, | ||
282 | probe_monitoring_device, idx); | ||
283 | } | ||
284 | |||
285 | void | ||
286 | nouveau_temp_init(struct drm_device *dev) | ||
287 | { | ||
288 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
289 | struct nvbios *bios = &dev_priv->vbios; | ||
290 | struct bit_entry P; | ||
291 | u8 *temp = NULL; | ||
292 | |||
293 | if (bios->type == NVBIOS_BIT) { | ||
294 | if (bit_table(dev, 'P', &P)) | ||
295 | return; | ||
296 | |||
297 | if (P.version == 1) | ||
298 | temp = ROMPTR(bios, P.data[12]); | ||
299 | else if (P.version == 2) | ||
300 | temp = ROMPTR(bios, P.data[16]); | ||
301 | else | ||
302 | NV_WARN(dev, "unknown temp for BIT P %d\n", P.version); | ||
303 | |||
304 | nouveau_temp_vbios_parse(dev, temp); | ||
305 | } | ||
306 | |||
307 | nouveau_temp_probe_i2c(dev); | ||
308 | } | ||
309 | |||
/* Teardown counterpart to nouveau_temp_init(); currently nothing to do. */
void
nouveau_temp_fini(struct drm_device *dev)
{

}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c new file mode 100644 index 00000000000..e51b51503ba --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Nouveau Project | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include <linux/ratelimit.h> | ||
29 | |||
30 | #include "nouveau_util.h" | ||
31 | |||
32 | static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); | ||
33 | |||
34 | void | ||
35 | nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) | ||
36 | { | ||
37 | while (bf->name) { | ||
38 | if (value & bf->mask) { | ||
39 | printk(" %s", bf->name); | ||
40 | value &= ~bf->mask; | ||
41 | } | ||
42 | |||
43 | bf++; | ||
44 | } | ||
45 | |||
46 | if (value) | ||
47 | printk(" (unknown bits 0x%08x)", value); | ||
48 | } | ||
49 | |||
50 | const struct nouveau_enum * | ||
51 | nouveau_enum_find(const struct nouveau_enum *en, u32 value) | ||
52 | { | ||
53 | while (en->name) { | ||
54 | if (en->value == value) | ||
55 | return en; | ||
56 | en++; | ||
57 | } | ||
58 | |||
59 | return NULL; | ||
60 | } | ||
61 | |||
62 | void | ||
63 | nouveau_enum_print(const struct nouveau_enum *en, u32 value) | ||
64 | { | ||
65 | en = nouveau_enum_find(en, value); | ||
66 | if (en) { | ||
67 | printk("%s", en->name); | ||
68 | return; | ||
69 | } | ||
70 | |||
71 | printk("(unknown enum 0x%08x)", value); | ||
72 | } | ||
73 | |||
/*
 * Shared ratelimit for nouveau log spam; the file-static state allows
 * 20 messages per 3*HZ window.  Returns the result of __ratelimit().
 */
int
nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h new file mode 100644 index 00000000000..b97719fbb73 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Nouveau Project | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #ifndef __NOUVEAU_UTIL_H__ | ||
29 | #define __NOUVEAU_UTIL_H__ | ||
30 | |||
/* One named bit (or bit group) in a register value; tables of these are
 * terminated by an entry with a NULL name. */
struct nouveau_bitfield {
	u32 mask;
	const char *name;
};

/* One named value in an enumerated register field; tables of these are
 * terminated by an entry with a NULL name. */
struct nouveau_enum {
	u32 value;
	const char *name;
	void *data;
};

void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
void nouveau_enum_print(const struct nouveau_enum *, u32 value);
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *, u32 value);

/* Nonzero when the caller may proceed under the shared ratelimit. */
int nouveau_ratelimit(void);
48 | |||
49 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c new file mode 100644 index 00000000000..244fd38fdb8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c | |||
@@ -0,0 +1,435 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
/*
 * Map the physical regions backing 'node' into the VM, starting at byte
 * offset 'delta' within the vma.  Work is split at page-table boundaries:
 * 'pte' wraps to 0 and 'pde' advances each time the end of a PGT is hit.
 */
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;	/* large-page index */
	u32 offset = vma->node->offset + (delta >> 12);	/* in 4KiB units */
	u32 bits = vma->node->type - 12;	/* log2 page size, rel. 4KiB */
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);	/* PTEs per page table */
	u32 end, len;

	/* 'delta' is reused as the running byte offset into the mapping */
	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			/* map no further than the end of this page table */
			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				/* crossed into the next page table */
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}
72 | |||
/* Map all of 'node' starting at the beginning of the vma. */
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}
78 | |||
/*
 * Map a list of DMA page addresses into the VM at byte offset 'delta'
 * within the vma, for 'length' bytes.  Like nouveau_vm_map_at(), the
 * work is split at page-table boundaries.
 */
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem, dma_addr_t *list)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;	/* large-page index */
	u32 offset = vma->node->offset + (delta >> 12);	/* in 4KiB units */
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;	/* pages left to map */
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);	/* PTEs per page table */
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		/* map no further than the end of this page table */
		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num -= len;
		pte += len;
		list += len;	/* advance through the DMA address list */
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
114 | |||
/*
 * Clear 'length' bytes worth of PTEs starting at byte offset 'delta'
 * within the vma, walking the page tables the same way the map paths do.
 */
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;	/* large-page index */
	u32 offset = vma->node->offset + (delta >> 12);	/* in 4KiB units */
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;	/* pages left to unmap */
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);	/* PTEs per page table */
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		/* unmap no further than the end of this page table */
		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
148 | |||
/* Unmap the entire vma (its full length, from offset 0). */
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
154 | |||
/*
 * Drop one reference on each page table in [fpde, lpde] for the given
 * page size index 'big'.  Tables whose refcount reaches zero are cleared
 * from every linked page directory and released.  Called with
 * vm->mm->mutex held; the mutex is dropped across the final gpuobj put
 * (NOTE(review): presumably because the release can sleep — confirm).
 */
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		/* last reference gone: detach the table before releasing */
		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		/* write the now-NULL entry into every page directory */
		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
	}
}
180 | |||
/*
 * Allocate the page table for 'pde' at page size 'type' and install it
 * in every linked page directory.  Called with vm->mm->mutex held; the
 * mutex is dropped around the allocation, so a racing caller may fill
 * the PDE first — in that case the fresh allocation is released and the
 * existing table's refcount is bumped instead.  Returns 0 or -errno.
 */
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	/* PTE count for this table, 8 bytes per PTE */
	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm->mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
		return 0;
	}

	/* publish the new table in every linked page directory */
	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}
216 | |||
/*
 * Allocate 'size' bytes of GPU address space in 'vm' at the given page
 * size, taking a reference on (and allocating, if needed) every page
 * table the range spans.  On failure the references taken so far are
 * rolled back and vma->node is cleared.  Returns 0 or -errno.
 */
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;	/* alignment in 4KiB units */
	u32 msize = size >> 12;			/* size in 4KiB units */
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm->mutex);
	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm->mutex);
		return ret;
	}

	/* first and last page directory entries the allocation spans */
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			/* roll back references taken on earlier PDEs */
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(vm->mm, vma->node);
			mutex_unlock(&vm->mm->mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}
261 | |||
/*
 * Release the address space held by 'vma', dropping the page table
 * references taken in nouveau_vm_get().  Safe to call on a vma whose
 * node is already NULL (never allocated, or already put).
 */
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm->mutex);
}
279 | |||
280 | int | ||
281 | nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, | ||
282 | struct nouveau_vm **pvm) | ||
283 | { | ||
284 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
285 | struct nouveau_vm *vm; | ||
286 | u64 mm_length = (offset + length) - mm_offset; | ||
287 | u32 block, pgt_bits; | ||
288 | int ret; | ||
289 | |||
290 | vm = kzalloc(sizeof(*vm), GFP_KERNEL); | ||
291 | if (!vm) | ||
292 | return -ENOMEM; | ||
293 | |||
294 | if (dev_priv->card_type == NV_50) { | ||
295 | vm->map_pgt = nv50_vm_map_pgt; | ||
296 | vm->map = nv50_vm_map; | ||
297 | vm->map_sg = nv50_vm_map_sg; | ||
298 | vm->unmap = nv50_vm_unmap; | ||
299 | vm->flush = nv50_vm_flush; | ||
300 | vm->spg_shift = 12; | ||
301 | vm->lpg_shift = 16; | ||
302 | |||
303 | pgt_bits = 29; | ||
304 | block = (1 << pgt_bits); | ||
305 | if (length < block) | ||
306 | block = length; | ||
307 | |||
308 | } else | ||
309 | if (dev_priv->card_type == NV_C0) { | ||
310 | vm->map_pgt = nvc0_vm_map_pgt; | ||
311 | vm->map = nvc0_vm_map; | ||
312 | vm->map_sg = nvc0_vm_map_sg; | ||
313 | vm->unmap = nvc0_vm_unmap; | ||
314 | vm->flush = nvc0_vm_flush; | ||
315 | vm->spg_shift = 12; | ||
316 | vm->lpg_shift = 17; | ||
317 | pgt_bits = 27; | ||
318 | block = 4096; | ||
319 | } else { | ||
320 | kfree(vm); | ||
321 | return -ENOSYS; | ||
322 | } | ||
323 | |||
324 | vm->fpde = offset >> pgt_bits; | ||
325 | vm->lpde = (offset + length - 1) >> pgt_bits; | ||
326 | vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL); | ||
327 | if (!vm->pgt) { | ||
328 | kfree(vm); | ||
329 | return -ENOMEM; | ||
330 | } | ||
331 | |||
332 | INIT_LIST_HEAD(&vm->pgd_list); | ||
333 | vm->dev = dev; | ||
334 | vm->refcount = 1; | ||
335 | vm->pgt_bits = pgt_bits - 12; | ||
336 | |||
337 | ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, | ||
338 | block >> 12); | ||
339 | if (ret) { | ||
340 | kfree(vm); | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | *pvm = vm; | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static int | ||
349 | nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) | ||
350 | { | ||
351 | struct nouveau_vm_pgd *vpgd; | ||
352 | int i; | ||
353 | |||
354 | if (!pgd) | ||
355 | return 0; | ||
356 | |||
357 | vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); | ||
358 | if (!vpgd) | ||
359 | return -ENOMEM; | ||
360 | |||
361 | nouveau_gpuobj_ref(pgd, &vpgd->obj); | ||
362 | |||
363 | mutex_lock(&vm->mm->mutex); | ||
364 | for (i = vm->fpde; i <= vm->lpde; i++) | ||
365 | vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); | ||
366 | list_add(&vpgd->head, &vm->pgd_list); | ||
367 | mutex_unlock(&vm->mm->mutex); | ||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static void | ||
372 | nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) | ||
373 | { | ||
374 | struct nouveau_vm_pgd *vpgd, *tmp; | ||
375 | struct nouveau_gpuobj *pgd = NULL; | ||
376 | |||
377 | if (!mpgd) | ||
378 | return; | ||
379 | |||
380 | mutex_lock(&vm->mm->mutex); | ||
381 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | ||
382 | if (vpgd->obj == mpgd) { | ||
383 | pgd = vpgd->obj; | ||
384 | list_del(&vpgd->head); | ||
385 | kfree(vpgd); | ||
386 | break; | ||
387 | } | ||
388 | } | ||
389 | mutex_unlock(&vm->mm->mutex); | ||
390 | |||
391 | nouveau_gpuobj_ref(NULL, &pgd); | ||
392 | } | ||
393 | |||
394 | static void | ||
395 | nouveau_vm_del(struct nouveau_vm *vm) | ||
396 | { | ||
397 | struct nouveau_vm_pgd *vpgd, *tmp; | ||
398 | |||
399 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | ||
400 | nouveau_vm_unlink(vm, vpgd->obj); | ||
401 | } | ||
402 | |||
403 | nouveau_mm_fini(&vm->mm); | ||
404 | kfree(vm->pgt); | ||
405 | kfree(vm); | ||
406 | } | ||
407 | |||
408 | int | ||
409 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, | ||
410 | struct nouveau_gpuobj *pgd) | ||
411 | { | ||
412 | struct nouveau_vm *vm; | ||
413 | int ret; | ||
414 | |||
415 | vm = ref; | ||
416 | if (vm) { | ||
417 | ret = nouveau_vm_link(vm, pgd); | ||
418 | if (ret) | ||
419 | return ret; | ||
420 | |||
421 | vm->refcount++; | ||
422 | } | ||
423 | |||
424 | vm = *ptr; | ||
425 | *ptr = ref; | ||
426 | |||
427 | if (vm) { | ||
428 | nouveau_vm_unlink(vm, pgd); | ||
429 | |||
430 | if (--vm->refcount == 0) | ||
431 | nouveau_vm_del(vm); | ||
432 | } | ||
433 | |||
434 | return 0; | ||
435 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h new file mode 100644 index 00000000000..579ca8cc223 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
#ifndef __NOUVEAU_VM_H__
#define __NOUVEAU_VM_H__

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_mm.h"

/* Page tables backing one page-directory entry.  obj[0]/obj[1] are the
 * two page-size variants (small/large); refcount[] counts the mappings
 * currently held through each table. */
struct nouveau_vm_pgt {
	struct nouveau_gpuobj *obj[2];
	u32 refcount[2];
};

/* Links one hardware page-directory object into a VM's pgd_list. */
struct nouveau_vm_pgd {
	struct list_head head;
	struct nouveau_gpuobj *obj;
};

/* One allocation (virtual address range) inside a VM. */
struct nouveau_vma {
	struct list_head head;
	int refcount;
	struct nouveau_vm *vm;
	struct nouveau_mm_node *node;	/* backing range in vm->mm */
	u64 offset;			/* GPU virtual base address */
	u32 access;			/* access flags applied on map */
};

/* A GPU virtual address space.  The map*/unmap/flush hooks are filled
 * in per-chipset (nv50_vm.c / nvc0_vm.c, prototyped below). */
struct nouveau_vm {
	struct drm_device *dev;
	struct nouveau_mm *mm;		/* address-space allocator */
	int refcount;			/* managed by nouveau_vm_ref() */

	struct list_head pgd_list;	/* nouveau_vm_pgd entries */
	atomic_t engref[16];		/* per-engine reference counts */

	struct nouveau_vm_pgt *pgt;	/* page-table slots, fpde..lpde */
	u32 fpde;			/* first populated PDE index */
	u32 lpde;			/* last populated PDE index */

	u32 pgt_bits;			/* VA bits covered by one pgt */
	u8  spg_shift;			/* small page shift (log2 bytes) */
	u8  lpg_shift;			/* large page shift (log2 bytes) */

	/* Write both page-table pointers into PDE @pde of @pgd. */
	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
			struct nouveau_gpuobj *pgt[2]);
	/* Map @cnt contiguous PTEs starting at @pte to VRAM at @phys. */
	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
		    struct nouveau_mem *, u32 pte, u32 cnt,
		    u64 phys, u64 delta);
	/* Map @cnt PTEs from a scatter list of DMA addresses. */
	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
	/* Invalidate @cnt PTEs starting at @pte. */
	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
	/* Flush the TLBs after page-table updates. */
	void (*flush)(struct nouveau_vm *);
};

/* nouveau_vm.c */
int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
		   struct nouveau_vm **);
int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
		   struct nouveau_gpuobj *pgd);
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
		   u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
		       struct nouveau_mem *, dma_addr_t *);

/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		     struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);

/* nvc0_vm.c */
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		     struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);

#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c new file mode 100644 index 00000000000..638cf601c42 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fb.c | |||
@@ -0,0 +1,21 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
/* Minimal PFB (framebuffer/memory controller) setup for NV04-class
 * boards.  Always returns 0. */
int
nv04_fb_init(struct drm_device *dev)
{
	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
	 * nvidia reading PFB_CFG_0, then writing back its original value.
	 * (which was 0x701114 in this case)
	 */

	nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
	return 0;
}
17 | |||
/* Nothing to undo for NV04 PFB; hook kept for engine-table symmetry. */
void
nv04_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c new file mode 100644 index 00000000000..db465a3ee1b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c | |||
@@ -0,0 +1,543 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | #include "nouveau_util.h" | ||
32 | |||
/* Per-channel RAMFC context layout: 32 bytes per channel at a fixed
 * instmem offset.  NV04_RAMFC(c) yields the instance address of
 * channel @c's slot; the *_DMA_* offsets name the saved PFIFO regs. */
#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
#define NV04_RAMFC_DMA_PUT 0x00
#define NV04_RAMFC_DMA_GET 0x04
#define NV04_RAMFC_DMA_INSTANCE 0x08
#define NV04_RAMFC_DMA_STATE 0x0C
#define NV04_RAMFC_DMA_FETCH 0x10
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18

/* Read/write a field of the current channel's RAMFC (needs a local
 * "chan" with a valid chan->ramfc in scope). */
#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
#define RAMFC_RD(offset)      nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
45 | |||
/* Stop PFIFO command processing: clear the DMA pusher enable, the
 * CACHE1 pusher and the puller enable bits (read-modify-write to
 * preserve the other bits in each register). */
void
nv04_fifo_disable(struct drm_device *dev)
{
	uint32_t tmp;

	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
	tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
}
57 | |||
/* Re-enable PFIFO command processing (pusher and puller), the inverse
 * of nv04_fifo_disable(). */
void
nv04_fifo_enable(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
64 | |||
65 | bool | ||
66 | nv04_fifo_reassign(struct drm_device *dev, bool enable) | ||
67 | { | ||
68 | uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES); | ||
69 | |||
70 | nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0); | ||
71 | return (reassign == 1); | ||
72 | } | ||
73 | |||
/* Enable/disable the CACHE1 puller, returning its previous enable
 * state.  Disabling additionally waits for the hash lookup to idle and
 * clears any stale lookup result, working around hardware quirks. */
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
	int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);

	if (!enable) {
		/* In some cases the PFIFO puller may be left in an
		 * inconsistent state if you try to stop it when it's
		 * busy translating handles. Sometimes you get a
		 * PFIFO_CACHE_ERROR, sometimes it just fails silently
		 * sending incorrect instance offsets to PGRAPH after
		 * it's started up again. To avoid the latter we
		 * invalidate the most recently calculated instance.
		 */
		if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
			     NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
			NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");

		/* Ack a pending CACHE_ERROR caused by the failed lookup. */
		if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
		    NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

		nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
	}

	return pull & 1;
}
102 | |||
/* Return the channel id currently resident in PFIFO CACHE1. */
int
nv04_fifo_channel_id(struct drm_device *dev)
{
	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
			NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
109 | |||
/* Extra DMA_FETCH flag needed on big-endian hosts so PFIFO byte-swaps
 * fetched pushbuffer data. */
#ifdef __BIG_ENDIAN
#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
#else
#define DMA_FETCH_ENDIANNESS 0
#endif
115 | |||
/* Allocate per-channel PFIFO state: a fake gpuobj wrapping the
 * channel's fixed RAMFC slot, an ioremap of the channel's USER
 * submission registers, and the initial DMA pointers.  Returns 0 on
 * success or a negative errno. */
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	int ret;

	/* RAMFC entries live at fixed instmem offsets; wrap this
	 * channel's slot in a gpuobj so RAMFC_WR/RAMFC_RD can use it. */
	ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
						NV04_RAMFC__SIZE,
						NVOBJ_FLAG_ZERO_ALLOC |
						NVOBJ_FLAG_ZERO_FREE,
						&chan->ramfc);
	if (ret)
		return ret;

	/* Map the per-channel USER area from BAR0.
	 * NOTE(review): on failure chan->ramfc is not released here —
	 * presumably the caller's channel-destroy path (which calls
	 * nv04_fifo_destroy_context) cleans it up; confirm. */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV03_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Setup initial state */
	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
	RAMFC_WR(DMA_GET, chan->pushbuf_base);
	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
	RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
			     DMA_FETCH_ENDIANNESS));

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
155 | |||
/* Tear down a channel's PFIFO state: evict it from CACHE1 if it is the
 * active channel, stop it from being scheduled, then release the USER
 * mapping and the RAMFC gpuobj created by nv04_fifo_create_context(). */
void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;

	/* Hold off context switches while we manipulate CACHE1. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);

	/* Unload the context if it's the currently active one */
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}

	/* Keep it from being rescheduled */
	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);

	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the channel resources */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
187 | |||
/* Load channel @chid's saved RAMFC state into the live CACHE1
 * registers (the raw register restore shared by load_context and
 * unload_context; offsets match the NV04_RAMFC_* layout above). */
static void
nv04_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV04_RAMFC(chid), tmp;

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	/* DMA_INSTANCE word also carries DCOUNT in its upper half. */
	tmp = nv_ri32(dev, fc + 8);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));

	/* Reset the CACHE1 ring pointers for the incoming channel. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
207 | |||
/* Make @chan the active CACHE1 channel: select its id, restore its
 * RAMFC state and re-enable DMA mode.  Always returns 0. */
int
nv04_fifo_load_context(struct nouveau_channel *chan)
{
	uint32_t tmp;

	nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
			   NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv04_fifo_do_load_context(chan->dev, chan->id);
	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
224 | |||
/* Save the currently active channel's CACHE1 state back to its RAMFC
 * slot, then park CACHE1 on the highest (dummy) channel id.  Returns 0
 * if there was nothing to save, -EINVAL if the resident channel id has
 * no software channel. */
int
nv04_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan = NULL;
	uint32_t tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;

	/* RAMFC_WR below needs "chan" for the channel's ramfc gpuobj. */
	chan = dev_priv->channels.ptr[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}

	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	/* DCOUNT and INSTANCE share one RAMFC word (see do_load_context). */
	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
	tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
	RAMFC_WR(DMA_INSTANCE, tmp);
	RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));

	/* Park CACHE1 on the reserved last channel. */
	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
	return 0;
}
258 | |||
/* Pulse the PMC enable bit for PFIFO to reset the engine, then program
 * a known-good set of PFIFO registers (values taken from mmio traces;
 * most are undocumented). */
static void
nv04_fifo_init_reset(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x002044, 0x0101ffff);
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003000, 0x00000000);
	nv_wr32(dev, 0x003050, 0x00000000);
	nv_wr32(dev, 0x003200, 0x00000000);
	nv_wr32(dev, 0x003250, 0x00000000);
	nv_wr32(dev, 0x003220, 0x00000000);

	/* NOTE(review): 0x003250 is written twice above/below — looks
	 * copied from a trace; presumably harmless, confirm if tidying. */
	nv_wr32(dev, 0x003250, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
	nv_wr32(dev, 0x003210, 0x00000000);
}
281 | |||
/* Point PFIFO at the instmem locations of RAMHT (object hash table),
 * RAMRO (runout buffer) and RAMFC (per-channel contexts). */
static void
nv04_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* RAMHT word: search depth, log2 size (relative to 512 entries),
	 * and instance address >> 8. */
	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht->bits - 9) << 16) |
				       (dev_priv->ramht->gpuobj->pinst >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
	nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
}
293 | |||
/* Hook up the PFIFO interrupt handler (PMC interrupt line 8), ack any
 * pending interrupts (0x002100) and unmask them all (0x002140). */
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
301 | |||
/* Bring up the PFIFO engine: reset it, program the RAM* pointers, park
 * CACHE1 on the dummy channel, enable interrupts and processing, and
 * re-enable DMA mode for every already-created channel (resume path).
 * Always returns 0. */
int
nv04_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv04_fifo_init_reset(dev);
	nv04_fifo_init_ramxx(dev);

	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv04_fifo_init_intr(dev);
	pfifo->enable(dev);
	pfifo->reassign(dev, true);

	/* Re-arm DMA mode for channels that survived a suspend/reset. */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->channels.ptr[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
328 | |||
/* Shut down PFIFO interrupt handling: mask all PFIFO interrupts and
 * unregister the handler installed by nv04_fifo_init_intr(). */
void
nv04_fifo_fini(struct drm_device *dev)
{
	nv_wr32(dev, 0x2140, 0x00000000);
	nouveau_irq_unregister(dev, 8);
}
335 | |||
336 | static bool | ||
337 | nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) | ||
338 | { | ||
339 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
340 | struct nouveau_channel *chan = NULL; | ||
341 | struct nouveau_gpuobj *obj; | ||
342 | unsigned long flags; | ||
343 | const int subc = (addr >> 13) & 0x7; | ||
344 | const int mthd = addr & 0x1ffc; | ||
345 | bool handled = false; | ||
346 | u32 engine; | ||
347 | |||
348 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
349 | if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) | ||
350 | chan = dev_priv->channels.ptr[chid]; | ||
351 | if (unlikely(!chan)) | ||
352 | goto out; | ||
353 | |||
354 | switch (mthd) { | ||
355 | case 0x0000: /* bind object to subchannel */ | ||
356 | obj = nouveau_ramht_find(chan, data); | ||
357 | if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) | ||
358 | break; | ||
359 | |||
360 | chan->sw_subchannel[subc] = obj->class; | ||
361 | engine = 0x0000000f << (subc * 4); | ||
362 | |||
363 | nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); | ||
364 | handled = true; | ||
365 | break; | ||
366 | default: | ||
367 | engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); | ||
368 | if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) | ||
369 | break; | ||
370 | |||
371 | if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], | ||
372 | mthd, data)) | ||
373 | handled = true; | ||
374 | break; | ||
375 | } | ||
376 | |||
377 | out: | ||
378 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
379 | return handled; | ||
380 | } | ||
381 | |||
382 | static const char *nv_dma_state_err(u32 state) | ||
383 | { | ||
384 | static const char * const desc[] = { | ||
385 | "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE", | ||
386 | "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK" | ||
387 | }; | ||
388 | return desc[(state >> 29) & 0x7]; | ||
389 | } | ||
390 | |||
/* PFIFO interrupt handler (PMC line 8), shared by NV04 through NV50.
 * Loops acking interrupt causes until none remain or 100 iterations
 * pass; handles CACHE_ERROR (try software-method dispatch, else skip
 * the entry), DMA_PUSHER (report and resynchronize the pusher) and
 * SEMAPHORE faults, plus an NV50-only VM fault bit.  Anything left
 * after that is acked blind; if the loop limit is hit all PFIFO
 * interrupts are masked off. */
void
nv04_fifo_isr(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	/* Remember channel-switching state; it is suppressed per-loop
	 * iteration and restored at the bottom. */
	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			/* CACHE1 method/data slots moved on NV40+. */
			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			/* Try software-method dispatch; log on failure. */
			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			/* Ack, then step CACHE1 past the bad entry with
			 * the pusher paused, and restart the puller. */
			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				/* NV50 adds 40-bit GET/PUT (high words) and
				 * an indirect buffer (IB) get/put pair. */
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x (err: %s) Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						nv_dma_state_err(state),
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				/* Resync by jumping GET to PUT (skipping the
				 * commands the pusher choked on). */
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
					chid, dma_get, dma_put, state,
					nv_dma_state_err(state), push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			/* Clear DMA_STATE and re-arm the pusher, then ack. */
			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			/* Set the acquire-failed bit and skip the entry. */
			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			/* Bit 4 is a VM fault on NV50; hand it to PFB. */
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, nouveau_ratelimit());
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		/* Unknown leftovers: log (rate-limited) and ack blind. */
		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	/* Still interrupting after 100 passes: mask PFIFO off entirely. */
	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c new file mode 100644 index 00000000000..dbdea8ed392 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
@@ -0,0 +1,1353 @@ | |||
1 | /* | ||
2 | * Copyright 2007 Stephane Marchesin | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "drm.h" | ||
27 | #include "nouveau_drm.h" | ||
28 | #include "nouveau_drv.h" | ||
29 | #include "nouveau_hw.h" | ||
30 | #include "nouveau_util.h" | ||
31 | #include "nouveau_ramht.h" | ||
32 | |||
/* Per-device NV04 PGRAPH engine instance; wraps the generic exec-engine
 * base so it can be registered with the common engine framework. */
struct nv04_graph_engine {
	struct nouveau_exec_engine base;
};
36 | |||
/* List of PGRAPH MMIO registers that make up a channel's graphics context
 * on NV04/NV05.  Context switching is done in software: load/unload simply
 * copy each register listed here to/from the per-channel save area, so the
 * order of entries defines the layout of struct graph_state::nv04. */
static uint32_t nv04_graph_ctx_regs[] = {
	/* surf3d clip window (horizontal/vertical min/max) */
	0x0040053c,
	0x00400544,
	0x00400540,
	0x00400548,
	/* current grobj / subchannel context cache */
	NV04_PGRAPH_CTX_SWITCH1,
	NV04_PGRAPH_CTX_SWITCH2,
	NV04_PGRAPH_CTX_SWITCH3,
	NV04_PGRAPH_CTX_SWITCH4,
	NV04_PGRAPH_CTX_CACHE1,
	NV04_PGRAPH_CTX_CACHE2,
	NV04_PGRAPH_CTX_CACHE3,
	NV04_PGRAPH_CTX_CACHE4,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	0x00400174,
	/* DMA state */
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV04_PGRAPH_DMA_PITCH,
	/* surface buffers 0-5: offset/base/limit */
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV04_PGRAPH_SURFACE,
	NV04_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV04_PGRAPH_NOTIFY,
	/* 2d pattern state: colors + 64-entry pattern color RAM */
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM+0x00,
	NV04_PGRAPH_PATT_COLORRAM+0x04,
	NV04_PGRAPH_PATT_COLORRAM+0x08,
	NV04_PGRAPH_PATT_COLORRAM+0x0c,
	NV04_PGRAPH_PATT_COLORRAM+0x10,
	NV04_PGRAPH_PATT_COLORRAM+0x14,
	NV04_PGRAPH_PATT_COLORRAM+0x18,
	NV04_PGRAPH_PATT_COLORRAM+0x1c,
	NV04_PGRAPH_PATT_COLORRAM+0x20,
	NV04_PGRAPH_PATT_COLORRAM+0x24,
	NV04_PGRAPH_PATT_COLORRAM+0x28,
	NV04_PGRAPH_PATT_COLORRAM+0x2c,
	NV04_PGRAPH_PATT_COLORRAM+0x30,
	NV04_PGRAPH_PATT_COLORRAM+0x34,
	NV04_PGRAPH_PATT_COLORRAM+0x38,
	NV04_PGRAPH_PATT_COLORRAM+0x3c,
	NV04_PGRAPH_PATT_COLORRAM+0x40,
	NV04_PGRAPH_PATT_COLORRAM+0x44,
	NV04_PGRAPH_PATT_COLORRAM+0x48,
	NV04_PGRAPH_PATT_COLORRAM+0x4c,
	NV04_PGRAPH_PATT_COLORRAM+0x50,
	NV04_PGRAPH_PATT_COLORRAM+0x54,
	NV04_PGRAPH_PATT_COLORRAM+0x58,
	NV04_PGRAPH_PATT_COLORRAM+0x5c,
	NV04_PGRAPH_PATT_COLORRAM+0x60,
	NV04_PGRAPH_PATT_COLORRAM+0x64,
	NV04_PGRAPH_PATT_COLORRAM+0x68,
	NV04_PGRAPH_PATT_COLORRAM+0x6c,
	NV04_PGRAPH_PATT_COLORRAM+0x70,
	NV04_PGRAPH_PATT_COLORRAM+0x74,
	NV04_PGRAPH_PATT_COLORRAM+0x78,
	NV04_PGRAPH_PATT_COLORRAM+0x7c,
	NV04_PGRAPH_PATT_COLORRAM+0x80,
	NV04_PGRAPH_PATT_COLORRAM+0x84,
	NV04_PGRAPH_PATT_COLORRAM+0x88,
	NV04_PGRAPH_PATT_COLORRAM+0x8c,
	NV04_PGRAPH_PATT_COLORRAM+0x90,
	NV04_PGRAPH_PATT_COLORRAM+0x94,
	NV04_PGRAPH_PATT_COLORRAM+0x98,
	NV04_PGRAPH_PATT_COLORRAM+0x9c,
	NV04_PGRAPH_PATT_COLORRAM+0xa0,
	NV04_PGRAPH_PATT_COLORRAM+0xa4,
	NV04_PGRAPH_PATT_COLORRAM+0xa8,
	NV04_PGRAPH_PATT_COLORRAM+0xac,
	NV04_PGRAPH_PATT_COLORRAM+0xb0,
	NV04_PGRAPH_PATT_COLORRAM+0xb4,
	NV04_PGRAPH_PATT_COLORRAM+0xb8,
	NV04_PGRAPH_PATT_COLORRAM+0xbc,
	NV04_PGRAPH_PATT_COLORRAM+0xc0,
	NV04_PGRAPH_PATT_COLORRAM+0xc4,
	NV04_PGRAPH_PATT_COLORRAM+0xc8,
	NV04_PGRAPH_PATT_COLORRAM+0xcc,
	NV04_PGRAPH_PATT_COLORRAM+0xd0,
	NV04_PGRAPH_PATT_COLORRAM+0xd4,
	NV04_PGRAPH_PATT_COLORRAM+0xd8,
	NV04_PGRAPH_PATT_COLORRAM+0xdc,
	NV04_PGRAPH_PATT_COLORRAM+0xe0,
	NV04_PGRAPH_PATT_COLORRAM+0xe4,
	NV04_PGRAPH_PATT_COLORRAM+0xe8,
	NV04_PGRAPH_PATT_COLORRAM+0xec,
	NV04_PGRAPH_PATT_COLORRAM+0xf0,
	NV04_PGRAPH_PATT_COLORRAM+0xf4,
	NV04_PGRAPH_PATT_COLORRAM+0xf8,
	NV04_PGRAPH_PATT_COLORRAM+0xfc,
	NV04_PGRAPH_PATTERN,
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	0x00400600,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	NV04_PGRAPH_CONTROL0,
	NV04_PGRAPH_CONTROL1,
	NV04_PGRAPH_CONTROL2,
	NV04_PGRAPH_BLEND,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	0x00400560,
	0x00400568,
	0x00400564,
	0x0040056c,
	0x00400400,
	0x00400480,
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	0x00400534,
	0x00400538,
	0x00400514,
	0x00400518,
	0x0040051c,
	0x00400520,
	0x00400524,
	0x00400528,
	0x0040052c,
	0x00400530,
	0x00400d00,
	0x00400d40,
	0x00400d80,
	0x00400d04,
	0x00400d44,
	0x00400d84,
	0x00400d08,
	0x00400d48,
	0x00400d88,
	0x00400d0c,
	0x00400d4c,
	0x00400d8c,
	0x00400d10,
	0x00400d50,
	0x00400d90,
	0x00400d14,
	0x00400d54,
	0x00400d94,
	0x00400d18,
	0x00400d58,
	0x00400d98,
	0x00400d1c,
	0x00400d5c,
	0x00400d9c,
	0x00400d20,
	0x00400d60,
	0x00400da0,
	0x00400d24,
	0x00400d64,
	0x00400da4,
	0x00400d28,
	0x00400d68,
	0x00400da8,
	0x00400d2c,
	0x00400d6c,
	0x00400dac,
	0x00400d30,
	0x00400d70,
	0x00400db0,
	0x00400d34,
	0x00400d74,
	0x00400db4,
	0x00400d38,
	0x00400d78,
	0x00400db8,
	0x00400d3c,
	0x00400d7c,
	0x00400dbc,
	0x00400590,
	0x00400594,
	0x00400598,
	0x0040059c,
	0x004005a8,
	0x004005ac,
	0x004005b0,
	0x004005b4,
	0x004005c0,
	0x004005c4,
	0x004005c8,
	0x004005cc,
	0x004005d0,
	0x004005d4,
	0x004005d8,
	0x004005dc,
	0x004005e0,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV04_PGRAPH_DVD_COLORFMT,
	NV04_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	0x00400500,
	0x00400504,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
	NV04_PGRAPH_DEBUG_3
};

/* Per-channel PGRAPH context save area: one slot per entry in
 * nv04_graph_ctx_regs[], in the same order. */
struct graph_state {
	uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
354 | |||
355 | static struct nouveau_channel * | ||
356 | nv04_graph_channel(struct drm_device *dev) | ||
357 | { | ||
358 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
359 | int chid = dev_priv->engine.fifo.channels; | ||
360 | |||
361 | if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) | ||
362 | chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24; | ||
363 | |||
364 | if (chid >= dev_priv->engine.fifo.channels) | ||
365 | return NULL; | ||
366 | |||
367 | return dev_priv->channels.ptr[chid]; | ||
368 | } | ||
369 | |||
370 | static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) | ||
371 | { | ||
372 | int i; | ||
373 | |||
374 | for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) { | ||
375 | if (nv04_graph_ctx_regs[i] == reg) | ||
376 | return &ctx->nv04[i]; | ||
377 | } | ||
378 | |||
379 | return NULL; | ||
380 | } | ||
381 | |||
/* Restore a channel's saved PGRAPH state into the hardware and mark the
 * channel as the currently resident one.  Always returns 0. */
static int
nv04_graph_load_context(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	uint32_t tmp;
	int i;

	/* Write back every register in the software context image. */
	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);

	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);

	/* Stamp the channel id into the top byte of CTX_USER. */
	tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);

	/* Clear the upper bits of FFINTFC_ST2 — NOTE(review): exact
	 * semantics of those bits unconfirmed from this file. */
	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);

	return 0;
}
403 | |||
/* Save the currently resident channel's PGRAPH state to its software
 * context image and point the hardware at an out-of-range channel id so
 * no channel is considered loaded.  Returns 0 (also when no channel was
 * resident). */
static int
nv04_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct graph_state *ctx;
	uint32_t tmp;
	int i;

	chan = nv04_graph_channel(dev);
	if (!chan)
		return 0;
	ctx = chan->engctx[NVOBJ_ENGINE_GR];

	/* Snapshot every tracked register into the save area. */
	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);

	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	/* Set CTX_USER's channel field to the last (invalid for normal use)
	 * fifo channel id. */
	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
	return 0;
}
427 | |||
428 | static int | ||
429 | nv04_graph_context_new(struct nouveau_channel *chan, int engine) | ||
430 | { | ||
431 | struct graph_state *pgraph_ctx; | ||
432 | NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id); | ||
433 | |||
434 | pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL); | ||
435 | if (pgraph_ctx == NULL) | ||
436 | return -ENOMEM; | ||
437 | |||
438 | *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; | ||
439 | |||
440 | chan->engctx[engine] = pgraph_ctx; | ||
441 | return 0; | ||
442 | } | ||
443 | |||
/* Tear down a channel's PGRAPH context.  PGRAPH fifo access is disabled
 * under the context-switch lock while the (possibly active) context is
 * unloaded, then the software image is freed. */
static void
nv04_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->engctx[engine];
	unsigned long flags;

	/* Block context switches while we fiddle with PGRAPH state. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Unload the context if it's the currently active one */
	if (nv04_graph_channel(dev) == chan)
		nv04_graph_unload_context(dev);

	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	kfree(pgraph_ctx);
	chan->engctx[engine] = NULL;
}
466 | |||
/* Create a 16-byte graphics object (grobj) of the given class for a
 * channel and insert it into the channel's RAMHT under 'handle'.
 * Returns 0 on success or a negative errno. */
int
nv04_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class  = class;

	/* Word 0 holds the class; on big-endian hosts an extra flag bit
	 * (0x00080000) is set as well. */
#ifdef __BIG_ENDIAN
	nv_wo32(obj, 0x00, 0x00080000 | class);
#else
	nv_wo32(obj, 0x00, class);
#endif
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);

	/* RAMHT keeps its own reference; drop ours regardless of outcome. */
	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
494 | |||
/* Bring the NV04 PGRAPH engine up: reset it via PMC, enable interrupts
 * and program the DEBUG/CTX registers to known-good values.  The write
 * order below matters; do not reorder.  Always returns 0. */
static int
nv04_graph_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Pulse the PGRAPH enable bit in PMC low then high to reset the
	 * engine. */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Enable PGRAPH interrupts */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
	nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/*1231C000 blob, 001 haiku*/
	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/*0x72111100 blob , 01 haiku*/
	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
	/*haiku same*/

	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
	/*haiku and blob 10d4*/

	nv_wr32(dev, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
	/* Park CTX_USER on the last channel id (no channel resident). */
	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);

	/* These don't belong here, they're part of a per-channel context */
	nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);

	return 0;
}
539 | |||
/* Quiesce PGRAPH: stall its fifo, wait for idle, save the active
 * context and mask interrupts.  On suspend, a failure to idle aborts
 * with -EBUSY (fifo re-enabled); otherwise the engine is torn down
 * regardless. */
static int
nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
		return -EBUSY;
	}
	nv04_graph_unload_context(dev);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
	return 0;
}
552 | |||
/* Software method: record the sequence number the channel just wrote so
 * fence completion can be observed.  Always returns 0 (handled). */
static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
			u32 class, u32 mthd, u32 data)
{
	atomic_set(&chan->fence.last_sequence_irq, data);
	return 0;
}
560 | |||
/* Software method: complete a pending page flip and, on success, point
 * the CRTC at the byte offset of pixel (x, y) in the new framebuffer.
 * Always returns 0 (handled). */
int
nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_page_flip_state s;

	if (!nouveau_finish_page_flip(chan, &s))
		nv_set_crtc_base(dev, s.crtc,
				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);

	return 0;
}
574 | |||
575 | /* | ||
576 | * Software methods, why they are needed, and how they all work: | ||
577 | * | ||
578 | * NV04 and NV05 keep most of the state in PGRAPH context itself, but some | ||
579 | * 2d engine settings are kept inside the grobjs themselves. The grobjs are | ||
580 | * 3 words long on both. grobj format on NV04 is: | ||
581 | * | ||
582 | * word 0: | ||
583 | * - bits 0-7: class | ||
584 | * - bit 12: color key active | ||
585 | * - bit 13: clip rect active | ||
586 | * - bit 14: if set, destination surface is swizzled and taken from buffer 5 | ||
587 | * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken | ||
588 | * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or | ||
589 | * NV03_CONTEXT_SURFACE_DST]. | ||
590 | * - bits 15-17: 2d operation [aka patch config] | ||
591 | * - bit 24: patch valid [enables rendering using this object] | ||
592 | * - bit 25: surf3d valid [for tex_tri and multitex_tri only] | ||
593 | * word 1: | ||
594 | * - bits 0-1: mono format | ||
595 | * - bits 8-13: color format | ||
596 | * - bits 16-31: DMA_NOTIFY instance | ||
597 | * word 2: | ||
598 | * - bits 0-15: DMA_A instance | ||
599 | * - bits 16-31: DMA_B instance | ||
600 | * | ||
601 | * On NV05 it's: | ||
602 | * | ||
603 | * word 0: | ||
604 | * - bits 0-7: class | ||
605 | * - bit 12: color key active | ||
606 | * - bit 13: clip rect active | ||
607 | * - bit 14: if set, destination surface is swizzled and taken from buffer 5 | ||
608 | * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken | ||
609 | * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or | ||
610 | * NV03_CONTEXT_SURFACE_DST]. | ||
611 | * - bits 15-17: 2d operation [aka patch config] | ||
612 | * - bits 20-22: dither mode | ||
613 | * - bit 24: patch valid [enables rendering using this object] | ||
614 | * - bit 25: surface_dst/surface_color/surf2d/surf3d valid | ||
615 | * - bit 26: surface_src/surface_zeta valid | ||
616 | * - bit 27: pattern valid | ||
617 | * - bit 28: rop valid | ||
618 | * - bit 29: beta1 valid | ||
619 | * - bit 30: beta4 valid | ||
620 | * word 1: | ||
621 | * - bits 0-1: mono format | ||
622 | * - bits 8-13: color format | ||
623 | * - bits 16-31: DMA_NOTIFY instance | ||
624 | * word 2: | ||
625 | * - bits 0-15: DMA_A instance | ||
626 | * - bits 16-31: DMA_B instance | ||
627 | * | ||
628 | * NV05 will set/unset the relevant valid bits when you poke the relevant | ||
629 | * object-binding methods with object of the proper type, or with the NULL | ||
630 | * type. It'll only allow rendering using the grobj if all needed objects | ||
631 | * are bound. The needed set of objects depends on selected operation: for | ||
632 | * example rop object is needed by ROP_AND, but not by SRCCOPY_AND. | ||
633 | * | ||
634 | * NV04 doesn't have these methods implemented at all, and doesn't have the | ||
635 | * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24 | ||
636 | * is set. So we have to emulate them in software, internally keeping the | ||
637 | * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04, | ||
638 | * but the last word isn't actually used for anything, we abuse it for this | ||
639 | * purpose. | ||
640 | * | ||
641 | * Actually, NV05 can optionally check bit 24 too, but we disable this since | ||
642 | * there's no use for it. | ||
643 | * | ||
644 | * For unknown reasons, NV04 implements surf3d binding in hardware as an | ||
645 | * exception. Also for unknown reasons, NV04 doesn't implement the clipping | ||
646 | * methods on the surf3d object, so we have to emulate them too. | ||
647 | */ | ||
648 | |||
/* Read-modify-write word 0 ("ctx1") of the currently trapped grobj, both
 * in instance memory and in the live PGRAPH context-switch/cache
 * registers for the trapping subchannel. */
static void
nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
{
	struct drm_device *dev = chan->dev;
	/* Instance address of the current grobj (CTX_SWITCH4 holds it in
	 * 16-byte units). */
	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
	/* Subchannel the trapped method arrived on. */
	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	u32 tmp;

	tmp  = nv_ri32(dev, instance);
	tmp &= ~mask;
	tmp |= value;

	/* Keep instance memory and the hardware's cached copy in sync. */
	nv_wi32(dev, instance, tmp);
	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
665 | |||
/* Update the software-emulated NV05-style validity bits kept in the
 * (otherwise unused) fourth word of the grobj, then recompute the
 * "patch valid" bit (bit 24 of ctx1) from them: rendering is only
 * allowed once every object required by the selected 2d operation is
 * bound.  See the large comment above for the bit layout. */
static void
nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
{
	struct drm_device *dev = chan->dev;
	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
	u32 tmp, ctx1;
	int class, op, valid = 1;

	/* Class and 2d operation come from grobj word 0. */
	ctx1 = nv_ri32(dev, instance);
	class = ctx1 & 0xff;
	op = (ctx1 >> 15) & 7;
	/* Apply the requested change to the validity word (word 3). */
	tmp = nv_ri32(dev, instance + 0xc);
	tmp &= ~mask;
	tmp |= value;
	nv_wi32(dev, instance + 0xc, tmp);

	/* check for valid surf2d/surf_dst/surf_color */
	if (!(tmp & 0x02000000))
		valid = 0;
	/* check for valid surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
		valid = 0;

	switch (op) {
	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
	case 0:
	case 3:
		break;
	/* ROP_AND: requires pattern and rop */
	case 1:
		if (!(tmp & 0x18000000))
			valid = 0;
		break;
	/* BLEND_AND: requires beta1 */
	case 2:
		if (!(tmp & 0x20000000))
			valid = 0;
		break;
	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
	case 4:
	case 5:
		if (!(tmp & 0x40000000))
			valid = 0;
		break;
	}

	/* Propagate the result into the hardware-checked patch-valid bit. */
	nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
}
714 | |||
715 | static int | ||
716 | nv04_graph_mthd_set_operation(struct nouveau_channel *chan, | ||
717 | u32 class, u32 mthd, u32 data) | ||
718 | { | ||
719 | if (data > 5) | ||
720 | return 1; | ||
721 | /* Old versions of the objects only accept first three operations. */ | ||
722 | if (data > 2 && class < 0x40) | ||
723 | return 1; | ||
724 | nv04_graph_set_ctx1(chan, 0x00038000, data << 15); | ||
725 | /* changing operation changes set of objects needed for validation */ | ||
726 | nv04_graph_set_ctx_val(chan, 0, 0); | ||
727 | return 0; | ||
728 | } | ||
729 | |||
730 | static int | ||
731 | nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, | ||
732 | u32 class, u32 mthd, u32 data) | ||
733 | { | ||
734 | uint32_t min = data & 0xffff, max; | ||
735 | uint32_t w = data >> 16; | ||
736 | if (min & 0x8000) | ||
737 | /* too large */ | ||
738 | return 1; | ||
739 | if (w & 0x8000) | ||
740 | /* yes, it accepts negative for some reason. */ | ||
741 | w |= 0xffff0000; | ||
742 | max = min + w; | ||
743 | max &= 0x3ffff; | ||
744 | nv_wr32(chan->dev, 0x40053c, min); | ||
745 | nv_wr32(chan->dev, 0x400544, max); | ||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | static int | ||
750 | nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, | ||
751 | u32 class, u32 mthd, u32 data) | ||
752 | { | ||
753 | uint32_t min = data & 0xffff, max; | ||
754 | uint32_t w = data >> 16; | ||
755 | if (min & 0x8000) | ||
756 | /* too large */ | ||
757 | return 1; | ||
758 | if (w & 0x8000) | ||
759 | /* yes, it accepts negative for some reason. */ | ||
760 | w |= 0xffff0000; | ||
761 | max = min + w; | ||
762 | max &= 0x3ffff; | ||
763 | nv_wr32(chan->dev, 0x400540, min); | ||
764 | nv_wr32(chan->dev, 0x400548, max); | ||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | static int | ||
769 | nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, | ||
770 | u32 class, u32 mthd, u32 data) | ||
771 | { | ||
772 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
773 | case 0x30: | ||
774 | nv04_graph_set_ctx1(chan, 0x00004000, 0); | ||
775 | nv04_graph_set_ctx_val(chan, 0x02000000, 0); | ||
776 | return 0; | ||
777 | case 0x42: | ||
778 | nv04_graph_set_ctx1(chan, 0x00004000, 0); | ||
779 | nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); | ||
780 | return 0; | ||
781 | } | ||
782 | return 1; | ||
783 | } | ||
784 | |||
785 | static int | ||
786 | nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, | ||
787 | u32 class, u32 mthd, u32 data) | ||
788 | { | ||
789 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
790 | case 0x30: | ||
791 | nv04_graph_set_ctx1(chan, 0x00004000, 0); | ||
792 | nv04_graph_set_ctx_val(chan, 0x02000000, 0); | ||
793 | return 0; | ||
794 | case 0x42: | ||
795 | nv04_graph_set_ctx1(chan, 0x00004000, 0); | ||
796 | nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); | ||
797 | return 0; | ||
798 | case 0x52: | ||
799 | nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000); | ||
800 | nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); | ||
801 | return 0; | ||
802 | } | ||
803 | return 1; | ||
804 | } | ||
805 | |||
806 | static int | ||
807 | nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, | ||
808 | u32 class, u32 mthd, u32 data) | ||
809 | { | ||
810 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
811 | case 0x30: | ||
812 | nv04_graph_set_ctx_val(chan, 0x08000000, 0); | ||
813 | return 0; | ||
814 | case 0x18: | ||
815 | nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); | ||
816 | return 0; | ||
817 | } | ||
818 | return 1; | ||
819 | } | ||
820 | |||
821 | static int | ||
822 | nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, | ||
823 | u32 class, u32 mthd, u32 data) | ||
824 | { | ||
825 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
826 | case 0x30: | ||
827 | nv04_graph_set_ctx_val(chan, 0x08000000, 0); | ||
828 | return 0; | ||
829 | case 0x44: | ||
830 | nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); | ||
831 | return 0; | ||
832 | } | ||
833 | return 1; | ||
834 | } | ||
835 | |||
836 | static int | ||
837 | nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, | ||
838 | u32 class, u32 mthd, u32 data) | ||
839 | { | ||
840 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
841 | case 0x30: | ||
842 | nv04_graph_set_ctx_val(chan, 0x10000000, 0); | ||
843 | return 0; | ||
844 | case 0x43: | ||
845 | nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000); | ||
846 | return 0; | ||
847 | } | ||
848 | return 1; | ||
849 | } | ||
850 | |||
851 | static int | ||
852 | nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, | ||
853 | u32 class, u32 mthd, u32 data) | ||
854 | { | ||
855 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
856 | case 0x30: | ||
857 | nv04_graph_set_ctx_val(chan, 0x20000000, 0); | ||
858 | return 0; | ||
859 | case 0x12: | ||
860 | nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000); | ||
861 | return 0; | ||
862 | } | ||
863 | return 1; | ||
864 | } | ||
865 | |||
866 | static int | ||
867 | nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, | ||
868 | u32 class, u32 mthd, u32 data) | ||
869 | { | ||
870 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
871 | case 0x30: | ||
872 | nv04_graph_set_ctx_val(chan, 0x40000000, 0); | ||
873 | return 0; | ||
874 | case 0x72: | ||
875 | nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000); | ||
876 | return 0; | ||
877 | } | ||
878 | return 1; | ||
879 | } | ||
880 | |||
881 | static int | ||
882 | nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, | ||
883 | u32 class, u32 mthd, u32 data) | ||
884 | { | ||
885 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
886 | case 0x30: | ||
887 | nv04_graph_set_ctx_val(chan, 0x02000000, 0); | ||
888 | return 0; | ||
889 | case 0x58: | ||
890 | nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); | ||
891 | return 0; | ||
892 | } | ||
893 | return 1; | ||
894 | } | ||
895 | |||
896 | static int | ||
897 | nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, | ||
898 | u32 class, u32 mthd, u32 data) | ||
899 | { | ||
900 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
901 | case 0x30: | ||
902 | nv04_graph_set_ctx_val(chan, 0x04000000, 0); | ||
903 | return 0; | ||
904 | case 0x59: | ||
905 | nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); | ||
906 | return 0; | ||
907 | } | ||
908 | return 1; | ||
909 | } | ||
910 | |||
911 | static int | ||
912 | nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, | ||
913 | u32 class, u32 mthd, u32 data) | ||
914 | { | ||
915 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
916 | case 0x30: | ||
917 | nv04_graph_set_ctx_val(chan, 0x02000000, 0); | ||
918 | return 0; | ||
919 | case 0x5a: | ||
920 | nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); | ||
921 | return 0; | ||
922 | } | ||
923 | return 1; | ||
924 | } | ||
925 | |||
926 | static int | ||
927 | nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, | ||
928 | u32 class, u32 mthd, u32 data) | ||
929 | { | ||
930 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
931 | case 0x30: | ||
932 | nv04_graph_set_ctx_val(chan, 0x04000000, 0); | ||
933 | return 0; | ||
934 | case 0x5b: | ||
935 | nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); | ||
936 | return 0; | ||
937 | } | ||
938 | return 1; | ||
939 | } | ||
940 | |||
941 | static int | ||
942 | nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, | ||
943 | u32 class, u32 mthd, u32 data) | ||
944 | { | ||
945 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
946 | case 0x30: | ||
947 | nv04_graph_set_ctx1(chan, 0x2000, 0); | ||
948 | return 0; | ||
949 | case 0x19: | ||
950 | nv04_graph_set_ctx1(chan, 0x2000, 0x2000); | ||
951 | return 0; | ||
952 | } | ||
953 | return 1; | ||
954 | } | ||
955 | |||
956 | static int | ||
957 | nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, | ||
958 | u32 class, u32 mthd, u32 data) | ||
959 | { | ||
960 | switch (nv_ri32(chan->dev, data << 4) & 0xff) { | ||
961 | case 0x30: | ||
962 | nv04_graph_set_ctx1(chan, 0x1000, 0); | ||
963 | return 0; | ||
964 | /* Yes, for some reason even the old versions of objects | ||
965 | * accept 0x57 and not 0x17. Consistency be damned. | ||
966 | */ | ||
967 | case 0x57: | ||
968 | nv04_graph_set_ctx1(chan, 0x1000, 0x1000); | ||
969 | return 0; | ||
970 | } | ||
971 | return 1; | ||
972 | } | ||
973 | |||
/* Printable names for NV03_PGRAPH_INTR bits, consumed by
 * nouveau_bitfield_print() in the ISR below. */
static struct nouveau_bitfield nv04_graph_intr[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{}
};
978 | |||
/* Printable names for NV04 PGRAPH NSTATUS bits (error-status decoding
 * for the ISR's diagnostic output). */
static struct nouveau_bitfield nv04_graph_nstatus[] = {
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};
986 | |||
/* Printable names for NV03 PGRAPH NSOURCE bits (cause of a PGRAPH
 * notification/error).  Non-static: shared with other nvXX_graph files. */
struct nouveau_bitfield nv04_graph_nsource[] = {
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
	{}
};
1009 | |||
1010 | static void | ||
1011 | nv04_graph_context_switch(struct drm_device *dev) | ||
1012 | { | ||
1013 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1014 | struct nouveau_channel *chan = NULL; | ||
1015 | int chid; | ||
1016 | |||
1017 | nouveau_wait_for_idle(dev); | ||
1018 | |||
1019 | /* If previous context is valid, we need to save it */ | ||
1020 | nv04_graph_unload_context(dev); | ||
1021 | |||
1022 | /* Load context for next channel */ | ||
1023 | chid = dev_priv->engine.fifo.channel_id(dev); | ||
1024 | chan = dev_priv->channels.ptr[chid]; | ||
1025 | if (chan) | ||
1026 | nv04_graph_load_context(chan); | ||
1027 | } | ||
1028 | |||
/* PGRAPH interrupt service routine.  Drains NV03_PGRAPH_INTR until no
 * status bits remain.  ILLEGAL_MTHD notifications are first offered to
 * the in-kernel software-method handlers registered via NVOBJ_MTHD();
 * anything left unhandled is logged (rate-limited).
 */
static void
nv04_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		/* TRAPPED_ADDR packs channel id, subchannel and method
		 * offset of the faulting command. */
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 chid = (addr & 0x0f000000) >> 24;
		u32 subc = (addr & 0x0000e000) >> 13;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		/* Class currently bound to the faulting subchannel. */
		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_NOTIFY) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				/* Handled in software: suppress logging. */
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_NOTIFY;
			}
		}

		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			/* Ack this bit before performing the switch. */
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv04_graph_context_switch(dev);
		}

		/* Ack the remaining bits; the write to NV04_PGRAPH_FIFO
		 * presumably re-enables command fetch — matches other
		 * nvXX_graph ISRs. */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv04_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
				     "mthd 0x%04x data 0x%08x\n",
				chid, subc, class, mthd, data);
		}
	}
}
1076 | |||
1077 | static void | ||
1078 | nv04_graph_destroy(struct drm_device *dev, int engine) | ||
1079 | { | ||
1080 | struct nv04_graph_engine *pgraph = nv_engine(dev, engine); | ||
1081 | |||
1082 | nouveau_irq_unregister(dev, 12); | ||
1083 | |||
1084 | NVOBJ_ENGINE_DEL(dev, GR); | ||
1085 | kfree(pgraph); | ||
1086 | } | ||
1087 | |||
/* Create and register the NV04 PGRAPH engine: allocate the engine object,
 * fill in its vtable, hook up the PGRAPH interrupt handler (IRQ bit 12),
 * and register every graphics object class this generation supports,
 * together with the software methods that emulate object binding.
 *
 * Returns 0 on success, -ENOMEM if the engine object cannot be allocated.
 */
int
nv04_graph_create(struct drm_device *dev)
{
	struct nv04_graph_engine *pgraph;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv04_graph_destroy;
	pgraph->base.init = nv04_graph_init;
	pgraph->base.fini = nv04_graph_fini;
	pgraph->base.context_new = nv04_graph_context_new;
	pgraph->base.context_del = nv04_graph_context_del;
	pgraph->base.object_new = nv04_graph_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv04_graph_isr);

	/* dvd subpicture */
	NVOBJ_CLASS(dev, 0x0038, GR);

	/* m2mf */
	NVOBJ_CLASS(dev, 0x0039, GR);

	/* nv03 gdirect */
	NVOBJ_CLASS(dev, 0x004b, GR);
	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 gdirect */
	NVOBJ_CLASS(dev, 0x004a, GR);
	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv01 imageblit */
	NVOBJ_CLASS(dev, 0x001f, GR);
	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 imageblit */
	NVOBJ_CLASS(dev, 0x005f, GR);
	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 iifc */
	NVOBJ_CLASS(dev, 0x0060, GR);
	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);

	/* nv05 iifc */
	NVOBJ_CLASS(dev, 0x0064, GR);

	/* nv01 ifc */
	NVOBJ_CLASS(dev, 0x0021, GR);
	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 ifc */
	NVOBJ_CLASS(dev, 0x0061, GR);
	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv05 ifc */
	NVOBJ_CLASS(dev, 0x0065, GR);

	/* nv03 sifc */
	NVOBJ_CLASS(dev, 0x0036, GR);
	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 sifc */
	NVOBJ_CLASS(dev, 0x0076, GR);
	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv05 sifc */
	NVOBJ_CLASS(dev, 0x0066, GR);

	/* nv03 sifm */
	NVOBJ_CLASS(dev, 0x0037, GR);
	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);

	/* nv04 sifm */
	NVOBJ_CLASS(dev, 0x0077, GR);
	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);

	/* null */
	NVOBJ_CLASS(dev, 0x0030, GR);

	/* surf2d */
	NVOBJ_CLASS(dev, 0x0042, GR);

	/* rop */
	NVOBJ_CLASS(dev, 0x0043, GR);

	/* beta1 */
	NVOBJ_CLASS(dev, 0x0012, GR);

	/* beta4 */
	NVOBJ_CLASS(dev, 0x0072, GR);

	/* cliprect */
	NVOBJ_CLASS(dev, 0x0019, GR);

	/* nv01 pattern */
	NVOBJ_CLASS(dev, 0x0018, GR);

	/* nv04 pattern */
	NVOBJ_CLASS(dev, 0x0044, GR);

	/* swzsurf */
	NVOBJ_CLASS(dev, 0x0052, GR);

	/* surf3d */
	NVOBJ_CLASS(dev, 0x0053, GR);
	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);

	/* nv03 tex_tri */
	NVOBJ_CLASS(dev, 0x0048, GR);
	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);

	/* tex_tri */
	NVOBJ_CLASS(dev, 0x0054, GR);

	/* multitex_tri */
	NVOBJ_CLASS(dev, 0x0055, GR);

	/* nv01 chroma */
	NVOBJ_CLASS(dev, 0x0017, GR);

	/* nv04 chroma */
	NVOBJ_CLASS(dev, 0x0057, GR);

	/* surf_dst */
	NVOBJ_CLASS(dev, 0x0058, GR);

	/* surf_src */
	NVOBJ_CLASS(dev, 0x0059, GR);

	/* surf_color */
	NVOBJ_CLASS(dev, 0x005a, GR);

	/* surf_zeta */
	NVOBJ_CLASS(dev, 0x005b, GR);

	/* nv01 line */
	NVOBJ_CLASS(dev, 0x001c, GR);
	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 line */
	NVOBJ_CLASS(dev, 0x005c, GR);
	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv01 tri */
	NVOBJ_CLASS(dev, 0x001d, GR);
	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 tri */
	NVOBJ_CLASS(dev, 0x005d, GR);
	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv01 rect */
	NVOBJ_CLASS(dev, 0x001e, GR);
	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);

	/* nv04 rect */
	NVOBJ_CLASS(dev, 0x005e, GR);
	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c new file mode 100644 index 00000000000..c1248e0740a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c | |||
@@ -0,0 +1,192 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_ramht.h" | ||
5 | |||
6 | /* returns the size of fifo context */ | ||
7 | static int | ||
8 | nouveau_fifo_ctx_size(struct drm_device *dev) | ||
9 | { | ||
10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
11 | |||
12 | if (dev_priv->chipset >= 0x40) | ||
13 | return 128; | ||
14 | else | ||
15 | if (dev_priv->chipset >= 0x17) | ||
16 | return 64; | ||
17 | |||
18 | return 32; | ||
19 | } | ||
20 | |||
/* Initialise PRAMIN (instance memory) for NV04-class chipsets: reserve
 * VRAM for it, carve out the fixed-offset RAMHT/RAMRO/RAMFC structures,
 * then hand the remaining space to a drm_mm heap for object allocation.
 * Returns 0 on success or a negative errno.
 */
int nv04_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	u32 offset, length;
	int ret;

	/* RAMIN always available */
	dev_priv->ramin_available = true;

	/* Reserve space at end of VRAM for PRAMIN */
	if (dev_priv->card_type >= NV_40) {
		u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
		u32 rsvd;

		/* estimate grctx size, the magics come from nv40_grctx.c */
		if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
		else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
		else if (nv44_graph_class(dev))	    rsvd = 0x4980 * vs;
		else				    rsvd = 0x4a40 * vs;
		rsvd += 16 * 1024;
		rsvd *= dev_priv->engine.fifo.channels;

		/* pciegart table */
		if (pci_is_pcie(dev->pdev))
			rsvd += 512 * 1024;

		/* object storage */
		rsvd += 512 * 1024;

		dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
	} else {
		dev_priv->ramin_rsvd_vram = 512 * 1024;
	}

	/* Setup shared RAMHT (hash table at fixed PRAMIN offset 0x10000) */
	ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);	/* ramht holds its own ref now */
	if (ret)
		return ret;

	/* And RAMRO */
	ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
	if (ret)
		return ret;

	/* And RAMFC (per-channel FIFO context; size depends on chipset) */
	length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
	switch (dev_priv->card_type) {
	case NV_40:
		offset = 0x20000;
		break;
	default:
		offset = 0x11400;
		break;
	}

	ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
	if (ret)
		return ret;

	/* Only allow space after RAMFC to be used for object allocation */
	offset += length;

	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
	 * ("new style" control) the upper 16-bits of 0x2220 points at this
	 * other mysterious table that's clobbering important things.
	 *
	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
	 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
	 */
	if (dev_priv->card_type >= NV_40) {
		if (offset < 0x40000)
			offset = 0x40000;
	}

	ret = drm_mm_init(&dev_priv->ramin_heap, offset,
			  dev_priv->ramin_rsvd_vram - offset);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
		return ret;
	}

	return 0;
}
114 | |||
/* Release everything nv04_instmem_init() set up: RAMHT, RAMRO, RAMFC
 * references and the RAMIN allocation heap. */
void
nv04_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);

	/* Heap may never have been initialised if init failed early. */
	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);
}
127 | |||
/* No chipset-specific state to save on NV04; always succeeds. */
int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}
133 | |||
/* Nothing to restore on NV04; intentionally empty. */
void
nv04_instmem_resume(struct drm_device *dev)
{
}
138 | |||
/* Allocate a block of instance memory for @gpuobj from the RAMIN heap.
 * Uses the pre-get/search/get_block_atomic pattern so the actual
 * allocation can happen under a spinlock; loops because another thread
 * may consume the pre-allocated node between iterations.
 * Returns 0 on success, -ENOMEM when the heap is exhausted.
 */
int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_mm_node *ramin = NULL;

	do {
		/* Pre-allocate a node outside the lock. */
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin == NULL) {
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		/* May return NULL if our pre-get was raced away; retry. */
		ramin = drm_mm_get_block_atomic(ramin, size, align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);

	gpuobj->node  = ramin;
	gpuobj->vinst = ramin->start;
	return 0;
}
165 | |||
/* Return @gpuobj's instance-memory block to the RAMIN heap. */
void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

	spin_lock(&dev_priv->ramin_lock);
	drm_mm_put_block(gpuobj->node);
	gpuobj->node = NULL;
	spin_unlock(&dev_priv->ramin_lock);
}
176 | |||
/* On NV04 PRAMIN is directly addressable: the physical instance address
 * doubles as the PRAMIN offset, so "mapping" is a plain copy. */
int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	gpuobj->pinst = gpuobj->vinst;
	return 0;
}
183 | |||
/* Nothing to undo — map is a no-op copy on NV04. */
void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
188 | |||
/* No instance-memory write flushing needed on NV04; intentionally empty. */
void
nv04_instmem_flush(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c new file mode 100644 index 00000000000..2af43a1cb2e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_mc.c | |||
@@ -0,0 +1,24 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
/* Master-control init for NV04: enable all engine power/clock gates and
 * shadow the ROM.  Always returns 0. */
int
nv04_mc_init(struct drm_device *dev)
{
	/* Power up everything, resetting each individual unit will
	 * be done later if needed.
	 */

	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);

	/* Disable PROM access. */
	nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);

	return 0;
}
20 | |||
/* No master-control teardown needed on NV04; intentionally empty. */
void
nv04_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c new file mode 100644 index 00000000000..1d09ddd5739 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_timer.c | |||
@@ -0,0 +1,51 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
/* Initialise PTIMER: mask and ack all timer interrupts, then make sure
 * the clock ratio registers hold something sane.  Always returns 0. */
int
nv04_timer_init(struct drm_device *dev)
{
	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);

	/* Just use the pre-existing values when possible for now; these regs
	 * are not written in nv (driver writer missed a /4 on the address), and
	 * writing 8 and 3 to the correct regs breaks the timings on the LVDS
	 * hardware sequencing microcode.
	 * A correct solution (involving calculations with the GPU PLL) can
	 * be done when kernel modesetting lands
	 */
	if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
	    !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
		/* Only program defaults when either register is zero. */
		nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
		nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
	}

	return 0;
}
27 | |||
/* Read the 64-bit PTIMER counter.  The counter is exposed as two 32-bit
 * registers; the high word is re-read until it is stable around the low
 * word read, so a carry between the two reads cannot corrupt the result.
 */
uint64_t
nv04_timer_read(struct drm_device *dev)
{
	uint32_t low;
	/* From kmmio dumps on nv28 this looks like how the blob does this.
	 * It reads the high dword twice, before and after.
	 * The only explanation seems to be that the 64-bit timer counter
	 * advances between high and low dword reads and may corrupt the
	 * result. Not confirmed.
	 */
	uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
	uint32_t high1;
	do {
		high1 = high2;
		low = nv_rd32(dev, NV04_PTIMER_TIME_0);
		high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
	} while (high1 != high2);	/* retry if the high word rolled over */
	return (((uint64_t)high2) << 32) | (uint64_t)low;
}
47 | |||
/* No PTIMER teardown needed; intentionally empty. */
void
nv04_timer_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c new file mode 100644 index 00000000000..f78181a59b4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_fb.c | |||
@@ -0,0 +1,144 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
/* Allocate @size units from the NV20 on-die tag memory heap.
 * Uses drm_mm's pre-get/atomic-get pattern so the allocation itself can
 * happen under the tile spinlock.  Returns NULL on failure (callers
 * treat a missing tag allocation as "no Z compression"). */
static struct drm_mm_node *
nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct drm_mm_node *mem;
	int ret;

	ret = drm_mm_pre_get(&pfb->tag_heap);
	if (ret)
		return NULL;

	spin_lock(&dev_priv->tile.lock);
	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
	if (mem)
		mem = drm_mm_get_block_atomic(mem, size, 0);
	spin_unlock(&dev_priv->tile.lock);

	return mem;
}
26 | |||
/* Return a tag-memory block allocated by nv20_fb_alloc_tag(). */
static void
nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->tile.lock);
	drm_mm_put_block(mem);
	spin_unlock(&dev_priv->tile.lock);
}
36 | |||
/* Fill in the software state for tiling region @i (address, limit,
 * pitch, enable bits, and — on NV20 with zeta surfaces — a Z-compression
 * tag allocation).  Hardware registers are written later by
 * nv10_fb_set_tile_region(). */
void
nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
			 uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);

	tile->addr = addr;
	tile->limit = max(1u, addr + size) - 1;	/* inclusive end, avoids underflow at size 0 */
	tile->pitch = pitch;

	if (dev_priv->card_type == NV_20) {
		if (flags & NOUVEAU_GEM_TILE_ZETA) {
			/*
			 * Allocate some of the on-die tag memory,
			 * used to store Z compression meta-data (most
			 * likely just a bitmap determining if a given
			 * tile is compressed or not).
			 */
			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);

			if (tile->tag_mem) {
				/* Enable Z compression */
				if (dev_priv->chipset >= 0x25)
					tile->zcomp = tile->tag_mem->start |
						      (bpp == 16 ?
						       NV25_PFB_ZCOMP_MODE_16 :
						       NV25_PFB_ZCOMP_MODE_32);
				else
					tile->zcomp = tile->tag_mem->start |
						      NV20_PFB_ZCOMP_EN |
						      (bpp == 16 ? 0 :
						       NV20_PFB_ZCOMP_MODE_32);
			}

			/* Low addr bits double as enable/type flags. */
			tile->addr |= 3;
		} else {
			tile->addr |= 1;
		}

	} else {
		/* NV10-style enable bit lives in the MSB. */
		tile->addr |= 1 << 31;
	}
}
82 | |||
83 | void | ||
84 | nv10_fb_free_tile_region(struct drm_device *dev, int i) | ||
85 | { | ||
86 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
87 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
88 | |||
89 | if (tile->tag_mem) { | ||
90 | nv20_fb_free_tag(dev, tile->tag_mem); | ||
91 | tile->tag_mem = NULL; | ||
92 | } | ||
93 | |||
94 | tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; | ||
95 | } | ||
96 | |||
/* Push the software state of tiling region @i into the PFB registers
 * (and the ZCOMP register on NV20-class hardware). */
void
nv10_fb_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);

	if (dev_priv->card_type == NV_20)
		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}
110 | |||
/* Initialise the PFB engine for NV10-class hardware: set the tiling
 * region count, create the NV20 tag-memory heap where applicable, and
 * program every region to its (initially disabled) state.
 * Always returns 0. */
int
nv10_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	pfb->num_tiles = NV10_PFB_TILE__SIZE;

	/* NV25+ has twice the tag memory of earlier NV20-class chips. */
	if (dev_priv->card_type == NV_20)
		drm_mm_init(&pfb->tag_heap, 0,
			    (dev_priv->chipset >= 0x25 ?
			     64 * 1024 : 32 * 1024));

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	return 0;
}
131 | |||
/* PFB teardown: free every tiling region, then destroy the NV20
 * tag-memory heap if one was created by nv10_fb_init(). */
void
nv10_fb_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	for (i = 0; i < pfb->num_tiles; i++)
		pfb->free_tile_region(dev, i);

	if (dev_priv->card_type == NV_20)
		drm_mm_takedown(&pfb->tag_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c new file mode 100644 index 00000000000..d2ecbff4bee --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | |||
/* Instance-memory offset of channel <c>'s RAMFC slot.  Entries are 64
 * bytes on chipset >= 0x17 (extended FIFO context), 32 bytes before.
 * Both macros expect a local `dev_priv` to be in scope. */
#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
34 | |||
35 | int | ||
36 | nv10_fifo_channel_id(struct drm_device *dev) | ||
37 | { | ||
38 | return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & | ||
39 | NV10_PFIFO_CACHE1_PUSH1_CHID_MASK; | ||
40 | } | ||
41 | |||
int
nv10_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	uint32_t fc = NV10_RAMFC(chan->id);
	int ret;

	/* Allocate the channel's RAMFC context slot in instance memory;
	 * zeroed on alloc and on free. */
	ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
				      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
	if (ret)
		return ret;

	/* Map this channel's USER control area from BAR0.
	 * NOTE(review): chan->ramfc stays allocated if ioremap() fails;
	 * presumably the caller's error path destroys the channel and
	 * releases it -- confirm against the channel-creation code. */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV03_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	/* Fill entries that are seen filled in dumps of nvidia driver just
	 * after channel's is put into DMA mode
	 */
	nv_wi32(dev, fc + 0, chan->pushbuf_base);
	nv_wi32(dev, fc + 4, chan->pushbuf_base);
	nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
	nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			      0);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
	return 0;
}
80 | |||
/* Restore channel <chid>'s FIFO state from its RAMFC slot into the
 * CACHE1 registers.  The RAMFC offsets mirror those written by
 * nv10_fifo_unload_context(). */
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV10_RAMFC(chid), tmp;

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));

	/* Word 12 packs DMA instance (low 16 bits) and dcount (high 16). */
	tmp = nv_ri32(dev, fc + 12);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));

	/* Words 32..48 only exist in the extended (64-byte) RAMFC used
	 * from chipset 0x17 on -- see NV10_RAMFC__SIZE. */
	if (dev_priv->chipset < 0x17)
		goto out;

	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
	tmp = nv_ri32(dev, fc + 36);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));

out:
	/* Reset CACHE1 get/put so the engine starts clean. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
114 | |||
/* Make <chan> the active FIFO channel: restore its context, point
 * CACHE1_PUSH1 at it in DMA mode and re-enable the DMA pusher. */
int
nv10_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t tmp;

	nv10_fifo_do_load_context(dev, chan->id);

	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
133 | |||
/* Save the currently-active channel's CACHE1 state back into its RAMFC
 * slot, then park the hardware on the last (dummy) channel. */
int
nv10_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	uint32_t fc, tmp;
	int chid;

	/* Nothing to save if no valid channel is resident. */
	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;
	fc = NV10_RAMFC(chid);

	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
	/* Pack DMA instance (low 16) and dcount (high 16) into word 12. */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
	nv_wi32(dev, fc + 12, tmp);
	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
	nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));

	/* Extended (64-byte) RAMFC words, chipset >= 0x17 only. */
	if (dev_priv->chipset < 0x17)
		goto out;

	nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
	nv_wi32(dev, fc + 36, tmp);
	nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
	/* NOTE(review): word 48 is saved from CACHE1_DMA_GET but restored
	 * into CACHE1_DMA_SUBROUTINE by nv10_fifo_do_load_context() --
	 * looks intentional (subroutine return address), confirm. */
	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));

out:
	/* Park the FIFO on the last channel ID (reserved as a dummy). */
	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
	return 0;
}
173 | |||
/* Pulse the PFIFO enable bit in PMC to reset the engine, then program
 * initial values.  The 0x00xxxx registers below are written with magic
 * values whose individual meaning isn't documented here -- presumably
 * taken from hardware traces; verify against envytools if they need
 * changing. */
static void
nv10_fifo_init_reset(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x002044, 0x0101ffff);
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003000, 0x00000000);
	nv_wr32(dev, 0x003050, 0x00000000);

	nv_wr32(dev, 0x003258, 0x00000000);
	nv_wr32(dev, 0x003210, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
}
193 | |||
/* Point PFIFO at the instance-memory locations of RAMHT (object hash
 * table), RAMRO (runout area) and RAMFC (per-channel contexts). */
static void
nv10_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht->bits - 9) << 16) |
				       (dev_priv->ramht->gpuobj->pinst >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);

	/* Chipset >= 0x17 uses the extended 64-byte RAMFC entries
	 * (cf. NV10_RAMFC__SIZE), flagged via bit 16. */
	if (dev_priv->chipset < 0x17) {
		nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
	} else {
		nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
					       (1 << 16) /* 64 Bytes entry*/);
		/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
	}
}
212 | |||
/* Hook the PFIFO interrupt handler (IRQ line 8), acknowledge any stale
 * pending interrupts (0x2100) and unmask all sources (0x2140). */
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
220 | |||
/* Bring up the NV10 PFIFO engine: reset it, program the RAM* pointers,
 * park it on the dummy channel, enable interrupts, then re-enable DMA
 * mode for every channel that already exists. */
int
nv10_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv10_fifo_init_reset(dev);
	nv10_fifo_init_ramxx(dev);

	/* Load the last (dummy) channel so no real channel is resident. */
	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv10_fifo_init_intr(dev);
	pfifo->enable(dev);
	pfifo->reassign(dev, true);

	/* Restore the per-channel DMA-mode bit for existing channels
	 * (the reset above cleared PFIFO_MODE). */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->channels.ptr[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c new file mode 100644 index 00000000000..007fc29e2f8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_gpio.c | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Francisco Jerez. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "nouveau_drv.h" | ||
29 | #include "nouveau_hw.h" | ||
30 | |||
31 | static bool | ||
32 | get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift, | ||
33 | uint32_t *mask) | ||
34 | { | ||
35 | if (ent->line < 2) { | ||
36 | *reg = NV_PCRTC_GPIO; | ||
37 | *shift = ent->line * 16; | ||
38 | *mask = 0x11; | ||
39 | |||
40 | } else if (ent->line < 10) { | ||
41 | *reg = NV_PCRTC_GPIO_EXT; | ||
42 | *shift = (ent->line - 2) * 4; | ||
43 | *mask = 0x3; | ||
44 | |||
45 | } else if (ent->line < 14) { | ||
46 | *reg = NV_PCRTC_850; | ||
47 | *shift = (ent->line - 10) * 4; | ||
48 | *mask = 0x3; | ||
49 | |||
50 | } else { | ||
51 | return false; | ||
52 | } | ||
53 | |||
54 | return true; | ||
55 | } | ||
56 | |||
57 | int | ||
58 | nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) | ||
59 | { | ||
60 | struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); | ||
61 | uint32_t reg, shift, mask, value; | ||
62 | |||
63 | if (!ent) | ||
64 | return -ENODEV; | ||
65 | |||
66 | if (!get_gpio_location(ent, ®, &shift, &mask)) | ||
67 | return -ENODEV; | ||
68 | |||
69 | value = NVReadCRTC(dev, 0, reg) >> shift; | ||
70 | |||
71 | return (ent->invert ? 1 : 0) ^ (value & 1); | ||
72 | } | ||
73 | |||
74 | int | ||
75 | nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) | ||
76 | { | ||
77 | struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); | ||
78 | uint32_t reg, shift, mask, value; | ||
79 | |||
80 | if (!ent) | ||
81 | return -ENODEV; | ||
82 | |||
83 | if (!get_gpio_location(ent, ®, &shift, &mask)) | ||
84 | return -ENODEV; | ||
85 | |||
86 | value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift; | ||
87 | mask = ~(mask << shift); | ||
88 | |||
89 | NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask)); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c new file mode 100644 index 00000000000..7255e4a4d3f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
@@ -0,0 +1,1194 @@ | |||
1 | /* | ||
2 | * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr> | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "drm.h" | ||
27 | #include "nouveau_drm.h" | ||
28 | #include "nouveau_drv.h" | ||
29 | #include "nouveau_util.h" | ||
30 | |||
/* NV10 PGRAPH engine instance; wraps the generic exec-engine base. */
struct nv10_graph_engine {
	struct nouveau_exec_engine base;
};
34 | |||
/* Saved contents of the PGRAPH 3D pipe.  Each array holds the words
 * read back from PIPE_DATA starting at the PIPE_ADDRESS named in the
 * field (array length = address-range size / 4). */
struct pipe_state {
	uint32_t pipe_0x0000[0x040/4];
	uint32_t pipe_0x0040[0x010/4];
	uint32_t pipe_0x0200[0x0c0/4];
	uint32_t pipe_0x4400[0x080/4];
	uint32_t pipe_0x6400[0x3b0/4];
	uint32_t pipe_0x6800[0x2f0/4];
	uint32_t pipe_0x6c00[0x030/4];
	uint32_t pipe_0x7000[0x130/4];
	uint32_t pipe_0x7400[0x0c0/4];
	uint32_t pipe_0x7800[0x0c0/4];
};
47 | |||
/* PGRAPH registers saved/restored on every NV10 context switch.  The
 * bare hex addresses continue the multi-word register files introduced
 * by the preceding named register (see the inline comments). */
static int nv10_graph_ctx_regs[] = {
	NV10_PGRAPH_CTX_SWITCH(0),
	NV10_PGRAPH_CTX_SWITCH(1),
	NV10_PGRAPH_CTX_SWITCH(2),
	NV10_PGRAPH_CTX_SWITCH(3),
	NV10_PGRAPH_CTX_SWITCH(4),
	NV10_PGRAPH_CTX_CACHE(0, 0),
	NV10_PGRAPH_CTX_CACHE(0, 1),
	NV10_PGRAPH_CTX_CACHE(0, 2),
	NV10_PGRAPH_CTX_CACHE(0, 3),
	NV10_PGRAPH_CTX_CACHE(0, 4),
	NV10_PGRAPH_CTX_CACHE(1, 0),
	NV10_PGRAPH_CTX_CACHE(1, 1),
	NV10_PGRAPH_CTX_CACHE(1, 2),
	NV10_PGRAPH_CTX_CACHE(1, 3),
	NV10_PGRAPH_CTX_CACHE(1, 4),
	NV10_PGRAPH_CTX_CACHE(2, 0),
	NV10_PGRAPH_CTX_CACHE(2, 1),
	NV10_PGRAPH_CTX_CACHE(2, 2),
	NV10_PGRAPH_CTX_CACHE(2, 3),
	NV10_PGRAPH_CTX_CACHE(2, 4),
	NV10_PGRAPH_CTX_CACHE(3, 0),
	NV10_PGRAPH_CTX_CACHE(3, 1),
	NV10_PGRAPH_CTX_CACHE(3, 2),
	NV10_PGRAPH_CTX_CACHE(3, 3),
	NV10_PGRAPH_CTX_CACHE(3, 4),
	NV10_PGRAPH_CTX_CACHE(4, 0),
	NV10_PGRAPH_CTX_CACHE(4, 1),
	NV10_PGRAPH_CTX_CACHE(4, 2),
	NV10_PGRAPH_CTX_CACHE(4, 3),
	NV10_PGRAPH_CTX_CACHE(4, 4),
	NV10_PGRAPH_CTX_CACHE(5, 0),
	NV10_PGRAPH_CTX_CACHE(5, 1),
	NV10_PGRAPH_CTX_CACHE(5, 2),
	NV10_PGRAPH_CTX_CACHE(5, 3),
	NV10_PGRAPH_CTX_CACHE(5, 4),
	NV10_PGRAPH_CTX_CACHE(6, 0),
	NV10_PGRAPH_CTX_CACHE(6, 1),
	NV10_PGRAPH_CTX_CACHE(6, 2),
	NV10_PGRAPH_CTX_CACHE(6, 3),
	NV10_PGRAPH_CTX_CACHE(6, 4),
	NV10_PGRAPH_CTX_CACHE(7, 0),
	NV10_PGRAPH_CTX_CACHE(7, 1),
	NV10_PGRAPH_CTX_CACHE(7, 2),
	NV10_PGRAPH_CTX_CACHE(7, 3),
	NV10_PGRAPH_CTX_CACHE(7, 4),
	NV10_PGRAPH_CTX_USER,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV10_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV10_PGRAPH_SURFACE,
	NV10_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV10_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
	0x00400904,
	0x00400908,
	0x0040090c,
	0x00400910,
	0x00400914,
	0x00400918,
	0x0040091c,
	0x00400920,
	0x00400924,
	0x00400928,
	0x0040092c,
	0x00400930,
	0x00400934,
	0x00400938,
	0x0040093c,
	0x00400940,
	0x00400944,
	0x00400948,
	0x0040094c,
	0x00400950,
	0x00400954,
	0x00400958,
	0x0040095c,
	0x00400960,
	0x00400964,
	0x00400968,
	0x0040096c,
	0x00400970,
	0x00400974,
	0x00400978,
	0x0040097c,
	0x00400980,
	0x00400984,
	0x00400988,
	0x0040098c,
	0x00400990,
	0x00400994,
	0x00400998,
	0x0040099c,
	0x004009a0,
	0x004009a4,
	0x004009a8,
	0x004009ac,
	0x004009b0,
	0x004009b4,
	0x004009b8,
	0x004009bc,
	0x004009c0,
	0x004009c4,
	0x004009c8,
	0x004009cc,
	0x004009d0,
	0x004009d4,
	0x004009d8,
	0x004009dc,
	0x004009e0,
	0x004009e4,
	0x004009e8,
	0x004009ec,
	0x004009f0,
	0x004009f4,
	0x004009f8,
	0x004009fc,
	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV03_PGRAPH_MONO_COLOR0,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	0x00400e70,
	0x00400e74,
	0x00400e78,
	0x00400e7c,
	0x00400e80,
	0x00400e84,
	0x00400e88,
	0x00400e8c,
	0x00400ea0,
	0x00400ea4,
	0x00400ea8,
	0x00400e90,
	0x00400e94,
	0x00400e98,
	0x00400e9c,
	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
	0x00400f04,
	0x00400f24,
	0x00400f08,
	0x00400f28,
	0x00400f0c,
	0x00400f2c,
	0x00400f10,
	0x00400f30,
	0x00400f14,
	0x00400f34,
	0x00400f18,
	0x00400f38,
	0x00400f1c,
	0x00400f3c,
	NV10_PGRAPH_XFMODE0,
	NV10_PGRAPH_XFMODE1,
	NV10_PGRAPH_GLOBALSTATE0,
	NV10_PGRAPH_GLOBALSTATE1,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	NV03_PGRAPH_ABS_UCLIP_XMIN,
	NV03_PGRAPH_ABS_UCLIP_XMAX,
	NV03_PGRAPH_ABS_UCLIP_YMIN,
	NV03_PGRAPH_ABS_UCLIP_YMAX,
	0x00400550,
	0x00400558,
	0x00400554,
	0x0040055c,
	NV03_PGRAPH_ABS_UCLIPA_XMIN,
	NV03_PGRAPH_ABS_UCLIPA_XMAX,
	NV03_PGRAPH_ABS_UCLIPA_YMIN,
	NV03_PGRAPH_ABS_UCLIPA_YMAX,
	NV03_PGRAPH_ABS_ICLIP_XMAX,
	NV03_PGRAPH_ABS_ICLIP_YMAX,
	NV03_PGRAPH_XY_LOGIC_MISC0,
	NV03_PGRAPH_XY_LOGIC_MISC1,
	NV03_PGRAPH_XY_LOGIC_MISC2,
	NV03_PGRAPH_XY_LOGIC_MISC3,
	NV03_PGRAPH_CLIPX_0,
	NV03_PGRAPH_CLIPX_1,
	NV03_PGRAPH_CLIPY_0,
	NV03_PGRAPH_CLIPY_1,
	NV10_PGRAPH_COMBINER0_IN_ALPHA,
	NV10_PGRAPH_COMBINER1_IN_ALPHA,
	NV10_PGRAPH_COMBINER0_IN_RGB,
	NV10_PGRAPH_COMBINER1_IN_RGB,
	NV10_PGRAPH_COMBINER_COLOR0,
	NV10_PGRAPH_COMBINER_COLOR1,
	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
	NV10_PGRAPH_COMBINER0_OUT_RGB,
	NV10_PGRAPH_COMBINER1_OUT_RGB,
	NV10_PGRAPH_COMBINER_FINAL0,
	NV10_PGRAPH_COMBINER_FINAL1,
	0x00400e00,
	0x00400e04,
	0x00400e08,
	0x00400e0c,
	0x00400e10,
	0x00400e14,
	0x00400e18,
	0x00400e1c,
	0x00400e20,
	0x00400e24,
	0x00400e28,
	0x00400e2c,
	0x00400e30,
	0x00400e34,
	0x00400e38,
	0x00400e3c,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV10_PGRAPH_DIMX_TEXTURE,
	NV10_PGRAPH_WDIMX_TEXTURE,
	NV10_PGRAPH_DVD_COLORFMT,
	NV10_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	NV03_PGRAPH_X_MISC,
	NV03_PGRAPH_Y_MISC,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
};
369 | |||
/* Additional PGRAPH context registers present on NV17+ chipsets, saved
 * on top of nv10_graph_ctx_regs[]. */
static int nv17_graph_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};
390 | |||
/* Per-channel PGRAPH software context: saved register values (indexed
 * parallel to the ctx_regs arrays above), saved 3D pipe contents, and
 * the NV17 LMA window parameters. */
struct graph_state {
	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
	struct pipe_state pipe_state;
	uint32_t lma_window[4];
};
397 | |||
/* Read ARRAY_SIZE(state) words of pipe state starting at PIPE_ADDRESS
 * <addr> into <state> (PIPE_ADDRESS auto-increments on each read). */
#define PIPE_SAVE(dev, state, addr)					\
	do {								\
		int __i;						\
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
	} while (0)
405 | |||
/* Write ARRAY_SIZE(state) words from <state> back into the pipe
 * starting at PIPE_ADDRESS <addr> -- inverse of PIPE_SAVE. */
#define PIPE_RESTORE(dev, state, addr)					\
	do {								\
		int __i;						\
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
	} while (0)
413 | |||
/* Snapshot the whole 3D pipe into the channel's software context.
 * NOTE(review): the save order (0x4400 first, 0x0000 last) differs from
 * the restore order in nv10_graph_load_pipe() -- presumably hardware-
 * mandated; keep it unless traces say otherwise. */
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
	struct drm_device *dev = chan->dev;

	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
	PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
}
431 | |||
/* Restore the channel's saved 3D pipe state into the hardware.  XFMODE
 * is forced to a neutral value while the 0x0200 range is loaded, then
 * put back before the remaining ranges -- the exact sequence is
 * order-critical. */
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
	struct drm_device *dev = chan->dev;
	uint32_t xfmode0, xfmode1;
	int i;

	nouveau_wait_for_idle(dev);
	/* XXX check haiku comments */
	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	/* Prime a few pipe locations with fixed values before the main
	 * restore (values match what the blob driver programs). */
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);


	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
	nouveau_wait_for_idle(dev);

	/* restore XFMODE */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
	nouveau_wait_for_idle(dev);
}
481 | |||
/* Fill the software copy of the 3D "pipe" state with the defaults for a
 * freshly created channel.  Nothing is written to the hardware here: the
 * tables built in pgraph_ctx->pipe_state are uploaded later, when the
 * channel's context is made resident.
 *
 * The values are mostly 0x00000000 with a sprinkling of floating-point
 * constants (0x3f800000 == 1.0f, 0x3f000000 == 0.5f, 0xbf800000 == -1.0f,
 * 0x40000000 == 2.0f) that form identity transform/lighting state.
 */
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	struct drm_device *dev = chan->dev;
	uint32_t *fifo_pipe_state_addr;
	int i;
/* Point the write cursor at the software buffer for PIPE range "addr". */
#define PIPE_INIT(addr) \
	do { \
		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
	} while (0)
/* Verify the cursor advanced exactly to the end of the buffer, i.e. the
 * number of NV_WRITE_PIPE_INIT()s matched the array size. */
#define PIPE_INIT_END(addr) \
	do { \
		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
		if (fifo_pipe_state_addr != __end_addr) \
			NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
				addr, fifo_pipe_state_addr, __end_addr); \
	} while (0)
/* Store one word at the cursor and advance it. */
#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value

	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
636 | |||
637 | static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) | ||
638 | { | ||
639 | int i; | ||
640 | for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) { | ||
641 | if (nv10_graph_ctx_regs[i] == reg) | ||
642 | return i; | ||
643 | } | ||
644 | NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg); | ||
645 | return -1; | ||
646 | } | ||
647 | |||
648 | static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) | ||
649 | { | ||
650 | int i; | ||
651 | for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) { | ||
652 | if (nv17_graph_ctx_regs[i] == reg) | ||
653 | return i; | ||
654 | } | ||
655 | NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg); | ||
656 | return -1; | ||
657 | } | ||
658 | |||
/* Re-bind the vertex-buffer DMA object ("inst") for a channel being made
 * resident.  The binding cannot be restored by a plain register write, so
 * the NV10TCL_DMA_VTXBUF method is re-issued through PGRAPH's FIFO
 * interface: save the FIFO and ctx-object state, inject the method on a
 * celsius subchannel, single-step PGRAPH to consume it, then restore
 * everything that was disturbed. */
static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
				       uint32_t inst)
{
	struct drm_device *dev = chan->dev;
	uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
	uint32_t ctx_user, ctx_switch[5];
	int i, subchan = -1;

	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
	 * that cannot be restored via MMIO. Do it through the FIFO
	 * instead.
	 */

	/* Look for a celsius object */
	for (i = 0; i < 8; i++) {
		int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;

		if (class == 0x56 || class == 0x96 || class == 0x99) {
			subchan = i;
			break;
		}
	}

	/* Nothing to do without a bound celsius object and a DMA instance */
	if (subchan < 0 || !inst)
		return;

	/* Save the current ctx object */
	ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
	for (i = 0; i < 5; i++)
		ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));

	/* Save the FIFO state */
	st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
	st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
	st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
	fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);

	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);

	/* Switch to the celsius subchannel */
	for (i = 0; i < 5; i++)
		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
			nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
	nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);

	/* Inject NV10TCL_DMA_VTXBUF */
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
		0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
	/* Briefly re-enable PGRAPH fifo access so the injected method is
	 * actually executed, then halt it again. */
	nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Restore the FIFO state */
	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);

	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);

	/* Restore the current ctx object */
	for (i = 0; i < 5; i++)
		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
}
728 | |||
/* Upload a channel's saved PGRAPH context (MMIO register image, pipe
 * state and vtxbuf DMA binding) into the hardware and mark the channel
 * as the current context owner.  Always returns 0. */
static int
nv10_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	uint32_t tmp;
	int i;

	/* Restore the MMIO-accessible context registers... */
	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
	/* ...plus the extra set that exists on NV17 and newer */
	if (dev_priv->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			nv_wr32(dev, nv17_graph_ctx_regs[i],
				pgraph_ctx->nv17[i]);
	}

	nv10_graph_load_pipe(chan);
	/* The DMA_VTXBUF instance lives in the low 16 bits of GLOBALSTATE1
	 * and cannot be restored by MMIO; re-bind it through the FIFO. */
	nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
					  & 0xffff));

	/* Flag the context valid and record the owning channel id in the
	 * top byte of CTX_USER */
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
	return 0;
}
757 | |||
/* Save the currently resident PGRAPH context (if any) into its channel's
 * software state and mark the hardware context invalid.  Returns 0 even
 * when no context was loaded. */
static int
nv10_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct graph_state *ctx;
	uint32_t tmp;
	int i;

	chan = nv10_graph_channel(dev);
	if (!chan)
		return 0;
	ctx = chan->engctx[NVOBJ_ENGINE_GR];

	/* Read back the MMIO-accessible context registers... */
	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);

	/* ...plus the extra set that exists on NV17 and newer */
	if (dev_priv->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
	}

	nv10_graph_save_pipe(chan);

	/* Invalidate the context and park CTX_USER on an out-of-range
	 * channel id so nv10_graph_channel() matches no channel. */
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (pfifo->channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	return 0;
}
789 | |||
790 | static void | ||
791 | nv10_graph_context_switch(struct drm_device *dev) | ||
792 | { | ||
793 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
794 | struct nouveau_channel *chan = NULL; | ||
795 | int chid; | ||
796 | |||
797 | nouveau_wait_for_idle(dev); | ||
798 | |||
799 | /* If previous context is valid, we need to save it */ | ||
800 | nv10_graph_unload_context(dev); | ||
801 | |||
802 | /* Load context for next channel */ | ||
803 | chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; | ||
804 | chan = dev_priv->channels.ptr[chid]; | ||
805 | if (chan && chan->engctx[NVOBJ_ENGINE_GR]) | ||
806 | nv10_graph_load_context(chan); | ||
807 | } | ||
808 | |||
/* Store "val" at "reg"'s slot in the software NV10 context image.
 * nv10_graph_ctx_regs_find_offset() returns -1 when the register is not
 * part of the context, so every index >= 0 is valid; the previous
 * "offset > 0" check silently dropped writes to the first register of
 * the table. */
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
	if (offset >= 0) \
		pgraph_ctx->nv10[offset] = val; \
	} while (0)

/* Same as NV_WRITE_CTX, for the NV17-only context registers. */
#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
	if (offset >= 0) \
		pgraph_ctx->nv17[offset] = val; \
	} while (0)
820 | |||
821 | struct nouveau_channel * | ||
822 | nv10_graph_channel(struct drm_device *dev) | ||
823 | { | ||
824 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
825 | int chid = dev_priv->engine.fifo.channels; | ||
826 | |||
827 | if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000) | ||
828 | chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24; | ||
829 | |||
830 | if (chid >= dev_priv->engine.fifo.channels) | ||
831 | return NULL; | ||
832 | |||
833 | return dev_priv->channels.ptr[chid]; | ||
834 | } | ||
835 | |||
/* Allocate and initialise the software graphics context for a channel.
 * The hardware is not touched: the register defaults are stored in the
 * context image and pipe tables, to be uploaded on the channel's first
 * context switch.  Returns 0 on success or -ENOMEM. */
static int
nv10_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx;

	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);

	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
	if (pgraph_ctx == NULL)
		return -ENOMEM;
	chan->engctx[engine] = pgraph_ctx;

	/* Non-zero power-on defaults for the context image */
	NV_WRITE_CTX(0x00400e88, 0x08000000);
	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
	NV_WRITE_CTX(0x00400e10, 0x00001000);
	NV_WRITE_CTX(0x00400e14, 0x00001000);
	NV_WRITE_CTX(0x00400e30, 0x00080008);
	NV_WRITE_CTX(0x00400e34, 0x00080008);
	if (dev_priv->chipset >= 0x17) {
		/* is it really needed ??? */
		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
	}
	/* Tag the context image with the owning channel's id */
	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);

	nv10_graph_create_pipe(chan);
	return 0;
}
872 | |||
/* Destroy a channel's graphics context.  PGRAPH fifo access is disabled
 * under the context-switch lock while the context is (possibly) saved
 * out of the hardware, then the software state is freed. */
static void
nv10_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->engctx[engine];
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Unload the context if it's the currently active one */
	if (nv10_graph_channel(dev) == chan)
		nv10_graph_unload_context(dev);

	/* Re-enable PGRAPH fifo access */
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	chan->engctx[engine] = NULL;
	kfree(pgraph_ctx);
}
895 | |||
896 | static void | ||
897 | nv10_graph_set_tile_region(struct drm_device *dev, int i) | ||
898 | { | ||
899 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
900 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
901 | |||
902 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit); | ||
903 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch); | ||
904 | nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr); | ||
905 | } | ||
906 | |||
/* Bring up the PGRAPH engine: reset it via its PMC enable bit, ack and
 * unmask all interrupts, program the DEBUG registers, disable tiling and
 * leave the context machinery in the "no context loaded" state.
 * Always returns 0. */
static int
nv10_graph_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 tmp;
	int i;

	/* Reset PGRAPH by pulsing its PMC enable bit */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Ack anything pending, then enable all interrupt sources */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
					(1<<29) |
					(1<<31));
	if (dev_priv->chipset >= 0x17) {
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nv_wr32(dev, 0x400a10, 0x3ff3fb6);
		nv_wr32(dev, 0x400838, 0x2f8684);
		nv_wr32(dev, 0x40083c, 0x115f3f);
		nv_wr32(dev, 0x004006b0, 0x40000020);
	} else
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv10_graph_set_tile_region(dev, i);

	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);

	/* Park CTX_USER on an out-of-range channel id: no context loaded */
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);

	return 0;
}
958 | |||
/* Halt PGRAPH and save the active context.  When "suspend" is set the
 * engine must first go fully idle; if it does not, re-enable fifo access
 * and bail out with -EBUSY so the suspend can be aborted. */
static int
nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
		return -EBUSY;
	}
	nv10_graph_unload_context(dev);
	/* Mask all PGRAPH interrupts */
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
	return 0;
}
971 | |||
/* Software handler for the LMA_WINDOW methods (0x1638..0x1644) of the
 * NV17 celsius class (0x0099).
 *
 * The four data words are accumulated in ctx->lma_window; once the last
 * one (method 0x1644) arrives they are uploaded to the pipe at 0x6790,
 * while carefully saving and restoring the pipe/XFMODE state that the
 * upload sequence clobbers.  Always returns 0. */
static int
nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
{
	struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	struct pipe_state *pipe = &ctx->pipe_state;
	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
	uint32_t xfmode0, xfmode1;
	int i;

	/* Stash this word; only act when the full window has arrived */
	ctx->lma_window[(mthd - 0x1638) / 4] = data;

	if (mthd != 0x1644)
		return 0;

	nouveau_wait_for_idle(dev);

	/* Save the pipe ranges the upload below will disturb */
	PIPE_SAVE(dev, pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);

	/* Upload the accumulated window words to pipe address 0x6790 */
	PIPE_RESTORE(dev, ctx->lma_window, 0x6790);

	nouveau_wait_for_idle(dev);

	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);

	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
	PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
	PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);

	nouveau_wait_for_idle(dev);

	/* Load neutral XFMODE/pipe values while the change settles */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);

	nouveau_wait_for_idle(dev);

	/* Put back everything saved above */
	PIPE_RESTORE(dev, pipe_0x0040, 0x0040);

	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);

	PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
	PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
	PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nouveau_wait_for_idle(dev);

	return 0;
}
1047 | |||
1048 | static int | ||
1049 | nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, | ||
1050 | u32 class, u32 mthd, u32 data) | ||
1051 | { | ||
1052 | struct drm_device *dev = chan->dev; | ||
1053 | |||
1054 | nouveau_wait_for_idle(dev); | ||
1055 | |||
1056 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, | ||
1057 | nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8); | ||
1058 | nv_wr32(dev, 0x004006b0, | ||
1059 | nv_rd32(dev, 0x004006b0) | 0x8 << 24); | ||
1060 | |||
1061 | return 0; | ||
1062 | } | ||
1063 | |||
/* Bit names for NV03_PGRAPH_INTR, used when logging interrupt status. */
struct nouveau_bitfield nv10_graph_intr[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
	{}
};
1069 | |||
/* Bit names for NV03_PGRAPH_NSTATUS, used when logging interrupt status. */
struct nouveau_bitfield nv10_graph_nstatus[] = {
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};
1077 | |||
/* PGRAPH interrupt handler (IRQ slot 12).
 *
 * For each pending interrupt: decode the trapped method (channel,
 * subchannel, class, method, data), try to execute illegal methods in
 * software, perform context switches, then ack the interrupt and
 * re-enable PGRAPH fifo access.  Anything left unhandled is logged
 * (ratelimited). */
static void
nv10_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 chid = (addr & 0x01f00000) >> 20;
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			/* Try a software implementation of the method;
			 * suppress the log message if that worked. */
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			}
		}

		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv10_graph_context_switch(dev);
		}

		/* Ack whatever remains and let PGRAPH continue */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
				     "mthd 0x%04x data 0x%08x\n",
				chid, subc, class, mthd, data);
		}
	}
}
1125 | |||
/* Tear down the PGRAPH engine object: detach the interrupt handler and
 * release the engine structure allocated by nv10_graph_create(). */
static void
nv10_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv10_graph_engine *pgraph = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 12);
	kfree(pgraph);
}
1134 | |||
/* Create and register the NV10-family PGRAPH engine: fill in the engine
 * vtable, hook the PGRAPH interrupt, and register the object classes
 * this generation supports.  Returns 0 on success or -ENOMEM. */
int
nv10_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_graph_engine *pgraph;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv10_graph_destroy;
	pgraph->base.init = nv10_graph_init;
	pgraph->base.fini = nv10_graph_fini;
	pgraph->base.context_new = nv10_graph_context_new;
	pgraph->base.context_del = nv10_graph_context_del;
	pgraph->base.object_new = nv04_graph_object_new;
	pgraph->base.set_tile_region = nv10_graph_set_tile_region;

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv10_graph_isr);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */

	/* celsius: the 3D class id depends on the chipset generation */
	if (dev_priv->chipset <= 0x10) {
		NVOBJ_CLASS(dev, 0x0056, GR);
	} else
	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
		NVOBJ_CLASS(dev, 0x0096, GR);
	} else {
		/* NV17+ celsius gains the LMA window/enable software methods */
		NVOBJ_CLASS(dev, 0x0099, GR);
		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c new file mode 100644 index 00000000000..183e37512ef --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv20_graph.c | |||
@@ -0,0 +1,842 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
6 | /* | ||
7 | * NV20 | ||
8 | * ----- | ||
9 | * There are 3 families : | ||
10 | * NV20 is 0x10de:0x020* | ||
11 | * NV25/28 is 0x10de:0x025* / 0x10de:0x028* | ||
12 | * NV2A is 0x10de:0x02A0 | ||
13 | * | ||
14 | * NV30 | ||
15 | * ----- | ||
16 | * There are 3 families : | ||
17 | * NV30/31 is 0x10de:0x030* / 0x10de:0x031* | ||
18 | * NV34 is 0x10de:0x032* | ||
19 | * NV35/36 is 0x10de:0x033* / 0x10de:0x034* | ||
20 | * | ||
21 | * Not seen in the wild, no dumps (probably NV35) : | ||
22 | * NV37 is 0x10de:0x00fc, 0x10de:0x00fd | ||
23 | * NV38 is 0x10de:0x0333, 0x10de:0x00fe | ||
24 | * | ||
25 | */ | ||
26 | |||
/*
 * Per-device state for the NV20/NV30-family PGRAPH (2D/3D graphics)
 * engine implementation.
 */
struct nv20_graph_engine {
	struct nouveau_exec_engine base;	/* common engine hooks (init/fini/ctx ops) */
	struct nouveau_gpuobj *ctxtab;		/* table mapping channel id -> grctx instance addr */
	void (*grctx_init)(struct nouveau_gpuobj *);	/* chipset-specific default ctx image fill */
	u32 grctx_size;				/* context image size in bytes */
	u32 grctx_user;				/* byte offset of the CTX_USER word inside the image */
};

/* Graphics context image sizes per chipset family (word count * 4 bytes). */
#define NV20_GRCTX_SIZE (3580*4)
#define NV25_GRCTX_SIZE (3529*4)
#define NV2A_GRCTX_SIZE (3500*4)

#define NV30_31_GRCTX_SIZE (24392)
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)
42 | |||
/*
 * Save the currently resident channel's PGRAPH state back into its
 * context image, then mark PGRAPH as owned by an out-of-range channel
 * id so no real channel appears active.
 *
 * Returns 0, also when no channel currently owns PGRAPH.
 */
int
nv20_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct nouveau_gpuobj *grctx;
	u32 tmp;

	/* Nothing to save if no channel is resident on PGRAPH. */
	chan = nv10_graph_channel(dev);
	if (!chan)
		return 0;
	grctx = chan->engctx[NVOBJ_ENGINE_GR];

	/* Point the context DMA at this channel's image and trigger a save. */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);

	nouveau_wait_for_idle(dev);

	/* Set CTX_USER's channel field (bits 31:24) to the last channel id. */
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= (pfifo->channels - 1) << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	return 0;
}
69 | |||
70 | static void | ||
71 | nv20_graph_rdi(struct drm_device *dev) | ||
72 | { | ||
73 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
74 | int i, writecount = 32; | ||
75 | uint32_t rdi_index = 0x2c80000; | ||
76 | |||
77 | if (dev_priv->chipset == 0x20) { | ||
78 | rdi_index = 0x3d0000; | ||
79 | writecount = 15; | ||
80 | } | ||
81 | |||
82 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index); | ||
83 | for (i = 0; i < writecount; i++) | ||
84 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0); | ||
85 | |||
86 | nouveau_wait_for_idle(dev); | ||
87 | } | ||
88 | |||
/*
 * Fill an NV20 graphics context image with its power-on default
 * values.  Offsets and values come from reverse-engineered hardware
 * state dumps; do not reorder or "simplify" these writes.
 */
static void
nv20_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x033c, 0xffff0000);
	nv_wo32(ctx, 0x03a0, 0x0fff0000);
	nv_wo32(ctx, 0x03a4, 0x0fff0000);
	nv_wo32(ctx, 0x047c, 0x00000101);
	nv_wo32(ctx, 0x0490, 0x00000111);
	nv_wo32(ctx, 0x04a8, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(ctx, i, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(ctx, i, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x05a4, 0x4b7fffff);
	nv_wo32(ctx, 0x05fc, 0x00000001);
	nv_wo32(ctx, 0x0604, 0x00004000);
	nv_wo32(ctx, 0x0610, 0x00000001);
	nv_wo32(ctx, 0x0618, 0x00040000);
	nv_wo32(ctx, 0x061c, 0x00010000);
	/* Repeating per-unit group: three words every 16 bytes. */
	for (i = 0x1c1c; i <= 0x248c; i += 16) {
		nv_wo32(ctx, (i + 0), 0x10700ff9);
		nv_wo32(ctx, (i + 4), 0x0436086c);
		nv_wo32(ctx, (i + 8), 0x000c001b);
	}
	/* 0x3f800000/0x3f000000/etc. are IEEE-754 float defaults (1.0, 0.5, ...). */
	nv_wo32(ctx, 0x281c, 0x3f800000);
	nv_wo32(ctx, 0x2830, 0x3f800000);
	nv_wo32(ctx, 0x285c, 0x40000000);
	nv_wo32(ctx, 0x2860, 0x3f800000);
	nv_wo32(ctx, 0x2864, 0x3f000000);
	nv_wo32(ctx, 0x286c, 0x40000000);
	nv_wo32(ctx, 0x2870, 0x3f800000);
	nv_wo32(ctx, 0x2878, 0xbf800000);
	nv_wo32(ctx, 0x2880, 0xbf800000);
	nv_wo32(ctx, 0x34a4, 0x000fe000);
	nv_wo32(ctx, 0x3530, 0x000003f8);
	nv_wo32(ctx, 0x3540, 0x002fe000);
	for (i = 0x355c; i <= 0x3578; i += 4)
		nv_wo32(ctx, i, 0x001c527c);
}
138 | |||
/*
 * Fill an NV25/NV28 graphics context image with its power-on default
 * values.  Same structure as the NV20 table but with shifted offsets
 * and a few extra words; values come from hardware state dumps.
 */
static void
nv25_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x035c, 0xffff0000);
	nv_wo32(ctx, 0x03c0, 0x0fff0000);
	nv_wo32(ctx, 0x03c4, 0x0fff0000);
	nv_wo32(ctx, 0x049c, 0x00000101);
	nv_wo32(ctx, 0x04b0, 0x00000111);
	nv_wo32(ctx, 0x04c8, 0x00000080);
	nv_wo32(ctx, 0x04cc, 0xffff0000);
	nv_wo32(ctx, 0x04d0, 0x00000001);
	nv_wo32(ctx, 0x04e4, 0x44400000);
	nv_wo32(ctx, 0x04fc, 0x4b800000);
	for (i = 0x0510; i <= 0x051c; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x0530; i <= 0x053c; i += 4)
		nv_wo32(ctx, i, 0x00080000);
	for (i = 0x0548; i <= 0x0554; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0558; i <= 0x0564; i += 4)
		nv_wo32(ctx, i, 0x000105b8);
	for (i = 0x0568; i <= 0x0574; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	for (i = 0x0598; i <= 0x05d4; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x05e0, 0x4b7fffff);
	nv_wo32(ctx, 0x0620, 0x00000080);
	nv_wo32(ctx, 0x0624, 0x30201000);
	nv_wo32(ctx, 0x0628, 0x70605040);
	nv_wo32(ctx, 0x062c, 0xb0a09080);
	nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
	nv_wo32(ctx, 0x0664, 0x00000001);
	nv_wo32(ctx, 0x066c, 0x00004000);
	nv_wo32(ctx, 0x0678, 0x00000001);
	nv_wo32(ctx, 0x0680, 0x00040000);
	nv_wo32(ctx, 0x0684, 0x00010000);
	/* Repeating per-unit group: three words every 16 bytes. */
	for (i = 0x1b04; i <= 0x2374; i += 16) {
		nv_wo32(ctx, (i + 0), 0x10700ff9);
		nv_wo32(ctx, (i + 4), 0x0436086c);
		nv_wo32(ctx, (i + 8), 0x000c001b);
	}
	/* IEEE-754 float defaults (1.0, 2.0, 0.5, -1.0). */
	nv_wo32(ctx, 0x2704, 0x3f800000);
	nv_wo32(ctx, 0x2718, 0x3f800000);
	nv_wo32(ctx, 0x2744, 0x40000000);
	nv_wo32(ctx, 0x2748, 0x3f800000);
	nv_wo32(ctx, 0x274c, 0x3f000000);
	nv_wo32(ctx, 0x2754, 0x40000000);
	nv_wo32(ctx, 0x2758, 0x3f800000);
	nv_wo32(ctx, 0x2760, 0xbf800000);
	nv_wo32(ctx, 0x2768, 0xbf800000);
	nv_wo32(ctx, 0x308c, 0x000fe000);
	nv_wo32(ctx, 0x3108, 0x000003f8);
	nv_wo32(ctx, 0x3468, 0x002fe000);
	for (i = 0x3484; i <= 0x34a0; i += 4)
		nv_wo32(ctx, i, 0x001c527c);
}
197 | |||
/*
 * Fill an NV2A (original Xbox GPU) graphics context image with its
 * power-on default values.  Mirrors the NV20 table with different
 * offsets in the upper half; values come from hardware state dumps.
 */
static void
nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x033c, 0xffff0000);
	nv_wo32(ctx, 0x03a0, 0x0fff0000);
	nv_wo32(ctx, 0x03a4, 0x0fff0000);
	nv_wo32(ctx, 0x047c, 0x00000101);
	nv_wo32(ctx, 0x0490, 0x00000111);
	nv_wo32(ctx, 0x04a8, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(ctx, i, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(ctx, i, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x05a4, 0x4b7fffff);
	nv_wo32(ctx, 0x05fc, 0x00000001);
	nv_wo32(ctx, 0x0604, 0x00004000);
	nv_wo32(ctx, 0x0610, 0x00000001);
	nv_wo32(ctx, 0x0618, 0x00040000);
	nv_wo32(ctx, 0x061c, 0x00010000);
	/* Repeating per-unit group: three words every 16 bytes. */
	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
		nv_wo32(ctx, (i + 0), 0x10700ff9);
		nv_wo32(ctx, (i + 4), 0x0436086c);
		nv_wo32(ctx, (i + 8), 0x000c001b);
	}
	/* IEEE-754 float defaults (1.0, 2.0, 0.5, -1.0). */
	nv_wo32(ctx, 0x269c, 0x3f800000);
	nv_wo32(ctx, 0x26b0, 0x3f800000);
	nv_wo32(ctx, 0x26dc, 0x40000000);
	nv_wo32(ctx, 0x26e0, 0x3f800000);
	nv_wo32(ctx, 0x26e4, 0x3f000000);
	nv_wo32(ctx, 0x26ec, 0x40000000);
	nv_wo32(ctx, 0x26f0, 0x3f800000);
	nv_wo32(ctx, 0x26f8, 0xbf800000);
	nv_wo32(ctx, 0x2700, 0xbf800000);
	nv_wo32(ctx, 0x3024, 0x000fe000);
	nv_wo32(ctx, 0x30a0, 0x000003f8);
	nv_wo32(ctx, 0x33fc, 0x002fe000);
	for (i = 0x341c; i <= 0x3438; i += 4)
		nv_wo32(ctx, i, 0x001c527c);
}
247 | |||
/*
 * Fill an NV30/NV31 graphics context image with its power-on default
 * values.  Offsets and values come from hardware state dumps; do not
 * reorder these writes.
 *
 * FIX: the 0x1f18..0x3088 loop wrote at byte offsets i+1 and i+2.
 * nv_wo32() takes a byte offset and stores a 32-bit word, so those
 * writes were misaligned and overlapped the word written at i+0,
 * corrupting each 16-byte group.  Every sibling table in this file
 * (nv20/nv25/nv2a/nv35_36) writes the same three values at i+0, i+4
 * and i+8 with a 16-byte stride; use the same offsets here.
 */
static void
nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x0410, 0x00000101);
	nv_wo32(ctx, 0x0424, 0x00000111);
	nv_wo32(ctx, 0x0428, 0x00000060);
	nv_wo32(ctx, 0x0444, 0x00000080);
	nv_wo32(ctx, 0x0448, 0xffff0000);
	nv_wo32(ctx, 0x044c, 0x00000001);
	nv_wo32(ctx, 0x0460, 0x44400000);
	nv_wo32(ctx, 0x048c, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nv_wo32(ctx, i, 0x0fff0000);
	nv_wo32(ctx, 0x04ec, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x0550, 0x4b7fffff);
	nv_wo32(ctx, 0x058c, 0x00000080);
	nv_wo32(ctx, 0x0590, 0x30201000);
	nv_wo32(ctx, 0x0594, 0x70605040);
	nv_wo32(ctx, 0x0598, 0xb8a89888);
	nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
	nv_wo32(ctx, 0x05b0, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nv_wo32(ctx, i, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nv_wo32(ctx, i, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	nv_wo32(ctx, 0x085c, 0x00040000);
	nv_wo32(ctx, 0x0860, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nv_wo32(ctx, i, 0x00040004);
	/* Repeating per-unit group: three words every 16 bytes. */
	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
		nv_wo32(ctx, i + 0, 0x10700ff9);
		nv_wo32(ctx, i + 4, 0x0436086c);
		nv_wo32(ctx, i + 8, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nv_wo32(ctx, i, 0x0000ffff);
	/* IEEE-754 float defaults (1.0, 2.0, 0.5, -1.0). */
	nv_wo32(ctx, 0x344c, 0x3f800000);
	nv_wo32(ctx, 0x3808, 0x3f800000);
	nv_wo32(ctx, 0x381c, 0x3f800000);
	nv_wo32(ctx, 0x3848, 0x40000000);
	nv_wo32(ctx, 0x384c, 0x3f800000);
	nv_wo32(ctx, 0x3850, 0x3f000000);
	nv_wo32(ctx, 0x3858, 0x40000000);
	nv_wo32(ctx, 0x385c, 0x3f800000);
	nv_wo32(ctx, 0x3864, 0xbf800000);
	nv_wo32(ctx, 0x386c, 0xbf800000);
}
305 | |||
/*
 * Fill an NV34 graphics context image with its power-on default
 * values.  Offsets and values come from hardware state dumps; do not
 * reorder these writes.
 *
 * FIX: the 0x15ac..0x271c loop wrote at byte offsets i+1 and i+2.
 * nv_wo32() takes a byte offset and stores a 32-bit word, so those
 * writes were misaligned and overlapped the word written at i+0,
 * corrupting each 16-byte group.  Every sibling table in this file
 * (nv20/nv25/nv2a/nv35_36) writes the same three values at i+0, i+4
 * and i+8 with a 16-byte stride; use the same offsets here.
 */
static void
nv34_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x040c, 0x01000101);
	nv_wo32(ctx, 0x0420, 0x00000111);
	nv_wo32(ctx, 0x0424, 0x00000060);
	nv_wo32(ctx, 0x0440, 0x00000080);
	nv_wo32(ctx, 0x0444, 0xffff0000);
	nv_wo32(ctx, 0x0448, 0x00000001);
	nv_wo32(ctx, 0x045c, 0x44400000);
	nv_wo32(ctx, 0x0480, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nv_wo32(ctx, i, 0x0fff0000);
	nv_wo32(ctx, 0x04e0, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x0544, 0x4b7fffff);
	nv_wo32(ctx, 0x057c, 0x00000080);
	nv_wo32(ctx, 0x0580, 0x30201000);
	nv_wo32(ctx, 0x0584, 0x70605040);
	nv_wo32(ctx, 0x0588, 0xb8a89888);
	nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
	nv_wo32(ctx, 0x05a0, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nv_wo32(ctx, i, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nv_wo32(ctx, i, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	nv_wo32(ctx, 0x0850, 0x00040000);
	nv_wo32(ctx, 0x0854, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nv_wo32(ctx, i, 0x00040004);
	/* Repeating per-unit group: three words every 16 bytes. */
	for (i = 0x15ac; i <= 0x271c ; i += 16) {
		nv_wo32(ctx, i + 0, 0x10700ff9);
		nv_wo32(ctx, i + 4, 0x0436086c);
		nv_wo32(ctx, i + 8, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nv_wo32(ctx, i, 0x0000ffff);
	/* IEEE-754 float defaults (1.0, 2.0, 0.5, -1.0). */
	nv_wo32(ctx, 0x2ae0, 0x3f800000);
	nv_wo32(ctx, 0x2e9c, 0x3f800000);
	nv_wo32(ctx, 0x2eb0, 0x3f800000);
	nv_wo32(ctx, 0x2edc, 0x40000000);
	nv_wo32(ctx, 0x2ee0, 0x3f800000);
	nv_wo32(ctx, 0x2ee4, 0x3f000000);
	nv_wo32(ctx, 0x2eec, 0x40000000);
	nv_wo32(ctx, 0x2ef0, 0x3f800000);
	nv_wo32(ctx, 0x2ef8, 0xbf800000);
	nv_wo32(ctx, 0x2f00, 0xbf800000);
}
363 | |||
/*
 * Fill an NV35/NV36 graphics context image with its power-on default
 * values.  Offsets and values come from hardware state dumps; do not
 * reorder these writes.
 */
static void
nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x040c, 0x00000101);
	nv_wo32(ctx, 0x0420, 0x00000111);
	nv_wo32(ctx, 0x0424, 0x00000060);
	nv_wo32(ctx, 0x0440, 0x00000080);
	nv_wo32(ctx, 0x0444, 0xffff0000);
	nv_wo32(ctx, 0x0448, 0x00000001);
	nv_wo32(ctx, 0x045c, 0x44400000);
	nv_wo32(ctx, 0x0488, 0xffff0000);
	for (i = 0x04dc; i < 0x04e4; i += 4)
		nv_wo32(ctx, i, 0x0fff0000);
	nv_wo32(ctx, 0x04e8, 0x00011100);
	for (i = 0x0504; i < 0x0544; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x054c, 0x4b7fffff);
	nv_wo32(ctx, 0x0588, 0x00000080);
	nv_wo32(ctx, 0x058c, 0x30201000);
	nv_wo32(ctx, 0x0590, 0x70605040);
	nv_wo32(ctx, 0x0594, 0xb8a89888);
	nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
	nv_wo32(ctx, 0x05ac, 0xb0000000);
	for (i = 0x0604; i < 0x0644; i += 4)
		nv_wo32(ctx, i, 0x00010588);
	for (i = 0x0644; i < 0x0684; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x06c4; i < 0x0704; i += 4)
		nv_wo32(ctx, i, 0x0008aae4);
	for (i = 0x0704; i < 0x0744; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0744; i < 0x0784; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	nv_wo32(ctx, 0x0860, 0x00040000);
	nv_wo32(ctx, 0x0864, 0x00010000);
	for (i = 0x0868; i < 0x0878; i += 4)
		nv_wo32(ctx, i, 0x00040004);
	/* Repeating per-unit group: three words every 16 bytes. */
	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
		nv_wo32(ctx, i + 0, 0x10700ff9);
		nv_wo32(ctx, i + 4, 0x0436086c);
		nv_wo32(ctx, i + 8, 0x000c001b);
	}
	for (i = 0x30bc; i < 0x30cc; i += 4)
		nv_wo32(ctx, i, 0x0000ffff);
	/* IEEE-754 float defaults (1.0, 2.0, 0.5, -1.0). */
	nv_wo32(ctx, 0x3450, 0x3f800000);
	nv_wo32(ctx, 0x380c, 0x3f800000);
	nv_wo32(ctx, 0x3820, 0x3f800000);
	nv_wo32(ctx, 0x384c, 0x40000000);
	nv_wo32(ctx, 0x3850, 0x3f800000);
	nv_wo32(ctx, 0x3854, 0x3f000000);
	nv_wo32(ctx, 0x385c, 0x40000000);
	nv_wo32(ctx, 0x3860, 0x3f800000);
	nv_wo32(ctx, 0x3868, 0xbf800000);
	nv_wo32(ctx, 0x3870, 0xbf800000);
}
421 | |||
422 | int | ||
423 | nv20_graph_context_new(struct nouveau_channel *chan, int engine) | ||
424 | { | ||
425 | struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine); | ||
426 | struct nouveau_gpuobj *grctx = NULL; | ||
427 | struct drm_device *dev = chan->dev; | ||
428 | int ret; | ||
429 | |||
430 | ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16, | ||
431 | NVOBJ_FLAG_ZERO_ALLOC, &grctx); | ||
432 | if (ret) | ||
433 | return ret; | ||
434 | |||
435 | /* Initialise default context values */ | ||
436 | pgraph->grctx_init(grctx); | ||
437 | |||
438 | /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ | ||
439 | /* CTX_USER */ | ||
440 | nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1); | ||
441 | |||
442 | nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4); | ||
443 | chan->engctx[engine] = grctx; | ||
444 | return 0; | ||
445 | } | ||
446 | |||
/*
 * Destroy a channel's PGRAPH context: save/evict it from the hardware
 * if currently active, remove it from the channel context table and
 * drop the gpuobj reference.
 */
void
nv20_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	/* Block context switches and PFIFO access while we inspect PGRAPH. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Unload the context if it's the currently active one */
	if (nv10_graph_channel(dev) == chan)
		nv20_graph_unload_context(dev);

	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	nv_wo32(pgraph->ctxtab, chan->id * 4, 0);

	nouveau_gpuobj_ref(NULL, &grctx);
	chan->engctx[engine] = NULL;
}
472 | |||
/*
 * Program memory tiling region 'i' into PGRAPH, both through the
 * directly mapped tile registers and (mirrored) through the RDI
 * window so the values also land in the engine's internal state.
 */
static void
nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);

	/* Mirror the same three values via RDI (index selects, data writes). */
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);

	/* NV20-family cards additionally carry a Z-compression value. */
	if (dev_priv->card_type == NV_20) {
		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
	}
}
496 | |||
/*
 * Bring up PGRAPH on NV20-family hardware: reset the engine, install
 * the channel context table, clear internal state via RDI, program
 * debug/tuning registers, tiling, and RAM configuration.
 *
 * The write sequence follows observed hardware bring-up; do not
 * reorder.  Always returns 0.
 */
int
nv20_graph_init(struct drm_device *dev, int engine)
{
	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp, vramsz;
	int i;

	/* Pulse the PMC enable bit to reset the whole PGRAPH engine. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	/* Tell the hardware where the channel context table lives. */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);

	nv20_graph_rdi(dev);

	/* Ack any pending interrupts, then enable them all. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
	nv_wr32(dev, 0x40009C           , 0x00000040);

	/* Chipset-specific tuning values (NV25+ vs original NV20). */
	if (dev_priv->chipset >= 0x25) {
		nv_wr32(dev, 0x400890, 0x00a8cfff);
		nv_wr32(dev, 0x400610, 0x304B1FB6);
		nv_wr32(dev, 0x400B80, 0x1cbd3883);
		nv_wr32(dev, 0x400B84, 0x44000000);
		nv_wr32(dev, 0x400098, 0x40000080);
		nv_wr32(dev, 0x400B88, 0x000000ff);

	} else {
		nv_wr32(dev, 0x400880, 0x0008c7df);
		nv_wr32(dev, 0x400094, 0x00000005);
		nv_wr32(dev, 0x400B80, 0x45eae20e);
		nv_wr32(dev, 0x400B84, 0x24000000);
		nv_wr32(dev, 0x400098, 0x00000040);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_tile_region(dev, i);

	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	/* NOTE(review): vramsz is already len-1, yet 0x400864/0x400868 get
	 * vramsz - 1 (net len-2) — looks odd; verify against hardware docs. */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, 0x400820, 0);
	nv_wr32(dev, 0x400824, 0);
	nv_wr32(dev, 0x400864, vramsz - 1);
	nv_wr32(dev, 0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	nv_wr32(dev, 0x400B20, 0x00000000);
	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);

	/* Open up the user clip rectangle to the full coordinate range. */
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

	return 0;
}
584 | |||
/*
 * Bring up PGRAPH on NV30-family hardware.  Same overall shape as
 * nv20_graph_init() (reset, context table, interrupts, tuning,
 * tiling, RAM config) but with NV30 register values and extra NV34
 * quirks.  The write sequence follows observed hardware bring-up;
 * do not reorder.  Always returns 0.
 */
int
nv30_graph_init(struct drm_device *dev, int engine)
{
	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	/* Pulse the PMC enable bit to reset the whole PGRAPH engine. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	/* Tell the hardware where the channel context table lives. */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);

	/* Ack any pending interrupts, then enable them all. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, 0x400890, 0x01b463ff);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	nv_wr32(dev, 0x400B80, 0x1003d888);
	nv_wr32(dev, 0x400B84, 0x0c000000);
	nv_wr32(dev, 0x400098, 0x00000000);
	nv_wr32(dev, 0x40009C, 0x0005ad00);
	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
	nv_wr32(dev, 0x4000a0, 0x00000000);
	nv_wr32(dev, 0x4000a4, 0x00000008);
	nv_wr32(dev, 0x4008a8, 0xb784a400);
	nv_wr32(dev, 0x400ba0, 0x002f8685);
	nv_wr32(dev, 0x400ba4, 0x00231f3f);
	nv_wr32(dev, 0x4008a4, 0x40000020);

	/* NV34-only extra state programmed through the RDI window. */
	if (dev_priv->chipset == 0x34) {
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
	}

	nv_wr32(dev, 0x4000c0, 0x00000016);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_tile_region(dev, i);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	nv_wr32(dev, 0x0040075c             , 0x00000001);

	/* begin RAM config */
	/* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	if (dev_priv->chipset != 0x34) {
		nv_wr32(dev, 0x400750, 0x00EA0000);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x400750, 0x00EA0004);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
	}

	return 0;
}
655 | |||
/*
 * Shut PGRAPH down: block PFIFO access, wait for idle, save the
 * active channel's context and mask all interrupts.
 *
 * Returns -EBUSY (with PFIFO access restored) if the engine fails to
 * idle during a suspend; 0 otherwise.
 */
int
nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	/* Stop PFIFO from feeding new work into PGRAPH. */
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	/* Wait for all status bits to clear; only fatal when suspending. */
	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
		return -EBUSY;
	}
	nv20_graph_unload_context(dev);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
	return 0;
}
668 | |||
/*
 * PGRAPH interrupt handler.  Drains NV03_PGRAPH_INTR, decoding the
 * trapped channel/subchannel/method for each pending source.  ERROR
 * interrupts caused by an "illegal method" are first offered to
 * software method handlers; if one consumes it, the report is
 * suppressed.  Remaining sources are acked and (rate-limited) logged.
 */
static void
nv20_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		/* Decode the faulting channel/subchannel/method address. */
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 chid = (addr & 0x01f00000) >> 20;
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				/* Give software methods a chance to handle it. */
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			}
		}

		/* Ack the interrupt and re-enable PFIFO access to PGRAPH. */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
				     "mthd 0x%04x data 0x%08x\n",
				chid, subc, class, mthd, data);
		}
	}
}
709 | |||
/*
 * Tear the PGRAPH engine down: unhook the interrupt handler before
 * releasing the context table and the engine object itself.
 */
static void
nv20_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);

	/* IRQ line 12 is PGRAPH (registered in nv20_graph_create()). */
	nouveau_irq_unregister(dev, 12);
	nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);

	NVOBJ_ENGINE_DEL(dev, GR);
	kfree(pgraph);
}
721 | |||
/*
 * Create and register the PGRAPH engine for NV20/NV30-family cards.
 * Picks the chipset-specific context-init table and sizes, allocates
 * the hardware channel context table, hooks the PGRAPH interrupt and
 * registers the object classes the engine accepts.
 *
 * Returns 0 on success (also, deliberately, for unknown chipsets —
 * the driver then continues without graphics acceleration) or a
 * negative errno on allocation failure.
 */
int
nv20_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv20_graph_engine *pgraph;
	int ret;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv20_graph_destroy;
	pgraph->base.fini = nv20_graph_fini;
	pgraph->base.context_new = nv20_graph_context_new;
	pgraph->base.context_del = nv20_graph_context_del;
	pgraph->base.object_new = nv04_graph_object_new;
	pgraph->base.set_tile_region = nv20_graph_set_tile_region;

	/* Default CTX_USER offset; overridden below where it differs. */
	pgraph->grctx_user = 0x0028;
	if (dev_priv->card_type == NV_20) {
		pgraph->base.init = nv20_graph_init;
		switch (dev_priv->chipset) {
		case 0x20:
			pgraph->grctx_init = nv20_graph_context_init;
			pgraph->grctx_size = NV20_GRCTX_SIZE;
			pgraph->grctx_user = 0x0000;
			break;
		case 0x25:
		case 0x28:
			pgraph->grctx_init = nv25_graph_context_init;
			pgraph->grctx_size = NV25_GRCTX_SIZE;
			break;
		case 0x2a:
			pgraph->grctx_init = nv2a_graph_context_init;
			pgraph->grctx_size = NV2A_GRCTX_SIZE;
			pgraph->grctx_user = 0x0000;
			break;
		default:
			/* Unknown NV2x: report and continue without accel. */
			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
			kfree(pgraph);
			return 0;
		}
	} else {
		pgraph->base.init = nv30_graph_init;
		switch (dev_priv->chipset) {
		case 0x30:
		case 0x31:
			pgraph->grctx_init = nv30_31_graph_context_init;
			pgraph->grctx_size = NV30_31_GRCTX_SIZE;
			break;
		case 0x34:
			pgraph->grctx_init = nv34_graph_context_init;
			pgraph->grctx_size = NV34_GRCTX_SIZE;
			break;
		case 0x35:
		case 0x36:
			pgraph->grctx_init = nv35_36_graph_context_init;
			pgraph->grctx_size = NV35_36_GRCTX_SIZE;
			break;
		default:
			/* Unknown NV3x: report and continue without accel. */
			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
			kfree(pgraph);
			return 0;
		}
	}

	/* Create Context Pointer Table */
	ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
				 &pgraph->ctxtab);
	if (ret) {
		kfree(pgraph);
		return ret;
	}

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv20_graph_isr);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

	/* 2D object classes common to the whole family. */
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	if (dev_priv->card_type == NV_20) {
		NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
		NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */

		/* kelvin */
		if (dev_priv->chipset < 0x25)
			NVOBJ_CLASS(dev, 0x0097, GR);
		else
			NVOBJ_CLASS(dev, 0x0597, GR);
	} else {
		NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
		NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
		NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
		NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */

		/* rankine: pick the 3D class variant by chipset bitmask. */
		if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
			NVOBJ_CLASS(dev, 0x0397, GR);
		else
		if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
			NVOBJ_CLASS(dev, 0x0697, GR);
		else
		if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
			NVOBJ_CLASS(dev, 0x0497, GR);
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c new file mode 100644 index 00000000000..e0135f0e214 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv30_fb.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Francisco Jerez. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_drm.h" | ||
31 | |||
32 | void | ||
33 | nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, | ||
34 | uint32_t size, uint32_t pitch, uint32_t flags) | ||
35 | { | ||
36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
37 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
38 | |||
39 | tile->addr = addr | 1; | ||
40 | tile->limit = max(1u, addr + size) - 1; | ||
41 | tile->pitch = pitch; | ||
42 | } | ||
43 | |||
44 | void | ||
45 | nv30_fb_free_tile_region(struct drm_device *dev, int i) | ||
46 | { | ||
47 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
48 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
49 | |||
50 | tile->addr = tile->limit = tile->pitch = 0; | ||
51 | } | ||
52 | |||
53 | static int | ||
54 | calc_bias(struct drm_device *dev, int k, int i, int j) | ||
55 | { | ||
56 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
57 | int b = (dev_priv->chipset > 0x30 ? | ||
58 | nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) : | ||
59 | 0) & 0xf; | ||
60 | |||
61 | return 2 * (b & 0x8 ? b - 0x10 : b); | ||
62 | } | ||
63 | |||
/* Build one 32-bit timing word: four bytes, each 0x80 OR'd with byte i
 * of @l adjusted by the per-slot bias and clamped to [0, 0x1f].
 */
static int
calc_ref(struct drm_device *dev, int l, int k, int i)
{
	int base = (l >> (8 * i)) & 0xff;	/* loop-invariant source byte */
	int word = 0;
	int slot;

	for (slot = 0; slot < 4; slot++) {
		int biased = base + calc_bias(dev, k, i, slot);

		word |= (0x80 | clamp(biased, 0, 0x1f)) << (8 * slot);
	}

	return word;
}
77 | |||
/* PFB init for the NV30 family: disable every tiling region, and on the
 * chipsets that need it (0x30/0x31/0x35) program the memory timing
 * registers from the reference value in 0x1003d0.  Always returns 0.
 */
int
nv30_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i, j;

	pfb->num_tiles = NV10_PFB_TILE__SIZE;

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	/* Init the memory timing regs at 0x10037c/0x1003ac */
	if (dev_priv->chipset == 0x30 ||
	    dev_priv->chipset == 0x31 ||
	    dev_priv->chipset == 0x35) {
		/* Related to ROP count */
		int n = (dev_priv->chipset == 0x31 ? 2 : 4);
		/* reference value expanded into the words written below */
		int l = nv_rd32(dev, 0x1003d0);

		for (i = 0; i < n; i++) {
			/* three words per unit at 0x10037c, stride 0xc */
			for (j = 0; j < 3; j++)
				nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
					calc_ref(dev, l, 0, j));

			/* two words per unit at 0x1003ac, stride 0x8 */
			for (j = 0; j < 2; j++)
				nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
					calc_ref(dev, l, 1, j));
		}
	}

	return 0;
}
112 | |||
/* PFB teardown: nothing to undo on NV30; kept for the engine vtable. */
void
nv30_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c new file mode 100644 index 00000000000..f0ac2a768c6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_fb.c | |||
@@ -0,0 +1,118 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
6 | void | ||
7 | nv40_fb_set_tile_region(struct drm_device *dev, int i) | ||
8 | { | ||
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
10 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
11 | |||
12 | switch (dev_priv->chipset) { | ||
13 | case 0x40: | ||
14 | nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); | ||
15 | nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); | ||
16 | nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); | ||
17 | break; | ||
18 | |||
19 | default: | ||
20 | nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit); | ||
21 | nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch); | ||
22 | nv_wr32(dev, NV40_PFB_TILE(i), tile->addr); | ||
23 | break; | ||
24 | } | ||
25 | } | ||
26 | |||
/* NV40-class on-chip GART setup: point the hardware at the SG context
 * DMA object, or program a bare "enabled" state when no hardware GART
 * object is in use.  Register bit meanings here are from mmio traces
 * and are not publicly documented.
 */
static void
nv40_fb_init_gart(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

	if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
		/* no HW GART object: just flag 0x100800 and bail */
		nv_wr32(dev, 0x100800, 0x00000001);
		return;
	}

	/* base = ctxdma PRAMIN offset; bit 1 presumably "valid" — from traces */
	nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
	nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x100820, 0x00000000);
}
42 | |||
/* NV44-class on-chip GART setup.  Unlike nv40_fb_init_gart(), the GART
 * base register wants the ctxdma's VRAM address, derived below from the
 * PRAMIN->VRAM mapping.  Register semantics are from mmio traces.
 */
static void
nv44_fb_init_gart(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
	u32 vinst;

	if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
		/* no HW GART object: reset 0x100850 and flag 0x100800 */
		nv_wr32(dev, 0x100850, 0x80000000);
		nv_wr32(dev, 0x100800, 0x00000001);
		return;
	}

	/* calculate vram address of this PRAMIN block, object
	 * must be allocated on 512KiB alignment, and not exceed
	 * a total size of 512KiB for this to work correctly
	 */
	vinst = nv_rd32(dev, 0x10020c);
	vinst -= ((gart->pinst >> 19) + 1) << 19;

	nv_wr32(dev, 0x100850, 0x80000000);
	nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);

	nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
	nv_wr32(dev, 0x100850, 0x00008000);
	nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
	nv_wr32(dev, 0x100820, 0x00000000);
	nv_wr32(dev, 0x10082c, 0x00000001);
	nv_wr32(dev, 0x100800, vinst | 0x00000010);
}
73 | |||
/* PFB init for the NV40 family: set up the on-chip GART (skipped on
 * chipsets 0x40/0x45), pick the tiling-region count for the chipset,
 * then disable every tiling region.  Always returns 0.
 */
int
nv40_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	uint32_t tmp;
	int i;

	/* 0x40/0x45 get no GART programming here; everyone else uses the
	 * NV44- or NV40-class path depending on graph class.
	 */
	if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev))
			nv44_fb_init_gart(dev);
		else
			nv40_fb_init_gart(dev);
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		/* clear bit 15 of CLOSE_PAGE2 — purpose not documented */
		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
		pfb->num_tiles = NV10_PFB_TILE__SIZE;
		break;
	case 0x46: /* G72 */
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
	case 0x4c: /* C51 (G7X version) */
		pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
		break;
	default:
		pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	return 0;
}
114 | |||
/* PFB teardown: nothing to undo on NV40; kept for the engine vtable. */
void
nv40_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c new file mode 100644 index 00000000000..68cb2d991c8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c | |||
@@ -0,0 +1,307 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "nouveau_drv.h" | ||
29 | #include "nouveau_drm.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | |||
/* PRAMIN byte offset of channel (c)'s RAMFC (FIFO context) entry.
 * Note: expands `dev_priv`, which must be in scope at the use site.
 */
#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
/* Size in bytes of one NV40 RAMFC entry. */
#define NV40_RAMFC__SIZE 128
34 | |||
/* Create the PFIFO context for @chan: wrap its RAMFC slot in a gpuobj,
 * map the channel's USER control area, seed the RAMFC entry, and enable
 * DMA mode for the channel.
 *
 * Returns 0 on success, a negative errno if the RAMFC gpuobj cannot be
 * created or the USER area cannot be mapped.
 */
int
nv40_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV40_RAMFC(chan->id);
	unsigned long flags;
	int ret;

	/* RAMFC slot as a fake gpuobj; zeroed both on alloc and on free */
	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
				      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
	if (ret)
		return ret;

	/* per-channel USER registers live in BAR0 at NV40_USER(id) */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV40_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* DMA put/get both start at the beginning of the pushbuffer */
	nv_wi32(dev, fc + 0, chan->pushbuf_base);
	nv_wi32(dev, fc + 4, chan->pushbuf_base);
	nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
	nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			      0x30000000 /* no idea.. */);
	nv_wi32(dev, fc + 60, 0x0001FFFF);

	/* enable the fifo dma operation */
	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
76 | |||
/* Restore PFIFO CACHE1 state for channel @chid from its RAMFC entry.
 * This is the mirror image of the save sequence in
 * nv40_fifo_unload_context(); the RAMFC offsets must stay in sync.
 */
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;

	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));

	/* No idea what 0x2058 is.. */
	/* bits 28-29 of the saved FETCH word go to 0x2058, remainder to
	 * the FETCH register itself */
	tmp = nv_ri32(dev, fc + 24);
	tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
	tmp2 |= (tmp & 0x30000000);
	nv_wr32(dev, 0x2058, tmp2);
	tmp &= ~0x30000000;
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);

	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
	tmp = nv_ri32(dev, fc + 40);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));

	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
	tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
	tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);

	nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
	/* NVIDIA does this next line twice... */
	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
	nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));

	/* zero CACHE1 get/put last */
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
123 | |||
/* Load @chan's FIFO context onto the hardware and make the channel the
 * active DMA-mode channel on CACHE1.  Always returns 0.
 */
int
nv40_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	uint32_t tmp;

	nv40_fifo_do_load_context(dev, chan->id);

	/* Set channel active, and in DMA mode */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		     NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);

	/* Reset DMA_CTL_AT_INFO to INVALID */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
143 | |||
/* Save the currently active channel's CACHE1 state into its RAMFC
 * entry, then switch the hardware to the dummy (last) channel.  RAMFC
 * offsets must mirror nv40_fifo_do_load_context().  Always returns 0;
 * a no-op when no valid channel is resident.
 */
int
nv40_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	uint32_t fc, tmp;
	int chid;

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;
	fc = NV40_RAMFC(chid);

	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
	nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
	/* merge the two bits kept in 0x2058 back into the FETCH word */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
	tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
	nv_wi32(dev, fc + 24, tmp);
	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
	nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
	nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
	nv_wi32(dev, fc + 40, tmp);
	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
	nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
	/* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
	 * more involved depending on the value of 0x3228?
	 */
	nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
	nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
	nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
	/* No idea what the below is for exactly, ripped from a mmio-trace */
	nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
	/* NVIDIA do this next line twice.. bug? */
	nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
	nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
	nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
#if 0 /* no real idea which is PUT/GET in UNK_48.. */
	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
	nv_wi32(dev, fc + 72, tmp);
#endif
	nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));

	/* park the hardware on the dummy channel (channels - 1) */
	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
		     NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
	return 0;
}
197 | |||
/* Pulse the PFIFO enable bit in PMC to reset the engine, then restore a
 * known register state.  Most values below come from mmio traces of the
 * binary driver; their individual meanings are not documented.
 */
static void
nv40_fifo_init_reset(struct drm_device *dev)
{
	int i;

	/* disable then re-enable PFIFO to reset it */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);

	nv_wr32(dev, 0x003224, 0x000f0078);
	nv_wr32(dev, 0x003210, 0x00000000);
	nv_wr32(dev, 0x003270, 0x00000000);
	nv_wr32(dev, 0x003240, 0x00000000);
	nv_wr32(dev, 0x003244, 0x00000000);
	nv_wr32(dev, 0x003258, 0x00000000);
	nv_wr32(dev, 0x002504, 0x00000000);
	for (i = 0; i < 16; i++)
		nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
	nv_wr32(dev, 0x00250c, 0x0000ffff);
	nv_wr32(dev, 0x002048, 0x00000000);
	nv_wr32(dev, 0x003228, 0x00000000);
	nv_wr32(dev, 0x0032e8, 0x00000000);
	nv_wr32(dev, 0x002410, 0x00000000);
	nv_wr32(dev, 0x002420, 0x00000000);
	nv_wr32(dev, 0x002058, 0x00000001);
	nv_wr32(dev, 0x00221c, 0x00000000);
	/* something with 0x2084, read/modify/write, no change */
	nv_wr32(dev, 0x002040, 0x000000ff);
	nv_wr32(dev, 0x002500, 0x00000000);
	nv_wr32(dev, 0x003200, 0x00000000);

	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
}
232 | |||
/* Program PFIFO's pointers to the instance-memory structures: RAMHT
 * (object hash table), RAMRO (runout buffer) and RAMFC (per-channel
 * FIFO contexts).  RAMFC placement is chipset-dependent.
 */
static void
nv40_fifo_init_ramxx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* RAMHT: base address plus search stride and table size bits */
	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((dev_priv->ramht->bits - 9) << 16) |
				       (dev_priv->ramht->gpuobj->pinst >> 8));
	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);

	/* 0x2230 — purpose unknown; set on G70/G71/G73 only */
	switch (dev_priv->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(dev, 0x2230, 1);
		break;
	default:
		break;
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x47:
	case 0x48:
	case 0x49:
	case 0x4b:
		/* fixed RAMFC location on these chipsets */
		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
		break;
	default:
		/* RAMFC is addressed relative to the end of VRAM here */
		nv_wr32(dev, 0x2230, 0);
		nv_wr32(dev, NV40_PFIFO_RAMFC,
			((dev_priv->vram_size - 512 * 1024 +
			  dev_priv->ramfc->pinst) >> 16) | (3 << 16));
		break;
	}
}
273 | |||
/* Hook up the PFIFO interrupt handler (IRQ source 8), then acknowledge
 * any pending interrupts (0x2100) and unmask them all (0x2140).
 */
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}
281 | |||
/* Bring up PFIFO: reset the engine, program the RAM structure pointers,
 * park the hardware on the dummy channel, enable interrupts and cache
 * reassignment, and re-enable DMA mode for every already-existing
 * channel.  Always returns 0.
 */
int
nv40_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	nv40_fifo_init_reset(dev);
	nv40_fifo_init_ramxx(dev);

	/* load the dummy (last) channel's context */
	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);

	nv40_fifo_init_intr(dev);
	pfifo->enable(dev);
	pfifo->reassign(dev, true);

	/* restore the DMA-mode bit for channels that already exist */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->channels.ptr[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c new file mode 100644 index 00000000000..ba14a93d8af --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -0,0 +1,490 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_grctx.h" | ||
31 | #include "nouveau_ramht.h" | ||
32 | |||
/* PGRAPH engine instance for the NV40 family.  grctx_size caches the
 * size in bytes of a channel's graphics context, measured when the
 * context program is generated at init time.
 */
struct nv40_graph_engine {
	struct nouveau_exec_engine base;
	u32 grctx_size;
};
37 | |||
/* Create a graphics context for @chan: allocate the grctx gpuobj, fill
 * it with default values via the generated context program, then hook
 * it up in the channel's RAMFC — and directly on PFIFO if the channel
 * is currently resident there.
 *
 * Returns 0 on success or a negative errno from gpuobj allocation.
 */
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *grctx = NULL;
	struct nouveau_grctx ctx = {};
	unsigned long flags;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv40_grctx_init(&ctx);

	/* first word of the context holds its own vram address */
	nv_wo32(grctx, 0, grctx->vinst);

	/* init grctx pointer in ramfc, and on PFIFO if channel is
	 * already active there
	 */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
		nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	chan->engctx[engine] = grctx;
	return 0;
}
76 | |||
/* Destroy @chan's graphics context.  Before freeing, clear any live
 * PGRAPH reference to it: 0x40032c/0x400330 are compared against this
 * context's instance address — presumably the current/next context
 * pointers (NOTE(review): register semantics from traces, unconfirmed).
 */
static void
nv40_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 inst = 0x01000000 | (grctx->pinst >> 4);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
	if (nv_rd32(dev, 0x40032c) == inst)
		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
	if (nv_rd32(dev, 0x400330) == inst)
		nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
	nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	nouveau_gpuobj_ref(NULL, &grctx);
	chan->engctx[engine] = NULL;
}
99 | |||
/* Create a 20-byte graphics object of @class for @chan and insert it
 * into the channel's RAMHT under @handle.
 *
 * Returns 0 on success or a negative errno from allocation/insertion.
 */
int
nv40_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;	/* engine 1 — presumably PGRAPH; verify against engine table */
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
#ifndef __BIG_ENDIAN
	nv_wo32(obj, 0x08, 0x00000000);
#else
	nv_wo32(obj, 0x08, 0x01000000);	/* big-endian variant of word 2 */
#endif
	nv_wo32(obj, 0x0c, 0x00000000);
	nv_wo32(obj, 0x10, 0x00000000);

	/* drop our local ref regardless of insert result */
	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
128 | |||
/* Mirror tiling region @i's state into the PGRAPH tile registers.
 * Which register banks exist varies by chipset: some write both the
 * NV20-offset and NV40-offset banks, NV44-class only the NV20 bank,
 * and the G7x group uses the NV47 offsets.
 */
static void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x41: /* guess */
	case 0x42:
	case 0x43:
	case 0x45: /* guess */
	case 0x4e:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	case 0x44:
	case 0x4a:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x4c:
	case 0x67:
	default:
		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	}
}
171 | |||
172 | /* | ||
173 | * G70 0x47 | ||
174 | * G71 0x49 | ||
175 | * NV45 0x48 | ||
176 | * G72[M] 0x46 | ||
177 | * G73 0x4b | ||
178 | * C51_G7X 0x4c | ||
179 | * C51 0x4e | ||
180 | */ | ||
181 | int | ||
182 | nv40_graph_init(struct drm_device *dev, int engine) | ||
183 | { | ||
184 | struct nv40_graph_engine *pgraph = nv_engine(dev, engine); | ||
185 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
186 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
187 | struct nouveau_grctx ctx = {}; | ||
188 | uint32_t vramsz, *cp; | ||
189 | int i, j; | ||
190 | |||
191 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & | ||
192 | ~NV_PMC_ENABLE_PGRAPH); | ||
193 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | | ||
194 | NV_PMC_ENABLE_PGRAPH); | ||
195 | |||
196 | cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); | ||
197 | if (!cp) | ||
198 | return -ENOMEM; | ||
199 | |||
200 | ctx.dev = dev; | ||
201 | ctx.mode = NOUVEAU_GRCTX_PROG; | ||
202 | ctx.data = cp; | ||
203 | ctx.ctxprog_max = 256; | ||
204 | nv40_grctx_init(&ctx); | ||
205 | pgraph->grctx_size = ctx.ctxvals_pos * 4; | ||
206 | |||
207 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); | ||
208 | for (i = 0; i < ctx.ctxprog_len; i++) | ||
209 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); | ||
210 | |||
211 | kfree(cp); | ||
212 | |||
213 | /* No context present currently */ | ||
214 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); | ||
215 | |||
216 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); | ||
217 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); | ||
218 | |||
219 | nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); | ||
220 | nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); | ||
221 | nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0); | ||
222 | nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055); | ||
223 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000); | ||
224 | nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); | ||
225 | |||
226 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); | ||
227 | nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); | ||
228 | |||
229 | j = nv_rd32(dev, 0x1540) & 0xff; | ||
230 | if (j) { | ||
231 | for (i = 0; !(j & 1); j >>= 1, i++) | ||
232 | ; | ||
233 | nv_wr32(dev, 0x405000, i); | ||
234 | } | ||
235 | |||
236 | if (dev_priv->chipset == 0x40) { | ||
237 | nv_wr32(dev, 0x4009b0, 0x83280fff); | ||
238 | nv_wr32(dev, 0x4009b4, 0x000000a0); | ||
239 | } else { | ||
240 | nv_wr32(dev, 0x400820, 0x83280eff); | ||
241 | nv_wr32(dev, 0x400824, 0x000000a0); | ||
242 | } | ||
243 | |||
244 | switch (dev_priv->chipset) { | ||
245 | case 0x40: | ||
246 | case 0x45: | ||
247 | nv_wr32(dev, 0x4009b8, 0x0078e366); | ||
248 | nv_wr32(dev, 0x4009bc, 0x0000014c); | ||
249 | break; | ||
250 | case 0x41: | ||
251 | case 0x42: /* pciid also 0x00Cx */ | ||
252 | /* case 0x0120: XXX (pciid) */ | ||
253 | nv_wr32(dev, 0x400828, 0x007596ff); | ||
254 | nv_wr32(dev, 0x40082c, 0x00000108); | ||
255 | break; | ||
256 | case 0x43: | ||
257 | nv_wr32(dev, 0x400828, 0x0072cb77); | ||
258 | nv_wr32(dev, 0x40082c, 0x00000108); | ||
259 | break; | ||
260 | case 0x44: | ||
261 | case 0x46: /* G72 */ | ||
262 | case 0x4a: | ||
263 | case 0x4c: /* G7x-based C51 */ | ||
264 | case 0x4e: | ||
265 | nv_wr32(dev, 0x400860, 0); | ||
266 | nv_wr32(dev, 0x400864, 0); | ||
267 | break; | ||
268 | case 0x47: /* G70 */ | ||
269 | case 0x49: /* G71 */ | ||
270 | case 0x4b: /* G73 */ | ||
271 | nv_wr32(dev, 0x400828, 0x07830610); | ||
272 | nv_wr32(dev, 0x40082c, 0x0000016A); | ||
273 | break; | ||
274 | default: | ||
275 | break; | ||
276 | } | ||
277 | |||
278 | nv_wr32(dev, 0x400b38, 0x2ffff800); | ||
279 | nv_wr32(dev, 0x400b3c, 0x00006000); | ||
280 | |||
281 | /* Tiling related stuff. */ | ||
282 | switch (dev_priv->chipset) { | ||
283 | case 0x44: | ||
284 | case 0x4a: | ||
285 | nv_wr32(dev, 0x400bc4, 0x1003d888); | ||
286 | nv_wr32(dev, 0x400bbc, 0xb7a7b500); | ||
287 | break; | ||
288 | case 0x46: | ||
289 | nv_wr32(dev, 0x400bc4, 0x0000e024); | ||
290 | nv_wr32(dev, 0x400bbc, 0xb7a7b520); | ||
291 | break; | ||
292 | case 0x4c: | ||
293 | case 0x4e: | ||
294 | case 0x67: | ||
295 | nv_wr32(dev, 0x400bc4, 0x1003d888); | ||
296 | nv_wr32(dev, 0x400bbc, 0xb7a7b540); | ||
297 | break; | ||
298 | default: | ||
299 | break; | ||
300 | } | ||
301 | |||
302 | /* Turn all the tiling regions off. */ | ||
303 | for (i = 0; i < pfb->num_tiles; i++) | ||
304 | nv40_graph_set_tile_region(dev, i); | ||
305 | |||
306 | /* begin RAM config */ | ||
307 | vramsz = pci_resource_len(dev->pdev, 0) - 1; | ||
308 | switch (dev_priv->chipset) { | ||
309 | case 0x40: | ||
310 | nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); | ||
311 | nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); | ||
312 | nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0)); | ||
313 | nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1)); | ||
314 | nv_wr32(dev, 0x400820, 0); | ||
315 | nv_wr32(dev, 0x400824, 0); | ||
316 | nv_wr32(dev, 0x400864, vramsz); | ||
317 | nv_wr32(dev, 0x400868, vramsz); | ||
318 | break; | ||
319 | default: | ||
320 | switch (dev_priv->chipset) { | ||
321 | case 0x41: | ||
322 | case 0x42: | ||
323 | case 0x43: | ||
324 | case 0x45: | ||
325 | case 0x4e: | ||
326 | case 0x44: | ||
327 | case 0x4a: | ||
328 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
329 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
330 | break; | ||
331 | default: | ||
332 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
333 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
334 | break; | ||
335 | } | ||
336 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
337 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
338 | nv_wr32(dev, 0x400840, 0); | ||
339 | nv_wr32(dev, 0x400844, 0); | ||
340 | nv_wr32(dev, 0x4008A0, vramsz); | ||
341 | nv_wr32(dev, 0x4008A4, vramsz); | ||
342 | break; | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
static int
nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	/* If a channel context is currently resident on PGRAPH (bit 24 of
	 * 0x40032c set), force it to be saved out before shutdown/suspend.
	 */
	u32 inst = nv_rd32(dev, 0x40032c);
	if (inst & 0x01000000) {
		nv_wr32(dev, 0x400720, 0x00000000);
		nv_wr32(dev, 0x400784, inst);
		nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
		nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
		/* Wait for the context program to finish; on timeout log the
		 * ctxprog instruction it was stuck on for diagnosis. */
		if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
			u32 insn = nv_rd32(dev, 0x400308);
			NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
		}
		/* Clear the "context resident" bit regardless of outcome. */
		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
	}
	return 0;
}
365 | |||
366 | static int | ||
367 | nv40_graph_isr_chid(struct drm_device *dev, u32 inst) | ||
368 | { | ||
369 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
370 | struct nouveau_gpuobj *grctx; | ||
371 | unsigned long flags; | ||
372 | int i; | ||
373 | |||
374 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
375 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
376 | if (!dev_priv->channels.ptr[i]) | ||
377 | continue; | ||
378 | grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; | ||
379 | |||
380 | if (grctx && grctx->pinst == inst) | ||
381 | break; | ||
382 | } | ||
383 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
384 | return i; | ||
385 | } | ||
386 | |||
/* PGRAPH interrupt handler: drain and acknowledge all pending PGRAPH
 * interrupts, dispatching ILLEGAL_MTHD errors to software methods and
 * rate-limited logging everything that remains unhandled.
 */
static void
nv40_graph_isr(struct drm_device *dev)
{
	u32 stat;

	/* Loop until the interrupt status register reads clear; new
	 * interrupts may arrive while we service earlier ones. */
	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		/* Instance address of the currently resident context,
		 * used to attribute the fault to a channel id. */
		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
		u32 chid = nv40_graph_isr_chid(dev, inst);
		/* Trapped address encodes the subchannel and method. */
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				/* A registered software method may handle
				 * this; if it does, suppress the error from
				 * the log output below. */
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			} else
			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
				/* Zero-mask read-modify-write of 0x402000;
				 * NOTE(review): presumably acts as an ack of
				 * the vtx-protection fault — confirm against
				 * hardware docs. */
				nv_mask(dev, 0x402000, 0, 0);
			}
		}

		/* Acknowledge what we saw and re-enable PGRAPH FIFO access. */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
	}
}
431 | |||
/* Tear down the GR engine: stop taking PGRAPH interrupts first, then
 * remove the engine registration, then free its state. */
static void
nv40_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);

	/* irq 12 is PGRAPH; must be unregistered before the engine goes
	 * away so the ISR cannot run against freed state. */
	nouveau_irq_unregister(dev, 12);

	NVOBJ_ENGINE_DEL(dev, GR);
	kfree(pgraph);
}
442 | |||
/* Allocate and register the NV40-family GR engine: fill in the engine
 * vtable, hook the PGRAPH interrupt (irq 12), and register the object
 * classes this generation supports.
 *
 * Returns 0 on success or -ENOMEM if the engine struct cannot be
 * allocated.
 */
int
nv40_graph_create(struct drm_device *dev)
{
	struct nv40_graph_engine *pgraph;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv40_graph_destroy;
	pgraph->base.init = nv40_graph_init;
	pgraph->base.fini = nv40_graph_fini;
	pgraph->base.context_new = nv40_graph_context_new;
	pgraph->base.context_del = nv40_graph_context_del;
	pgraph->base.object_new = nv40_graph_object_new;
	pgraph->base.set_tile_region = nv40_graph_set_tile_region;

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv40_graph_isr);

	/* NOTE(review): 0x506e (nvsw) is registered twice — here and again
	 * below next to its method hook; the second registration appears
	 * redundant — confirm whether NVOBJ_CLASS tolerates duplicates. */
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */

	/* curie: the 3D class differs between NV44-class and other NV4x */
	if (nv44_graph_class(dev))
		NVOBJ_CLASS(dev, 0x4497, GR);
	else
		NVOBJ_CLASS(dev, 0x4097, GR);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c new file mode 100644 index 00000000000..f70447d131d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_grctx.c | |||
@@ -0,0 +1,662 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | /* NVIDIA context programs handle a number of other conditions which are | ||
26 | * not implemented in our versions. It's not clear why NVIDIA context | ||
27 | * programs have this code, nor whether it's strictly necessary for | ||
28 | * correct operation. We'll implement additional handling if/when we | ||
29 | * discover it's necessary. | ||
30 | * | ||
31 | * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state" | ||
32 | * flag is set, this gets saved into the context. | ||
33 | * - On context save, the context program for all cards load nsource | ||
34 | * into a flag register and check for ILLEGAL_MTHD. If it's set, | ||
35 | * opcode 0x60000d is called before resuming normal operation. | ||
36 | * - Some context programs check more conditions than the above. NV44 | ||
37 | * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001)) | ||
38 | * and calls 0x60000d before resuming normal operation. | ||
39 | * - At the very beginning of NVIDIA's context programs, flag 9 is checked | ||
40 | * and if true 0x800001 is called with count=0, pos=0, the flag is cleared | ||
41 | * and then the ctxprog is aborted. It looks like a complicated NOP, | ||
42 | * its purpose is unknown. | ||
43 | * - In the section of code that loads the per-vs state, NVIDIA check | ||
44 | * flag 10. If it's set, they only transfer the small 0x300 byte block | ||
45 | * of state + the state for a single vs as opposed to the state for | ||
46 | * all vs units. It doesn't seem likely that it'll occur in normal | ||
47 | * operation, especially seeing as it appears NVIDIA may have screwed | ||
48 | * up the ctxprogs for some cards and have an invalid instruction | ||
49 | * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction. | ||
50 | * - There's a number of places where context offset 0 (where we place | ||
51 | * the PRAMIN offset of the context) is loaded into either 0x408000, | ||
52 | * 0x408004 or 0x408008. Not sure what's up there either. | ||
53 | * - The ctxprogs for some cards save 0x400a00 again during the cleanup | ||
54 | * path for auto-loadctx. | ||
55 | */ | ||
56 | |||
/* Context-program (ctxprog) flag identifiers.  Each flag is addressed as
 * (word * 32) + bit within the ctxprog flag space; the _CLEAR/_SET and
 * state-name constants below give the two values each flag can take. */
#define CP_FLAG_CLEAR                 0
#define CP_FLAG_SET                   1
#define CP_FLAG_SWAP_DIRECTION        ((0 * 32) + 0)
#define CP_FLAG_SWAP_DIRECTION_LOAD   0
#define CP_FLAG_SWAP_DIRECTION_SAVE   1
#define CP_FLAG_USER_SAVE             ((0 * 32) + 5)
#define CP_FLAG_USER_SAVE_NOT_PENDING 0
#define CP_FLAG_USER_SAVE_PENDING     1
#define CP_FLAG_USER_LOAD             ((0 * 32) + 6)
#define CP_FLAG_USER_LOAD_NOT_PENDING 0
#define CP_FLAG_USER_LOAD_PENDING     1
#define CP_FLAG_STATUS                ((3 * 32) + 0)
#define CP_FLAG_STATUS_IDLE           0
#define CP_FLAG_STATUS_BUSY           1
#define CP_FLAG_AUTO_SAVE             ((3 * 32) + 4)
#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
#define CP_FLAG_AUTO_SAVE_PENDING     1
#define CP_FLAG_AUTO_LOAD             ((3 * 32) + 5)
#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
#define CP_FLAG_AUTO_LOAD_PENDING     1
#define CP_FLAG_UNK54                 ((3 * 32) + 6)
#define CP_FLAG_UNK54_CLEAR           0
#define CP_FLAG_UNK54_SET             1
#define CP_FLAG_ALWAYS                ((3 * 32) + 8)
#define CP_FLAG_ALWAYS_FALSE          0
#define CP_FLAG_ALWAYS_TRUE           1
#define CP_FLAG_UNK57                 ((3 * 32) + 9)
#define CP_FLAG_UNK57_CLEAR           0
#define CP_FLAG_UNK57_SET             1

/* Ctxprog instruction opcodes and their operand field masks/shifts. */
#define CP_CTX                   0x00100000
#define CP_CTX_COUNT             0x000fc000
#define CP_CTX_COUNT_SHIFT               14
#define CP_CTX_REG               0x00003fff
#define CP_LOAD_SR               0x00200000
#define CP_LOAD_SR_VALUE         0x000fffff
#define CP_BRA                   0x00400000
#define CP_BRA_IP                0x0000ff00
#define CP_BRA_IP_SHIFT                   8
#define CP_BRA_IF_CLEAR          0x00000080
#define CP_BRA_FLAG              0x0000007f
#define CP_WAIT                  0x00500000
#define CP_WAIT_SET              0x00000080
#define CP_WAIT_FLAG             0x0000007f
#define CP_SET                   0x00700000
#define CP_SET_1                 0x00000080
#define CP_SET_FLAG              0x0000007f
#define CP_NEXT_TO_SWAP          0x00600007
#define CP_NEXT_TO_CURRENT       0x00600009
#define CP_SET_CONTEXT_POINTER   0x0060000a
#define CP_END                   0x0060000e
#define CP_LOAD_MAGIC_UNK01      0x00800001 /* unknown */
#define CP_LOAD_MAGIC_NV44TCL    0x00800029 /* per-vs state (0x4497) */
#define CP_LOAD_MAGIC_NV40TCL    0x00800041 /* per-vs state (0x4097) */
111 | |||
112 | #include "drmP.h" | ||
113 | #include "nouveau_drv.h" | ||
114 | #include "nouveau_grctx.h" | ||
115 | |||
116 | /* TODO: | ||
117 | * - get vs count from 0x1540 | ||
118 | */ | ||
119 | |||
120 | static int | ||
121 | nv40_graph_vs_count(struct drm_device *dev) | ||
122 | { | ||
123 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
124 | |||
125 | switch (dev_priv->chipset) { | ||
126 | case 0x47: | ||
127 | case 0x49: | ||
128 | case 0x4b: | ||
129 | return 8; | ||
130 | case 0x40: | ||
131 | return 6; | ||
132 | case 0x41: | ||
133 | case 0x42: | ||
134 | return 5; | ||
135 | case 0x43: | ||
136 | case 0x44: | ||
137 | case 0x46: | ||
138 | case 0x4a: | ||
139 | return 3; | ||
140 | case 0x4c: | ||
141 | case 0x4e: | ||
142 | case 0x67: | ||
143 | default: | ||
144 | return 1; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | |||
/* Branch-target labels used while assembling the ctxprog; resolved to
 * instruction offsets by cp_name()/cp_bra(). */
enum cp_label {
	cp_check_load = 1,
	cp_setup_auto_load,
	cp_setup_load,
	cp_setup_save,
	cp_swap_state,
	cp_swap_state3d_3_is_save,
	cp_prepare_exit,
	cp_exit,
};
159 | |||
/* Emit the "general" (non-3D) PGRAPH register ranges into the ctxprog /
 * context image.  cp_ctx() declares a run of registers to be swapped on
 * context switch; gr_def() sets a register's initial context value.
 * Register ranges and defaults vary by chipset; the exact emission order
 * matters, as it defines the context image layout.
 */
static void
nv40_graph_construct_general(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;

	cp_ctx(ctx, 0x4000a4, 1);
	gr_def(ctx, 0x4000a4, 0x00000008);
	cp_ctx(ctx, 0x400144, 58);
	gr_def(ctx, 0x400144, 0x00000001);
	cp_ctx(ctx, 0x400314, 1);
	gr_def(ctx, 0x400314, 0x00000000);
	cp_ctx(ctx, 0x400400, 10);
	cp_ctx(ctx, 0x400480, 10);
	cp_ctx(ctx, 0x400500, 19);
	gr_def(ctx, 0x400514, 0x00040000);
	gr_def(ctx, 0x400524, 0x55555555);
	gr_def(ctx, 0x400528, 0x55555555);
	gr_def(ctx, 0x40052c, 0x55555555);
	gr_def(ctx, 0x400530, 0x55555555);
	cp_ctx(ctx, 0x400560, 6);
	gr_def(ctx, 0x400568, 0x0000ffff);
	gr_def(ctx, 0x40056c, 0x0000ffff);
	cp_ctx(ctx, 0x40057c, 5);
	cp_ctx(ctx, 0x400710, 3);
	gr_def(ctx, 0x400710, 0x20010001);
	gr_def(ctx, 0x400714, 0x0f73ef00);
	cp_ctx(ctx, 0x400724, 1);
	gr_def(ctx, 0x400724, 0x02008821);
	cp_ctx(ctx, 0x400770, 3);
	/* The 0x4008xx/0x4009xx layout differs between original NV40 and
	 * all later NV4x parts. */
	if (dev_priv->chipset == 0x40) {
		cp_ctx(ctx, 0x400814, 4);
		cp_ctx(ctx, 0x400828, 5);
		cp_ctx(ctx, 0x400840, 5);
		gr_def(ctx, 0x400850, 0x00000040);
		cp_ctx(ctx, 0x400858, 4);
		gr_def(ctx, 0x400858, 0x00000040);
		gr_def(ctx, 0x40085c, 0x00000040);
		gr_def(ctx, 0x400864, 0x80000000);
		cp_ctx(ctx, 0x40086c, 9);
		gr_def(ctx, 0x40086c, 0x80000000);
		gr_def(ctx, 0x400870, 0x80000000);
		gr_def(ctx, 0x400874, 0x80000000);
		gr_def(ctx, 0x400878, 0x80000000);
		gr_def(ctx, 0x400888, 0x00000040);
		gr_def(ctx, 0x40088c, 0x80000000);
		cp_ctx(ctx, 0x4009c0, 8);
		gr_def(ctx, 0x4009cc, 0x80000000);
		gr_def(ctx, 0x4009dc, 0x80000000);
	} else {
		cp_ctx(ctx, 0x400840, 20);
		if (nv44_graph_class(ctx->dev)) {
			for (i = 0; i < 8; i++)
				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
		}
		gr_def(ctx, 0x400880, 0x00000040);
		gr_def(ctx, 0x400884, 0x00000040);
		gr_def(ctx, 0x400888, 0x00000040);
		cp_ctx(ctx, 0x400894, 11);
		gr_def(ctx, 0x400894, 0x00000040);
		if (!nv44_graph_class(ctx->dev)) {
			for (i = 0; i < 8; i++)
				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
		}
		cp_ctx(ctx, 0x4008e0, 2);
		cp_ctx(ctx, 0x4008f8, 2);
		if (dev_priv->chipset == 0x4c ||
		    (dev_priv->chipset & 0xf0) == 0x60)
			cp_ctx(ctx, 0x4009f8, 1);
	}
	cp_ctx(ctx, 0x400a00, 73);
	gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
	cp_ctx(ctx, 0x401000, 4);
	cp_ctx(ctx, 0x405004, 1);
	switch (dev_priv->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		cp_ctx(ctx, 0x403448, 1);
		gr_def(ctx, 0x403448, 0x00001010);
		break;
	default:
		cp_ctx(ctx, 0x403440, 1);
		switch (dev_priv->chipset) {
		case 0x40:
			gr_def(ctx, 0x403440, 0x00000010);
			break;
		case 0x44:
		case 0x46:
		case 0x4a:
			gr_def(ctx, 0x403440, 0x00003010);
			break;
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x4c:
		case 0x4e:
		case 0x67:
		default:
			gr_def(ctx, 0x403440, 0x00001010);
			break;
		}
		break;
	}
}
265 | |||
/* Emit 3D state block 1 of the context image: rasterizer/texture-unit
 * register ranges (0x4018xx-0x401exx plus 0x400f5c) with their default
 * values.  Ranges and counts differ per chipset; emission order defines
 * the context layout and must not change.
 */
static void
nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;

	if (dev_priv->chipset == 0x40) {
		cp_ctx(ctx, 0x401880, 51);
		gr_def(ctx, 0x401940, 0x00000100);
	} else
	if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
		cp_ctx(ctx, 0x401880, 32);
		for (i = 0; i < 16; i++)
			gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
		if (dev_priv->chipset == 0x46)
			cp_ctx(ctx, 0x401900, 16);
		cp_ctx(ctx, 0x401940, 3);
	}
	cp_ctx(ctx, 0x40194c, 18);
	gr_def(ctx, 0x401954, 0x00000111);
	gr_def(ctx, 0x401958, 0x00080060);
	gr_def(ctx, 0x401974, 0x00000080);
	gr_def(ctx, 0x401978, 0xffff0000);
	gr_def(ctx, 0x40197c, 0x00000001);
	gr_def(ctx, 0x401990, 0x46400000);
	if (dev_priv->chipset == 0x40) {
		cp_ctx(ctx, 0x4019a0, 2);
		cp_ctx(ctx, 0x4019ac, 5);
	} else {
		cp_ctx(ctx, 0x4019a0, 1);
		cp_ctx(ctx, 0x4019b4, 3);
	}
	gr_def(ctx, 0x4019bc, 0xffff0000);
	/* Extra 0x4019c0 range only on the G7x-class parts. */
	switch (dev_priv->chipset) {
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
		cp_ctx(ctx, 0x4019c0, 18);
		for (i = 0; i < 16; i++)
			gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
		break;
	}
	cp_ctx(ctx, 0x401a08, 8);
	gr_def(ctx, 0x401a10, 0x0fff0000);
	gr_def(ctx, 0x401a14, 0x0fff0000);
	gr_def(ctx, 0x401a1c, 0x00011100);
	cp_ctx(ctx, 0x401a2c, 4);
	cp_ctx(ctx, 0x401a44, 26);
	for (i = 0; i < 16; i++)
		gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
	gr_def(ctx, 0x401a8c, 0x4b7fffff);
	if (dev_priv->chipset == 0x40) {
		cp_ctx(ctx, 0x401ab8, 3);
	} else {
		cp_ctx(ctx, 0x401ab8, 1);
		cp_ctx(ctx, 0x401ac0, 1);
	}
	cp_ctx(ctx, 0x401ad0, 8);
	gr_def(ctx, 0x401ad0, 0x30201000);
	gr_def(ctx, 0x401ad4, 0x70605040);
	gr_def(ctx, 0x401ad8, 0xb8a89888);
	gr_def(ctx, 0x401adc, 0xf8e8d8c8);
	cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
	gr_def(ctx, 0x401b10, 0x40100000);
	cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
	gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
			      0x00000004 : 0x00000000);
	cp_ctx(ctx, 0x401b30, 25);
	gr_def(ctx, 0x401b34, 0x0000ffff);
	gr_def(ctx, 0x401b68, 0x435185d6);
	gr_def(ctx, 0x401b6c, 0x2155b699);
	gr_def(ctx, 0x401b70, 0xfedcba98);
	gr_def(ctx, 0x401b74, 0x00000098);
	gr_def(ctx, 0x401b84, 0xffffffff);
	gr_def(ctx, 0x401b88, 0x00ff7000);
	gr_def(ctx, 0x401b8c, 0x0000ffff);
	if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
	    dev_priv->chipset != 0x4e)
		cp_ctx(ctx, 0x401b94, 1);
	cp_ctx(ctx, 0x401b98, 8);
	gr_def(ctx, 0x401b9c, 0x00ff0000);
	cp_ctx(ctx, 0x401bc0, 9);
	gr_def(ctx, 0x401be0, 0x00ffff00);
	cp_ctx(ctx, 0x401c00, 192);
	for (i = 0; i < 16; i++) { /* fragment texture units */
		gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
		gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
		gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
		gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
		gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
		gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
	}
	for (i = 0; i < 4; i++) { /* vertex texture units */
		gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
		gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
		gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
		gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
	}
	cp_ctx(ctx, 0x400f5c, 3);
	gr_def(ctx, 0x400f5c, 0x00000002);
	cp_ctx(ctx, 0x400f84, 1);
}
370 | |||
/* Emit 3D state block 2 of the context image: the 0x402xxx-0x407xxx
 * PGRAPH register ranges and per-chipset defaults, including the
 * per-vertex-shader-unit 0x403420 entries sized by nv40_graph_vs_count().
 * Emission order defines the context layout and must not change.
 */
static void
nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;

	cp_ctx(ctx, 0x402000, 1);
	cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
	switch (dev_priv->chipset) {
	case 0x40:
		gr_def(ctx, 0x402404, 0x00000001);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		gr_def(ctx, 0x402404, 0x00000020);
		break;
	case 0x46:
	case 0x49:
	case 0x4b:
		gr_def(ctx, 0x402404, 0x00000421);
		break;
	default:
		/* no break needed: default is the final label */
		gr_def(ctx, 0x402404, 0x00000021);
	}
	if (dev_priv->chipset != 0x40)
		gr_def(ctx, 0x402408, 0x030c30c3);
	switch (dev_priv->chipset) {
	case 0x44:
	case 0x46:
	case 0x4a:
	case 0x4c:
	case 0x4e:
	case 0x67:
		cp_ctx(ctx, 0x402440, 1);
		gr_def(ctx, 0x402440, 0x00011001);
		break;
	default:
		break;
	}
	cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
	gr_def(ctx, 0x402488, 0x3e020200);
	gr_def(ctx, 0x40248c, 0x00ffffff);
	switch (dev_priv->chipset) {
	case 0x40:
		gr_def(ctx, 0x402490, 0x60103f00);
		break;
	case 0x47:
		gr_def(ctx, 0x402490, 0x40103f00);
		break;
	case 0x41:
	case 0x42:
	case 0x49:
	case 0x4b:
		gr_def(ctx, 0x402490, 0x20103f00);
		break;
	default:
		gr_def(ctx, 0x402490, 0x0c103f00);
		break;
	}
	gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
			      0x00020000 : 0x00040000);
	cp_ctx(ctx, 0x402500, 31);
	gr_def(ctx, 0x402530, 0x00008100);
	if (dev_priv->chipset == 0x40)
		cp_ctx(ctx, 0x40257c, 6);
	cp_ctx(ctx, 0x402594, 16);
	cp_ctx(ctx, 0x402800, 17);
	gr_def(ctx, 0x402800, 0x00000001);
	switch (dev_priv->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		cp_ctx(ctx, 0x402864, 1);
		gr_def(ctx, 0x402864, 0x00001001);
		cp_ctx(ctx, 0x402870, 3);
		gr_def(ctx, 0x402878, 0x00000003);
		if (dev_priv->chipset != 0x47) { /* belong at end!! */
			cp_ctx(ctx, 0x402900, 1);
			cp_ctx(ctx, 0x402940, 1);
			cp_ctx(ctx, 0x402980, 1);
			cp_ctx(ctx, 0x4029c0, 1);
			cp_ctx(ctx, 0x402a00, 1);
			cp_ctx(ctx, 0x402a40, 1);
			cp_ctx(ctx, 0x402a80, 1);
			cp_ctx(ctx, 0x402ac0, 1);
		}
		break;
	case 0x40:
		cp_ctx(ctx, 0x402844, 1);
		gr_def(ctx, 0x402844, 0x00000001);
		cp_ctx(ctx, 0x402850, 1);
		break;
	default:
		cp_ctx(ctx, 0x402844, 1);
		gr_def(ctx, 0x402844, 0x00001001);
		cp_ctx(ctx, 0x402850, 2);
		gr_def(ctx, 0x402854, 0x00000003);
		break;
	}

	cp_ctx(ctx, 0x402c00, 4);
	gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
			      0x80800001 : 0x00888001);
	switch (dev_priv->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		cp_ctx(ctx, 0x402c20, 40);
		for (i = 0; i < 32; i++)
			gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
		cp_ctx(ctx, 0x4030b8, 13);
		gr_def(ctx, 0x4030dc, 0x00000005);
		gr_def(ctx, 0x4030e8, 0x0000ffff);
		break;
	default:
		cp_ctx(ctx, 0x402c10, 4);
		/* 0x402c20 range shrinks on newer chipsets */
		if (dev_priv->chipset == 0x40)
			cp_ctx(ctx, 0x402c20, 36);
		else
		if (dev_priv->chipset <= 0x42)
			cp_ctx(ctx, 0x402c20, 24);
		else
		if (dev_priv->chipset <= 0x4a)
			cp_ctx(ctx, 0x402c20, 16);
		else
			cp_ctx(ctx, 0x402c20, 8);
		cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
		gr_def(ctx, 0x402cd4, 0x00000005);
		if (dev_priv->chipset != 0x40)
			gr_def(ctx, 0x402ce0, 0x0000ffff);
		break;
	}

	cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
	cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
	/* one 0x403420 entry per vertex shader unit */
	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
	for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
		gr_def(ctx, 0x403420 + (i * 4), 0x00005555);

	if (dev_priv->chipset != 0x40) {
		cp_ctx(ctx, 0x403600, 1);
		gr_def(ctx, 0x403600, 0x00000001);
	}
	cp_ctx(ctx, 0x403800, 1);

	cp_ctx(ctx, 0x403c18, 1);
	gr_def(ctx, 0x403c18, 0x00000001);
	switch (dev_priv->chipset) {
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
		cp_ctx(ctx, 0x405018, 1);
		gr_def(ctx, 0x405018, 0x08e00001);
		cp_ctx(ctx, 0x405c24, 1);
		gr_def(ctx, 0x405c24, 0x000e3000);
		break;
	}
	if (dev_priv->chipset != 0x4e)
		cp_ctx(ctx, 0x405800, 11);
	cp_ctx(ctx, 0x407000, 1);
}
534 | |||
/* Emit 3D state block 3: a raw ctxprog sequence that transfers a block
 * of state whose length depends on the chipset class.  On save one fewer
 * quadword group (len - 4) is transferred than on load; the branch on
 * SWAP_DIRECTION selects which length register value is used.
 */
static void
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
{
	int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;

	cp_out (ctx, 0x300000);
	cp_lsr (ctx, len - 4);
	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
	cp_lsr (ctx, len);
	cp_name(ctx, cp_swap_state3d_3_is_save);
	cp_out (ctx, 0x800001);

	/* Reserve the transferred block in the context image. */
	ctx->ctxvals_pos += len;
}
549 | |||
/* Emit the per-vertex-shader-unit state: a 0x300-byte common block plus
 * vs_len bytes per unit, transferred by the chipset-specific "magic"
 * ctxprog opcode.  When generating context values (NOUVEAU_GRCTX_VALS),
 * also fill in the non-zero defaults: 16 pairs of 1.0f in the common
 * block and per-unit constant tables at b0_offset/b1_offset.
 */
static void
nv40_graph_construct_shader(struct nouveau_grctx *ctx)
{
	struct drm_device *dev = ctx->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = ctx->data;
	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
	int offset, i;

	vs_nr    = nv40_graph_vs_count(ctx->dev);
	vs_nr_b0 = 363;
	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
	/* Per-chipset layout of the per-vs block (offsets in dwords). */
	if (dev_priv->chipset == 0x40) {
		b0_offset = 0x2200/4; /* 33a0 */
		b1_offset = 0x55a0/4; /* 1500 */
		vs_len = 0x6aa0/4;
	} else
	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
		b0_offset = 0x2200/4; /* 2200 */
		b1_offset = 0x4400/4; /* 0b00 */
		vs_len = 0x4f00/4;
	} else {
		b0_offset = 0x1d40/4; /* 2200 */
		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
	}

	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);

	offset = ctx->ctxvals_pos;
	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));

	/* Only fill values when generating the initial context image. */
	if (ctx->mode != NOUVEAU_GRCTX_VALS)
		return;

	/* 16 dword pairs of IEEE-754 1.0f (0x3f800000) in the common block */
	offset += 0x0280/4;
	for (i = 0; i < 16; i++, offset += 2)
		nv_wo32(obj, offset * 4, 0x3f800000);

	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
		/* block 0: every 6th dword set to 1 */
		for (i = 0; i < vs_nr_b0 * 6; i += 6)
			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
		/* block 1: every 4th dword set to 1.0f */
		for (i = 0; i < vs_nr_b1 * 4; i += 4)
			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
	}
}
597 | |||
/* Build the complete NV40-family context program: dispatch on the
 * pending save/load flags, perform the state swap via the construct_*
 * helpers, and clean up.  The cp_* calls emit ctxprog instructions in
 * order, so statement order here *is* the program.
 */
void
nv40_grctx_init(struct nouveau_grctx *ctx)
{
	/* decide whether we're loading/unloading the context */
	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);

	cp_name(ctx, cp_check_load);
	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
	/* nothing pending: exit without touching state */
	cp_bra (ctx, ALWAYS, TRUE, cp_exit);

	/* setup for context load */
	cp_name(ctx, cp_setup_auto_load);
	cp_wait(ctx, STATUS, IDLE);
	cp_out (ctx, CP_NEXT_TO_SWAP);
	cp_name(ctx, cp_setup_load);
	cp_wait(ctx, STATUS, IDLE);
	cp_set (ctx, SWAP_DIRECTION, LOAD);
	cp_out (ctx, 0x00910880); /* ?? */
	cp_out (ctx, 0x00901ffe); /* ?? */
	cp_out (ctx, 0x01940000); /* ?? */
	cp_lsr (ctx, 0x20);
	cp_out (ctx, 0x0060000b); /* ?? */
	cp_wait(ctx, UNK57, CLEAR);
	cp_out (ctx, 0x0060000c); /* ?? */
	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);

	/* setup for context save */
	cp_name(ctx, cp_setup_save);
	cp_set (ctx, SWAP_DIRECTION, SAVE);

	/* general PGRAPH state */
	cp_name(ctx, cp_swap_state);
	cp_pos (ctx, 0x00020/4);
	nv40_graph_construct_general(ctx);
	cp_wait(ctx, STATUS, IDLE);

	/* 3D state, block 1 */
	cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
	nv40_graph_construct_state3d(ctx);
	cp_wait(ctx, STATUS, IDLE);

	/* 3D state, block 2 */
	nv40_graph_construct_state3d_2(ctx);

	/* Some other block of "random" state */
	nv40_graph_construct_state3d_3(ctx);

	/* Per-vertex shader state */
	cp_pos (ctx, ctx->ctxvals_pos);
	nv40_graph_construct_shader(ctx);

	/* pre-exit state updates: after a save, fall through to check for
	 * a pending load so save+load can run in one ctxprog invocation */
	cp_name(ctx, cp_prepare_exit);
	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
	cp_out (ctx, CP_NEXT_TO_CURRENT);

	cp_name(ctx, cp_exit);
	cp_set (ctx, USER_SAVE, NOT_PENDING);
	cp_set (ctx, USER_LOAD, NOT_PENDING);
	cp_out (ctx, CP_END);
}
662 | |||
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c new file mode 100644 index 00000000000..03c0d4c3f35 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_mc.c | |||
@@ -0,0 +1,28 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
6 | int | ||
7 | nv40_mc_init(struct drm_device *dev) | ||
8 | { | ||
9 | /* Power up everything, resetting each individual unit will | ||
10 | * be done later if needed. | ||
11 | */ | ||
12 | nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); | ||
13 | |||
14 | if (nv44_graph_class(dev)) { | ||
15 | u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA); | ||
16 | nv_wr32(dev, NV40_PMC_1700, tmp); | ||
17 | nv_wr32(dev, NV40_PMC_1704, 0); | ||
18 | nv_wr32(dev, NV40_PMC_1708, 0); | ||
19 | nv_wr32(dev, NV40_PMC_170C, tmp); | ||
20 | } | ||
21 | |||
22 | return 0; | ||
23 | } | ||
24 | |||
/* Nothing to undo for PMC; present only to satisfy the engine interface. */
void
nv40_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c new file mode 100644 index 00000000000..ad03a0e1fc7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c | |||
@@ -0,0 +1,311 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_ramht.h" | ||
28 | |||
/* PMPEG engine instance; wraps the common exec-engine base so it can be
 * registered with the driver core via NVOBJ_ENGINE_ADD.
 */
struct nv40_mpeg_engine {
	struct nouveau_exec_engine base;
};
32 | |||
/* Create a PMPEG engine context for @chan and hook it into the channel's
 * RAMFC so the hardware picks it up on context switch.  Returns 0 on
 * success or a negative errno from the gpuobj allocation.
 */
static int
nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ctx = NULL;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* 264 words of zero-initialised context storage, 16-byte aligned */
	ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ctx, 0x78, 0x02001ec1);

	/* Point the channel's RAMFC at the new context.  The 0x002500 bit-0
	 * toggle around the update looks like a PFIFO stall/resume, and
	 * 0x003204 like the currently-active channel id -- NOTE(review):
	 * register meanings inferred, confirm against PFIFO code.
	 */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
		nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
	nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	chan->engctx[engine] = ctx;
	return 0;
}
62 | |||
/* Tear down a channel's PMPEG context: if the hardware currently has it
 * bound (0x00b318), unbind it with context switching disabled, then drop
 * the gpuobj reference.
 */
static void
nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	unsigned long flags;
	u32 inst = 0x80000000 | (ctx->pinst >> 4);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	/* disable PMPEG context switching (0x00b32c bit 0) while unbinding */
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	if (nv_rd32(dev, 0x00b318) == inst)
		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = NULL;
}
82 | |||
83 | static int | ||
84 | nv40_mpeg_object_new(struct nouveau_channel *chan, int engine, | ||
85 | u32 handle, u16 class) | ||
86 | { | ||
87 | struct drm_device *dev = chan->dev; | ||
88 | struct nouveau_gpuobj *obj = NULL; | ||
89 | int ret; | ||
90 | |||
91 | ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC | | ||
92 | NVOBJ_FLAG_ZERO_FREE, &obj); | ||
93 | if (ret) | ||
94 | return ret; | ||
95 | obj->engine = 2; | ||
96 | obj->class = class; | ||
97 | |||
98 | nv_wo32(obj, 0x00, class); | ||
99 | |||
100 | ret = nouveau_ramht_insert(chan, handle, obj); | ||
101 | nouveau_gpuobj_ref(NULL, &obj); | ||
102 | return ret; | ||
103 | } | ||
104 | |||
/* Bring up the VPE complex and the PMPEG engine itself.  Returns -EBUSY
 * when the engine does not report idle after initialisation.
 */
static int
nv40_mpeg_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
	int i;

	/* VPE init: pulse the VPE bit in PMC_ENABLE, then program the two
	 * 0x00b0ex regs with the value the binary driver writes */
	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
	nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	/* reprogram the tiling regions shared across the VPE engines */
	for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
		pmpeg->base.set_tile_region(dev, i);

	/* PMPEG init */
	nv_wr32(dev, 0x00b32c, 0x00000000);
	nv_wr32(dev, 0x00b314, 0x00000100);
	nv_wr32(dev, 0x00b220, 0x00000044);
	nv_wr32(dev, 0x00b300, 0x02001ec1);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);

	/* clear pending interrupt status (0x00b100, written by the ISR to
	 * ack) and open the interrupt mask (0x00b140, cleared in fini) */
	nv_wr32(dev, 0x00b100, 0xffffffff);
	nv_wr32(dev, 0x00b140, 0xffffffff);

	/* NOTE(review): 0x00b200 bit 0 appears to be a busy flag — confirm */
	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
138 | |||
/* Halt PMPEG: disable context switching (0x00b32c bit 0) and mask all of
 * its interrupts (0x00b140).  Always succeeds.
 */
static int
nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
	/*XXX: context save? */
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	nv_wr32(dev, 0x00b140, 0x00000000);
	return 0;
}
147 | |||
148 | static int | ||
149 | nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) | ||
150 | { | ||
151 | struct drm_device *dev = chan->dev; | ||
152 | u32 inst = data << 4; | ||
153 | u32 dma0 = nv_ri32(dev, inst + 0); | ||
154 | u32 dma1 = nv_ri32(dev, inst + 4); | ||
155 | u32 dma2 = nv_ri32(dev, inst + 8); | ||
156 | u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); | ||
157 | u32 size = dma1 + 1; | ||
158 | |||
159 | /* only allow linear DMA objects */ | ||
160 | if (!(dma0 & 0x00002000)) | ||
161 | return -EINVAL; | ||
162 | |||
163 | if (mthd == 0x0190) { | ||
164 | /* DMA_CMD */ | ||
165 | nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000)); | ||
166 | nv_wr32(dev, 0x00b334, base); | ||
167 | nv_wr32(dev, 0x00b324, size); | ||
168 | } else | ||
169 | if (mthd == 0x01a0) { | ||
170 | /* DMA_DATA */ | ||
171 | nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); | ||
172 | nv_wr32(dev, 0x00b360, base); | ||
173 | nv_wr32(dev, 0x00b364, size); | ||
174 | } else { | ||
175 | /* DMA_IMAGE, VRAM only */ | ||
176 | if (dma0 & 0x000c0000) | ||
177 | return -EINVAL; | ||
178 | |||
179 | nv_wr32(dev, 0x00b370, base); | ||
180 | nv_wr32(dev, 0x00b374, size); | ||
181 | } | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static int | ||
187 | nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst) | ||
188 | { | ||
189 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
190 | struct nouveau_gpuobj *ctx; | ||
191 | unsigned long flags; | ||
192 | int i; | ||
193 | |||
194 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
195 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
196 | if (!dev_priv->channels.ptr[i]) | ||
197 | continue; | ||
198 | |||
199 | ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG]; | ||
200 | if (ctx && ctx->pinst == inst) | ||
201 | break; | ||
202 | } | ||
203 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
204 | return i; | ||
205 | } | ||
206 | |||
207 | static void | ||
208 | nv40_vpe_set_tile_region(struct drm_device *dev, int i) | ||
209 | { | ||
210 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
212 | |||
213 | nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch); | ||
214 | nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit); | ||
215 | nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr); | ||
216 | } | ||
217 | |||
/* PMPEG interrupt handler: decode the trapping context/method, dispatch
 * software methods for class 0x3174, ack the interrupt, and rate-limited
 * log anything left unhandled.
 */
static void
nv40_mpeg_isr(struct drm_device *dev)
{
	/* instance address of the context the engine was executing */
	u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
	u32 chid = nv40_mpeg_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x00b100);
	u32 type = nv_rd32(dev, 0x00b230);
	u32 mthd = nv_rd32(dev, 0x00b234);
	u32 data = nv_rd32(dev, 0x00b238);
	u32 show = stat;	/* bits still set here get logged below */

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			/* no-op mask: a bare read-modify-write of 0x00b308 --
			 * NOTE(review): presumably just touching the register
			 * is enough here, confirm */
			nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		/* software method: hand off to the registered mthd handlers */
		if (type == 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
				show &= ~0x01000000;
		}
	}

	/* ack the interrupt status and kick 0x00b230 -- NOTE(review): the
	 * latter presumably resumes method processing, confirm */
	nv_wr32(dev, 0x00b100, stat);
	nv_wr32(dev, 0x00b230, 0x00000001);

	if (show && nouveau_ratelimit()) {
		NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
			chid, inst, stat, type, mthd, data);
	}
}
250 | |||
251 | static void | ||
252 | nv40_vpe_isr(struct drm_device *dev) | ||
253 | { | ||
254 | if (nv_rd32(dev, 0x00b100)) | ||
255 | nv40_mpeg_isr(dev); | ||
256 | |||
257 | if (nv_rd32(dev, 0x00b800)) { | ||
258 | u32 stat = nv_rd32(dev, 0x00b800); | ||
259 | NV_INFO(dev, "PMSRCH: 0x%08x\n", stat); | ||
260 | nv_wr32(dev, 0xb800, stat); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | static void | ||
265 | nv40_mpeg_destroy(struct drm_device *dev, int engine) | ||
266 | { | ||
267 | struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); | ||
268 | |||
269 | nouveau_irq_unregister(dev, 0); | ||
270 | |||
271 | NVOBJ_ENGINE_DEL(dev, MPEG); | ||
272 | kfree(pmpeg); | ||
273 | } | ||
274 | |||
275 | int | ||
276 | nv40_mpeg_create(struct drm_device *dev) | ||
277 | { | ||
278 | struct nv40_mpeg_engine *pmpeg; | ||
279 | |||
280 | pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL); | ||
281 | if (!pmpeg) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | pmpeg->base.destroy = nv40_mpeg_destroy; | ||
285 | pmpeg->base.init = nv40_mpeg_init; | ||
286 | pmpeg->base.fini = nv40_mpeg_fini; | ||
287 | pmpeg->base.context_new = nv40_mpeg_context_new; | ||
288 | pmpeg->base.context_del = nv40_mpeg_context_del; | ||
289 | pmpeg->base.object_new = nv40_mpeg_object_new; | ||
290 | |||
291 | /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between | ||
292 | * all VPE engines, for this driver's purposes the PMPEG engine | ||
293 | * will be treated as the "master" and handle the global VPE | ||
294 | * bits too | ||
295 | */ | ||
296 | pmpeg->base.set_tile_region = nv40_vpe_set_tile_region; | ||
297 | nouveau_irq_register(dev, 0, nv40_vpe_isr); | ||
298 | |||
299 | NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); | ||
300 | NVOBJ_CLASS(dev, 0x3174, MPEG); | ||
301 | NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma); | ||
302 | NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma); | ||
303 | NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma); | ||
304 | |||
305 | #if 0 | ||
306 | NVOBJ_ENGINE_ADD(dev, ME, &pme->base); | ||
307 | NVOBJ_CLASS(dev, 0x4075, ME); | ||
308 | #endif | ||
309 | return 0; | ||
310 | |||
311 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c new file mode 100644 index 00000000000..8cf63a8b30c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_calc.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_hw.h" | ||
28 | |||
29 | int | ||
30 | nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, | ||
31 | int *N1, int *M1, int *N2, int *M2, int *P) | ||
32 | { | ||
33 | struct nouveau_pll_vals pll_vals; | ||
34 | int ret; | ||
35 | |||
36 | ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals); | ||
37 | if (ret <= 0) | ||
38 | return ret; | ||
39 | |||
40 | *N1 = pll_vals.N1; | ||
41 | *M1 = pll_vals.M1; | ||
42 | *N2 = pll_vals.N2; | ||
43 | *M2 = pll_vals.M2; | ||
44 | *P = pll_vals.log2P; | ||
45 | return ret; | ||
46 | } | ||
47 | |||
/* Compute N/M/P (plus optional fractional N via @pfN) for an NVA3-style
 * PLL targeting @clk.  Returns the achieved frequency, or -EINVAL when
 * no divider combination satisfies the limits in @pll.
 */
int
nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
	      int *pN, int *pfN, int *pM, int *P)
{
	u32 best_err = ~0, err;
	int M, lM, hM, N, fN;

	/* pick the post-divider that keeps the VCO as high as possible,
	 * clamped to the hardware's valid range */
	*P = pll->vco1.maxfreq / clk;
	if (*P > pll->max_p)
		*P = pll->max_p;
	if (*P < pll->min_p)
		*P = pll->min_p;

	/* M search bounds implied by the reference-input frequency limits */
	lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq;
	lM = max(lM, (int)pll->vco1.min_m);
	hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq;
	hM = min(hM, (int)pll->vco1.max_m);

	for (M = lM; M <= hM; M++) {
		u32 tmp = clk * *P * M;
		N = tmp / pll->refclk;
		fN = tmp % pll->refclk;
		/* without a fractional-N output, round N to nearest */
		if (!pfN && fN >= pll->refclk / 2)
			N++;

		if (N < pll->vco1.min_n)
			continue;
		if (N > pll->vco1.max_n)
			break;

		err = abs(clk - (pll->refclk * N / M / *P));
		if (err < best_err) {
			best_err = err;
			*pN = N;
			*pM = M;
		}

		/* fractional-N mode: take the first in-range M as-is and
		 * put the remainder in *pfN (13-bit fraction biased by
		 * 4096) -- NOTE(review): this early return skips the
		 * best-error search above; looks intentional, confirm */
		if (pfN) {
			*pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff;
			return clk;
		}
	}

	if (unlikely(best_err == ~0)) {
		NV_ERROR(dev, "unable to find matching pll values\n");
		return -EINVAL;
	}

	return pll->refclk * *pN / *pM / *P;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c new file mode 100644 index 00000000000..5d989073ba6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -0,0 +1,796 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Maarten Maathuis. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm_mode.h" | ||
29 | #include "drm_crtc_helper.h" | ||
30 | |||
31 | #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) | ||
32 | #include "nouveau_reg.h" | ||
33 | #include "nouveau_drv.h" | ||
34 | #include "nouveau_hw.h" | ||
35 | #include "nouveau_encoder.h" | ||
36 | #include "nouveau_crtc.h" | ||
37 | #include "nouveau_fb.h" | ||
38 | #include "nouveau_connector.h" | ||
39 | #include "nv50_display.h" | ||
40 | |||
/* Upload the 256-entry software gamma LUT into the CRTC's hardware LUT
 * buffer.  Each hardware entry is 8 bytes (16-bit r/g/b plus padding);
 * stored values are shifted down by 2 -- NOTE(review): presumably the
 * hardware wants 14-bit components, confirm.
 */
static void
nv50_crtc_lut_load(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
	int i;

	NV_DEBUG_KMS(crtc->dev, "\n");

	for (i = 0; i < 256; i++) {
		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
		writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
		writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
	}

	/* 30-bit depth wants a 257th entry; i == 256 after the loop, so
	 * this duplicates the last entry into the slot one past it */
	if (nv_crtc->lut.depth == 30) {
		writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
		writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
		writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
	}
}
62 | |||
/* Blank or unblank a CRTC through the EVO master channel by pointing its
 * CLUT and framebuffer DMA objects at NONE (blanked) or the real buffers
 * (unblanked).  NV84+ takes an extra CLUT_DMA method the original NV50
 * lacks.  Returns 0, or a negative errno if EVO ring space runs out.
 */
int
nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int index = nv_crtc->index, ret;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
	NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");

	if (blanked) {
		nv_crtc->cursor.hide(nv_crtc, false);

		/* NV84+ needs two extra words for the CLUT_DMA method */
		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
		if (ret) {
			NV_ERROR(dev, "no space while blanking crtc\n");
			return ret;
		}
		/* detach the CLUT and framebuffer DMA objects */
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
		OUT_RING(evo, 0);
		if (dev_priv->chipset != 0x50) {
			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
		}

		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
		OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
	} else {
		/* restore cursor visibility to its pre-blank state */
		if (nv_crtc->cursor.visible)
			nv_crtc->cursor.show(nv_crtc, false);
		else
			nv_crtc->cursor.hide(nv_crtc, false);

		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
		if (ret) {
			NV_ERROR(dev, "no space while unblanking crtc\n");
			return ret;
		}
		/* CLUT is bypassed entirely at 8bpp */
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
			      NV50_EVO_CRTC_CLUT_MODE_OFF :
			      NV50_EVO_CRTC_CLUT_MODE_ON);
		OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
		if (dev_priv->chipset != 0x50) {
			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
			OUT_RING(evo, NvEvoVRAM);
		}

		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
		OUT_RING(evo, nv_crtc->fb.offset >> 8);
		OUT_RING(evo, 0);
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
		/* pick the FB DMA object matching the surface tile_flags;
		 * the original NV50 only has the low-page VRAM object */
		if (dev_priv->chipset != 0x50)
			if (nv_crtc->fb.tile_flags == 0x7a00 ||
			    nv_crtc->fb.tile_flags == 0xfe00)
				OUT_RING(evo, NvEvoFB32);
			else
			if (nv_crtc->fb.tile_flags == 0x7000)
				OUT_RING(evo, NvEvoFB16);
			else
				OUT_RING(evo, NvEvoVRAM_LP);
		else
			OUT_RING(evo, NvEvoVRAM_LP);
	}

	nv_crtc->fb.blanked = blanked;
	return 0;
}
133 | |||
134 | static int | ||
135 | nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update) | ||
136 | { | ||
137 | struct drm_device *dev = nv_crtc->base.dev; | ||
138 | struct nouveau_channel *evo = nv50_display(dev)->master; | ||
139 | int ret; | ||
140 | |||
141 | NV_DEBUG_KMS(dev, "\n"); | ||
142 | |||
143 | ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); | ||
144 | if (ret) { | ||
145 | NV_ERROR(dev, "no space while setting dither\n"); | ||
146 | return ret; | ||
147 | } | ||
148 | |||
149 | BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1); | ||
150 | if (on) | ||
151 | OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON); | ||
152 | else | ||
153 | OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF); | ||
154 | |||
155 | if (update) { | ||
156 | BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); | ||
157 | OUT_RING(evo, 0); | ||
158 | FIRE_RING(evo); | ||
159 | } | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | struct nouveau_connector * | ||
165 | nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) | ||
166 | { | ||
167 | struct drm_device *dev = nv_crtc->base.dev; | ||
168 | struct drm_connector *connector; | ||
169 | struct drm_crtc *crtc = to_drm_crtc(nv_crtc); | ||
170 | |||
171 | /* The safest approach is to find an encoder with the right crtc, that | ||
172 | * is also linked to a connector. */ | ||
173 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
174 | if (connector->encoder) | ||
175 | if (connector->encoder->crtc == crtc) | ||
176 | return nouveau_connector(connector); | ||
177 | } | ||
178 | |||
179 | return NULL; | ||
180 | } | ||
181 | |||
/* Program the CRTC scaler for @scaling_mode.  Any mode other than NONE
 * needs a native mode from the attached connector to scale to; without
 * one we fall back to NONE.  When @update is set, the change is
 * committed immediately.  Returns 0 or a negative errno.
 */
static int
nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
{
	struct nouveau_connector *nv_connector =
		nouveau_crtc_connector_get(nv_crtc);
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct drm_display_mode *native_mode = NULL;
	struct drm_display_mode *mode = &nv_crtc->base.mode;
	uint32_t outX, outY, horiz, vert;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	switch (scaling_mode) {
	case DRM_MODE_SCALE_NONE:
		break;
	default:
		if (!nv_connector || !nv_connector->native_mode) {
			NV_ERROR(dev, "No native mode, forcing panel scaling\n");
			scaling_mode = DRM_MODE_SCALE_NONE;
		} else {
			native_mode = nv_connector->native_mode;
		}
		break;
	}

	/* compute the output rectangle for the chosen scaling mode */
	switch (scaling_mode) {
	case DRM_MODE_SCALE_ASPECT:
		/* ratios in 19-bit-fraction fixed point; the smaller axis
		 * ratio is applied to both so the image fits the panel */
		horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
		vert = (native_mode->vdisplay << 19) / mode->vdisplay;

		if (vert > horiz) {
			outX = (mode->hdisplay * horiz) >> 19;
			outY = (mode->vdisplay * horiz) >> 19;
		} else {
			outX = (mode->hdisplay * vert) >> 19;
			outY = (mode->vdisplay * vert) >> 19;
		}
		break;
	case DRM_MODE_SCALE_FULLSCREEN:
		outX = native_mode->hdisplay;
		outY = native_mode->vdisplay;
		break;
	case DRM_MODE_SCALE_CENTER:
	case DRM_MODE_SCALE_NONE:
	default:
		outX = mode->hdisplay;
		outY = mode->vdisplay;
		break;
	}

	ret = RING_SPACE(evo, update ? 7 : 5);
	if (ret)
		return ret;

	/* Got a better name for SCALER_ACTIVE? */
	/* One day i've got to really figure out why this is needed. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
	if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
	    (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    mode->hdisplay != outX || mode->vdisplay != outY) {
		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
	} else {
		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
	}

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
	OUT_RING(evo, outY << 16 | outX);
	OUT_RING(evo, outY << 16 | outX);

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
	}

	return 0;
}
261 | |||
/* Program the video PLL for @head to @pclk.  Three programming styles
 * are selected by the VBIOS PLL limits and chipset: two-stage VCO
 * (vco2 present), single-stage with fractional N (pre-NV_C0), and the
 * NV_C0+ register layout.  Returns 0 on success or when no usable
 * coefficients are found, a negative errno on limits-lookup failure.
 */
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pll_lims pll;
	uint32_t reg1, reg2;
	int ret, N1, M1, N2, M2, P;

	ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
	if (ret)
		return ret;

	if (pll.vco2.maxfreq) {
		/* two-stage VCO: program both N/M pairs plus P */
		ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
		if (ret <= 0)
			return 0;

		NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
			 pclk, ret, N1, M1, N2, M2, P);

		/* preserve bits around the coefficient fields */
		reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
		reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
		nv_wr32(dev, pll.reg + 0, 0x10000611);
		nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
		nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
	} else
	if (dev_priv->chipset < NV_C0) {
		/* single-stage, nva3-style: N2 carries the fractional N */
		ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
		if (ret <= 0)
			return 0;

		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
			 pclk, ret, N1, N2, M1, P);

		reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
		nv_wr32(dev, pll.reg + 0, 0x50000610);
		nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
		nv_wr32(dev, pll.reg + 8, N2);
	} else {
		/* NV_C0+ register layout */
		ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
		if (ret <= 0)
			return 0;

		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
			 pclk, ret, N1, N2, M1, P);

		nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100);
		nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
		nv_wr32(dev, pll.reg + 0x10, N2 << 16);
	}

	return 0;
}
315 | |||
316 | static void | ||
317 | nv50_crtc_destroy(struct drm_crtc *crtc) | ||
318 | { | ||
319 | struct drm_device *dev; | ||
320 | struct nouveau_crtc *nv_crtc; | ||
321 | |||
322 | if (!crtc) | ||
323 | return; | ||
324 | |||
325 | dev = crtc->dev; | ||
326 | nv_crtc = nouveau_crtc(crtc); | ||
327 | |||
328 | NV_DEBUG_KMS(dev, "\n"); | ||
329 | |||
330 | drm_crtc_cleanup(&nv_crtc->base); | ||
331 | |||
332 | nv50_cursor_fini(nv_crtc); | ||
333 | |||
334 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | ||
335 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | ||
336 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | ||
337 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | ||
338 | kfree(nv_crtc->mode); | ||
339 | kfree(nv_crtc); | ||
340 | } | ||
341 | |||
342 | int | ||
343 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
344 | uint32_t buffer_handle, uint32_t width, uint32_t height) | ||
345 | { | ||
346 | struct drm_device *dev = crtc->dev; | ||
347 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
348 | struct nouveau_bo *cursor = NULL; | ||
349 | struct drm_gem_object *gem; | ||
350 | int ret = 0, i; | ||
351 | |||
352 | if (!buffer_handle) { | ||
353 | nv_crtc->cursor.hide(nv_crtc, true); | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | if (width != 64 || height != 64) | ||
358 | return -EINVAL; | ||
359 | |||
360 | gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); | ||
361 | if (!gem) | ||
362 | return -ENOENT; | ||
363 | cursor = nouveau_gem_object(gem); | ||
364 | |||
365 | ret = nouveau_bo_map(cursor); | ||
366 | if (ret) | ||
367 | goto out; | ||
368 | |||
369 | /* The simple will do for now. */ | ||
370 | for (i = 0; i < 64 * 64; i++) | ||
371 | nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i)); | ||
372 | |||
373 | nouveau_bo_unmap(cursor); | ||
374 | |||
375 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); | ||
376 | nv_crtc->cursor.show(nv_crtc, true); | ||
377 | |||
378 | out: | ||
379 | drm_gem_object_unreference_unlocked(gem); | ||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | int | ||
384 | nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | ||
385 | { | ||
386 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
387 | |||
388 | nv_crtc->cursor.set_pos(nv_crtc, x, y); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static void | ||
393 | nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, | ||
394 | uint32_t start, uint32_t size) | ||
395 | { | ||
396 | int end = (start + size > 256) ? 256 : start + size, i; | ||
397 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
398 | |||
399 | for (i = start; i < end; i++) { | ||
400 | nv_crtc->lut.r[i] = r[i]; | ||
401 | nv_crtc->lut.g[i] = g[i]; | ||
402 | nv_crtc->lut.b[i] = b[i]; | ||
403 | } | ||
404 | |||
405 | /* We need to know the depth before we upload, but it's possible to | ||
406 | * get called before a framebuffer is bound. If this is the case, | ||
407 | * mark the lut values as dirty by setting depth==0, and it'll be | ||
408 | * uploaded on the first mode_set_base() | ||
409 | */ | ||
410 | if (!nv_crtc->base.fb) { | ||
411 | nv_crtc->lut.depth = 0; | ||
412 | return; | ||
413 | } | ||
414 | |||
415 | nv50_crtc_lut_load(crtc); | ||
416 | } | ||
417 | |||
/* CRTC state-save hook; not implemented for nv50 KMS — log loudly if it
 * is ever reached. */
static void
nv50_crtc_save(struct drm_crtc *crtc)
{
	NV_ERROR(crtc->dev, "!!\n");
}
423 | |||
/* CRTC state-restore hook; not implemented for nv50 KMS — log loudly if
 * it is ever reached. */
static void
nv50_crtc_restore(struct drm_crtc *crtc)
{
	NV_ERROR(crtc->dev, "!!\n");
}
429 | |||
/* CRTC entry points exposed to the DRM core for nv50+ (EVO) hardware.
 * nv50_crtc_destroy and nouveau_crtc_page_flip are defined elsewhere. */
static const struct drm_crtc_funcs nv50_crtc_funcs = {
	.save = nv50_crtc_save,
	.restore = nv50_crtc_restore,
	.cursor_set = nv50_crtc_cursor_set,
	.cursor_move = nv50_crtc_cursor_move,
	.gamma_set = nv50_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = nouveau_crtc_page_flip,
	.destroy = nv50_crtc_destroy,
};
440 | |||
/* DRM .dpms helper hook: deliberately a no-op -- CRTC power state is
 * handled through the blank/unblank paths in prepare()/commit(). */
static void
nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
445 | |||
/* Submit an EVO update request and busy-wait (up to 2 seconds of PTIMER
 * time) for the display engine to signal completion through the shared
 * notifier object.
 *
 * Returns 0 on completion, a negative error if ring space cannot be
 * reserved, or -EBUSY on timeout.
 */
static int
nv50_crtc_wait_complete(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo = disp->master;
	u64 start;
	int ret;

	ret = RING_SPACE(evo, 6);
	if (ret)
		return ret;
	/* NOTE(review): 0x0084 appears to arm/disarm notification around the
	 * 0x0080 update method -- confirm against the EVO method docs. */
	BEGIN_RING(evo, 0, 0x0084, 1);
	OUT_RING  (evo, 0x80000000);
	BEGIN_RING(evo, 0, 0x0080, 1);
	OUT_RING  (evo, 0);
	BEGIN_RING(evo, 0, 0x0084, 1);
	OUT_RING  (evo, 0x00000000);

	/* Clear the notifier before kicking the ring so a stale non-zero
	 * value can't be mistaken for completion. */
	nv_wo32(disp->ntfy, 0x000, 0x00000000);
	FIRE_RING (evo);

	/* Poll the notifier; PTIMER runs in nanoseconds, so this is a
	 * 2-second timeout. */
	start = ptimer->read(dev);
	do {
		if (nv_ro32(disp->ntfy, 0x000))
			return 0;
	} while (ptimer->read(dev) - start < 2000000000ULL);

	return -EBUSY;
}
478 | |||
/* DRM .prepare helper hook: quiesce the CRTC before a mode set.
 * Pending page flips must be stopped before blanking the CRTC. */
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	nv50_display_flip_stop(crtc);
	drm_vblank_pre_modeset(dev, nv_crtc->index);
	nv50_crtc_blank(nv_crtc, true);
}
491 | |||
/* DRM .commit helper hook: unblank the CRTC after a mode set and
 * re-enable page flipping on the current framebuffer. */
static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	nv50_crtc_blank(nv_crtc, false);
	drm_vblank_post_modeset(dev, nv_crtc->index);
	/* NOTE(review): wait_complete() and flip_next() failures are ignored
	 * here -- the helper hook returns void, so there is nothing better
	 * to do than continue. */
	nv50_crtc_wait_complete(crtc);
	nv50_display_flip_next(crtc, crtc->fb, NULL);
}
505 | |||
/* DRM .mode_fixup helper hook: no CRTC-level adjustment needed on nv50;
 * accept every mode unmodified. */
static bool
nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	return true;
}
512 | |||
/* Point the CRTC at a framebuffer and program scanout parameters
 * (offset, pitch, format, CLUT mode, pan position) through the EVO
 * channel.
 *
 * @passed_fb: in the atomic case, the fb to switch to; otherwise the
 *             previous fb to unpin after pinning crtc->fb.
 * @x, @y:     panning offset within the framebuffer.
 * @atomic:    true for the kgdb/panic atomic path -- no pinning is done,
 *             buffers are assumed already resident.
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
			   struct drm_framebuffer *passed_fb,
			   int x, int y, bool atomic)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct drm_framebuffer *drm_fb;
	struct nouveau_framebuffer *fb;
	int ret;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	/* no fb bound */
	if (!atomic && !crtc->fb) {
		NV_DEBUG_KMS(dev, "No FB bound\n");
		return 0;
	}

	/* If atomic, we want to switch to the fb we were passed, so
	 * now we update pointers to do that.  (We don't pin; just
	 * assume we're already pinned and update the base address.)
	 */
	if (atomic) {
		drm_fb = passed_fb;
		fb = nouveau_framebuffer(passed_fb);
	} else {
		drm_fb = crtc->fb;
		fb = nouveau_framebuffer(crtc->fb);
		/* If not atomic, we can go ahead and pin, and unpin the
		 * old fb we were passed.
		 */
		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
		if (ret)
			return ret;

		if (passed_fb) {
			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
			nouveau_bo_unpin(ofb->nvbo);
		}
	}

	nv_crtc->fb.offset = fb->nvbo->bo.offset;
	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
	/* NV50 (0x50) has no per-CRTC FB_DMA method; later chips do. */
	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
		ret = RING_SPACE(evo, 2);
		if (ret)
			return ret;
			/* NOTE(review): error paths from here on leave the
			 * new fb pinned -- confirm whether that is accounted
			 * for elsewhere. */

		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
		OUT_RING  (evo, fb->r_dma);
	}

	ret = RING_SPACE(evo, 12);
	if (ret)
		return ret;

	/* Scanout surface: offset (256-byte units), size, pitch, format. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
	OUT_RING  (evo, nv_crtc->fb.offset >> 8);
	OUT_RING  (evo, 0);
	OUT_RING  (evo, (drm_fb->height << 16) | drm_fb->width);
	OUT_RING  (evo, fb->r_pitch);
	OUT_RING  (evo, fb->r_format);

	/* 8bpp scans out through the CLUT directly; disable gamma there. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
	OUT_RING  (evo, fb->base.depth == 8 ?
		   NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
	OUT_RING  (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
	OUT_RING  (evo, (y << 16) | x);

	/* Re-upload the LUT if the fb depth changed (or it was flagged
	 * dirty by a gamma_set before any fb was bound). */
	if (nv_crtc->lut.depth != fb->base.depth) {
		nv_crtc->lut.depth = fb->base.depth;
		nv50_crtc_lut_load(crtc);
	}

	return 0;
}
596 | |||
/* DRM .mode_set helper hook: program CRTC timings from 'adjusted_mode'
 * through the EVO channel, configure dithering/scaling from the attached
 * connector, then bind the framebuffer via do_mode_set_base().
 * Returns 0 on success or a negative errno.
 */
static int
nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
		   struct drm_display_mode *adjusted_mode, int x, int y,
		   struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_connector *nv_connector = NULL;
	uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
	uint32_t hunk1, vunk1, vunk2a, vunk2b;
	int ret;

	/* Find the connector attached to this CRTC */
	/* NOTE(review): nv_connector is dereferenced below without a NULL
	 * check -- confirm a connector is always attached when mode_set
	 * runs. */
	nv_connector = nouveau_crtc_connector_get(nv_crtc);

	*nv_crtc->mode = *adjusted_mode;

	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

	hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
	vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	/* I can't give this a proper name, anyone else can? */
	hunk1 = adjusted_mode->htotal -
		adjusted_mode->hsync_start + adjusted_mode->hdisplay;
	vunk1 = adjusted_mode->vtotal -
		adjusted_mode->vsync_start + adjusted_mode->vdisplay;
	/* Another strange value, this time only for interlaced adjusted_modes. */
	vunk2a = 2 * adjusted_mode->vtotal -
		 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
	vunk2b = adjusted_mode->vtotal -
		 adjusted_mode->vsync_start + adjusted_mode->vtotal;

	/* Interlaced modes program half-frame vertical values. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vsync_dur /= 2;
		vsync_start_to_end /= 2;
		vunk1 /= 2;
		vunk2a /= 2;
		vunk2b /= 2;
		/* magic */
		if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
			vsync_start_to_end -= 1;
			vunk1 -= 1;
			vunk2a -= 1;
			vunk2b -= 1;
		}
	}

	ret = RING_SPACE(evo, 17);
	if (ret)
		return ret;

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
	OUT_RING(evo, adjusted_mode->clock | 0x800000);
	OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);

	/* Timing values go out as (vertical << 16 | horizontal) pairs,
	 * each biased by -1. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
	OUT_RING(evo, 0);
	OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
	OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
	OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
		      (hsync_start_to_end - 1));
	OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));

	/* NOTE(review): the branches are asymmetric -- the interlaced path
	 * emits a header plus one word, the other emits two bare data
	 * words; both consume two ring words, keeping the total at 17.
	 * Confirm against the EVO method layout. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
		OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
	} else {
		OUT_RING(evo, 0);
		OUT_RING(evo, 0);
	}

	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
	OUT_RING(evo, 0);

	/* This is the actual resolution of the mode. */
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
	OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
	OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));

	nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
	nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);

	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
685 | |||
/* DRM .mode_set_base helper hook: re-bind the framebuffer / pan offset
 * without a full mode set, waiting for the update to land before
 * re-arming page flips.  Returns 0 on success or a negative errno. */
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			struct drm_framebuffer *old_fb)
{
	int ret;

	nv50_display_flip_stop(crtc);
	ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
	if (ret)
		return ret;

	ret = nv50_crtc_wait_complete(crtc);
	if (ret)
		return ret;

	return nv50_display_flip_next(crtc, crtc->fb, NULL);
}
703 | |||
/* DRM .mode_set_base_atomic helper hook (kgdb/panic path): switch
 * scanout to 'fb' without pinning (atomic=true) and wait for the
 * update; page flipping is not re-armed here. */
static int
nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
			       struct drm_framebuffer *fb,
			       int x, int y, enum mode_set_atomic state)
{
	int ret;

	nv50_display_flip_stop(crtc);
	ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
	if (ret)
		return ret;

	return nv50_crtc_wait_complete(crtc);
}
718 | |||
/* Modesetting helper callbacks for nv50+ CRTCs, used by the generic
 * drm_crtc_helper code. */
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
	.dpms = nv50_crtc_dpms,
	.prepare = nv50_crtc_prepare,
	.commit = nv50_crtc_commit,
	.mode_fixup = nv50_crtc_mode_fixup,
	.mode_set = nv50_crtc_mode_set,
	.mode_set_base = nv50_crtc_mode_set_base,
	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
	.load_lut = nv50_crtc_lut_load,
};
729 | |||
/* Allocate and register CRTC 'index': initialise a default linear CLUT,
 * allocate/pin/map the LUT and cursor VRAM buffers, and hook the CRTC
 * into the DRM core.  Returns 0 on success or a negative errno. */
int
nv50_crtc_create(struct drm_device *dev, int index)
{
	struct nouveau_crtc *nv_crtc = NULL;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
	if (!nv_crtc)
		return -ENOMEM;

	nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
	if (!nv_crtc->mode) {
		kfree(nv_crtc);
		return -ENOMEM;
	}

	/* Default CLUT parameters, will be activated on the hw upon
	 * first mode set.
	 */
	for (i = 0; i < 256; i++) {
		nv_crtc->lut.r[i] = i << 8;
		nv_crtc->lut.g[i] = i << 8;
		nv_crtc->lut.b[i] = i << 8;
	}
	/* depth 0 marks the LUT dirty; uploaded on first mode_set_base() */
	nv_crtc->lut.depth = 0;

	/* VRAM buffer backing the hardware CLUT (4KiB, 256-byte aligned). */
	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->lut.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	}

	/* LUT buffer is mandatory; bail out before registering the CRTC. */
	if (ret) {
		kfree(nv_crtc->mode);
		kfree(nv_crtc);
		return ret;
	}

	nv_crtc->index = index;

	/* set function pointers */
	nv_crtc->set_dither = nv50_crtc_set_dither;
	nv_crtc->set_scale = nv50_crtc_set_scale;

	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

	/* VRAM buffer for the 64x64 32bpp cursor image. */
	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	}

	/* NOTE(review): a cursor buffer failure is non-fatal here --
	 * cursor.nvbo stays NULL and 0 is still returned.  Confirm the
	 * cursor paths tolerate a NULL nvbo. */
	nv50_cursor_init(nv_crtc);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c new file mode 100644 index 00000000000..9752c35bb84 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Maarten Maathuis. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm_mode.h" | ||
29 | |||
30 | #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) | ||
31 | #include "nouveau_reg.h" | ||
32 | #include "nouveau_drv.h" | ||
33 | #include "nouveau_crtc.h" | ||
34 | #include "nv50_display.h" | ||
35 | |||
/* Enable the hardware cursor on this CRTC via the EVO channel.
 *
 * @update: when true, also push an EVO UPDATE and kick the ring so the
 *          change takes effect immediately; when false, the commands
 *          are queued for a later update.
 */
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	/* Nothing to do if an immediate show is requested and the cursor
	 * is already visible. */
	if (update && nv_crtc->cursor.visible)
		return;

	/* Ring budget: +2 words for the CURSOR_DMA method on non-NV50
	 * chips, +2 more for the UPDATE when requested. */
	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while unhiding cursor\n");
		return;
	}

	/* Original NV50 (0x50) has no per-CRTC cursor DMA method. */
	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NvEvoVRAM);
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
	OUT_RING(evo, nv_crtc->cursor.offset >> 8);

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = true;
	}
}
70 | |||
/* Disable the hardware cursor on this CRTC via the EVO channel;
 * mirror of nv50_cursor_show().
 *
 * @update: when true, push an EVO UPDATE and kick the ring so the
 *          cursor disappears immediately.
 */
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	/* Already hidden and an immediate hide was requested. */
	if (update && !nv_crtc->cursor.visible)
		return;

	/* Ring budget matches nv50_cursor_show(). */
	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while hiding cursor\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING(evo, 0);
	/* Detach the cursor DMA object on chips that have the method. */
	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
	}

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = false;
	}
}
104 | |||
105 | static void | ||
106 | nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | ||
107 | { | ||
108 | struct drm_device *dev = nv_crtc->base.dev; | ||
109 | |||
110 | nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; | ||
111 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), | ||
112 | ((y & 0xFFFF) << 16) | (x & 0xFFFF)); | ||
113 | /* Needed to make the cursor move. */ | ||
114 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0); | ||
115 | } | ||
116 | |||
117 | static void | ||
118 | nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) | ||
119 | { | ||
120 | NV_DEBUG_KMS(nv_crtc->base.dev, "\n"); | ||
121 | if (offset == nv_crtc->cursor.offset) | ||
122 | return; | ||
123 | |||
124 | nv_crtc->cursor.offset = offset; | ||
125 | if (nv_crtc->cursor.visible) { | ||
126 | nv_crtc->cursor.visible = false; | ||
127 | nv_crtc->cursor.show(nv_crtc, true); | ||
128 | } | ||
129 | } | ||
130 | |||
/* Wire up the nv50 cursor operations on a CRTC.  Always succeeds. */
int
nv50_cursor_init(struct nouveau_crtc *nv_crtc)
{
	nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
	nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
	nv_crtc->cursor.hide = nv50_cursor_hide;
	nv_crtc->cursor.show = nv50_cursor_show;
	return 0;
}
140 | |||
/* Shut the hardware cursor down on teardown: clear CURSOR_CTRL2 and
 * poll until the status field reads back zero, logging (but not
 * failing) on timeout. */
void
nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
{
	struct drm_device *dev = nv_crtc->base.dev;
	int idx = nv_crtc->index;

	NV_DEBUG_KMS(dev, "\n");

	nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
	if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
		NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
		NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
	}
}
157 | |||
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c new file mode 100644 index 00000000000..808f3ec8f82 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_dac.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Maarten Maathuis. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm_crtc_helper.h" | ||
29 | |||
30 | #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) | ||
31 | #include "nouveau_reg.h" | ||
32 | #include "nouveau_drv.h" | ||
33 | #include "nouveau_dma.h" | ||
34 | #include "nouveau_encoder.h" | ||
35 | #include "nouveau_connector.h" | ||
36 | #include "nouveau_crtc.h" | ||
37 | #include "nv50_display.h" | ||
38 | |||
/* DRM .disable hook: detach the DAC from its CRTC.  Blanks the CRTC,
 * clears the DAC's EVO mode control and requests an update.  No-op if
 * the encoder is not currently bound to a CRTC. */
static void
nv50_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	if (!nv_encoder->crtc)
		return;
	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);

	NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 4);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting DAC\n");
		return;
	}
	/* MODE_CTRL = 0 detaches the output; UPDATE commits it. */
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING  (evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING  (evo, 0);

	nv_encoder->crtc = NULL;
}
65 | |||
/* Analog load detection: drive a test pattern onto the DAC and sense
 * whether a monitor loads the lines.  The previous DPMS state is saved
 * up front and restored before returning.  Returns connected /
 * disconnected accordingly. */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	enum drm_connector_status status = connector_status_disconnected;
	uint32_t dpms_state, load_pattern, load_state;
	int or = nv_encoder->or;

	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));

	/* Power the DAC up for the measurement and wait for the write to
	 * be acknowledged. */
	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return status;
	}

	/* Use bios provided value if possible. */
	if (dev_priv->vbios.dactestval) {
		load_pattern = dev_priv->vbios.dactestval;
		NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
			      load_pattern);
	} else {
		load_pattern = 340;
		NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
			     load_pattern);
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));

	/* Stop driving the pattern and restore the saved DPMS state. */
	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);

	/* All PRESENT bits must be set for a positive detection. */
	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;

	if (status == connector_status_connected)
		NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
	else
		NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);

	return status;
}
120 | |||
/* DRM .dpms hook for the analog DAC: translate the DPMS level into the
 * hardware blank/hsync-off/vsync-off/power-off bits and write them to
 * DAC_DPMS_CTRL. */
static void
nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);

	/* wait for it to be done */
	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return;
	}

	/* Preserve everything above the low 7 state bits. */
	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;

	/* Any non-ON level blanks the output. */
	if (mode != DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;

	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	case DRM_MODE_DPMS_OFF:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	default:
		break;
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
}
164 | |||
/* DRM .save hook: intentionally unimplemented; logs loudly if called. */
static void
nv50_dac_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
170 | |||
/* DRM .restore hook: intentionally unimplemented; logs loudly if called. */
static void
nv50_dac_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
176 | |||
177 | static bool | ||
178 | nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
179 | struct drm_display_mode *adjusted_mode) | ||
180 | { | ||
181 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
182 | struct nouveau_connector *connector; | ||
183 | |||
184 | NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); | ||
185 | |||
186 | connector = nouveau_encoder_connector_get(nv_encoder); | ||
187 | if (!connector) { | ||
188 | NV_ERROR(encoder->dev, "Encoder has no connector\n"); | ||
189 | return false; | ||
190 | } | ||
191 | |||
192 | if (connector->scaling_mode != DRM_MODE_SCALE_NONE && | ||
193 | connector->native_mode) { | ||
194 | int id = adjusted_mode->base.id; | ||
195 | *adjusted_mode = *connector->native_mode; | ||
196 | adjusted_mode->base.id = id; | ||
197 | } | ||
198 | |||
199 | return true; | ||
200 | } | ||
201 | |||
/* DRM .prepare hook: nothing to do before a DAC mode set. */
static void
nv50_dac_prepare(struct drm_encoder *encoder)
{
}
206 | |||
/* DRM .commit hook: nothing to do after a DAC mode set. */
static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
211 | |||
/* DRM .mode_set hook: attach the DAC to its CRTC by programming the
 * EVO MODE_CTRL/MODE_CTRL2 pair (CRTC selection, output type bits and
 * sync polarities), after forcing the DAC to DPMS_ON. */
static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0, mode_ctl2 = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n",
		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);

	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;

	/* Lacking a working tv-out, this is not a 100% sure. */
	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
		mode_ctl |= 0x40;
	else
	if (nv_encoder->dcb->type == OUTPUT_TV)
		mode_ctl |= 0x100;

	/* Negative sync polarities requested by the mode. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;

	ret = RING_SPACE(evo, 3);
	if (ret) {
		NV_ERROR(dev, "no space while connecting DAC\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
	OUT_RING(evo, mode_ctl);
	OUT_RING(evo, mode_ctl2);

	/* Remember the binding so disconnect() knows what to undo. */
	nv_encoder->crtc = encoder->crtc;
}
257 | |||
258 | static struct drm_crtc * | ||
259 | nv50_dac_crtc_get(struct drm_encoder *encoder) | ||
260 | { | ||
261 | return nouveau_encoder(encoder)->crtc; | ||
262 | } | ||
263 | |||
/* Encoder helper callbacks for the nv50 analog DAC. */
static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
	.dpms = nv50_dac_dpms,
	.save = nv50_dac_save,
	.restore = nv50_dac_restore,
	.mode_fixup = nv50_dac_mode_fixup,
	.prepare = nv50_dac_prepare,
	.commit = nv50_dac_commit,
	.mode_set = nv50_dac_mode_set,
	.get_crtc = nv50_dac_crtc_get,
	.detect = nv50_dac_detect,
	.disable = nv50_dac_disconnect
};
276 | |||
277 | static void | ||
278 | nv50_dac_destroy(struct drm_encoder *encoder) | ||
279 | { | ||
280 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
281 | |||
282 | if (!encoder) | ||
283 | return; | ||
284 | |||
285 | NV_DEBUG_KMS(encoder->dev, "\n"); | ||
286 | |||
287 | drm_encoder_cleanup(encoder); | ||
288 | kfree(nv_encoder); | ||
289 | } | ||
290 | |||
/* Core encoder callbacks for the nv50 analog DAC. */
static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
	.destroy = nv50_dac_destroy,
};
294 | |||
/* Create a DAC encoder for the given DCB entry and attach it to
 * 'connector'.  Returns 0 on success or -ENOMEM. */
int
nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	/* DCB stores the output resource as a bitmask; or is its index. */
	nv_encoder->or = ffs(entry->or) - 1;

	drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
			 DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);

	/* DCB 'heads' is already a CRTC bitmask; DACs cannot be cloned. */
	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
319 | |||
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c new file mode 100644 index 00000000000..c99d9751880 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_evo.c | |||
@@ -0,0 +1,425 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_dma.h" | ||
29 | #include "nouveau_ramht.h" | ||
30 | #include "nv50_display.h" | ||
31 | |||
/* Tear down an EVO display channel and release everything it owns.
 *
 * Takes a pointer-to-pointer so the caller's reference is cleared
 * before teardown begins.  Safe to call with *pevo == NULL, and with
 * a partially constructed channel (unwind path of
 * nv50_evo_channel_new()).
 */
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	nouveau_ramht_ref(NULL, &evo->ramht, evo);
	nouveau_gpuobj_channel_takedown(evo);
	/* unmap before dropping the last reference to the pushbuf BO */
	nouveau_bo_unmap(evo->pushbuf_bo);
	nouveau_bo_ref(NULL, &evo->pushbuf_bo);

	if (evo->user)
		iounmap(evo->user);

	kfree(evo);
}
51 | |||
/* Fill in the instance memory of an EVO DMA object describing the
 * VRAM range [base, base + size).
 *
 * The object layout differs between pre-Fermi (<0xc0) and Fermi
 * chipsets.  NOTE(review): on Fermi, bit 31 of 'memtype' appears to
 * select large pages (flags5 = 0) — confirm against hardware docs.
 */
void
nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	u32 flags5;

	if (dev_priv->chipset < 0xc0) {
		/* not supported on 0x50, specified in format mthd */
		if (dev_priv->chipset == 0x50)
			memtype = 0;
		flags5 = 0x00010000;
	} else {
		if (memtype & 0x80000000)
			flags5 = 0x00000000; /* large pages */
		else
			flags5 = 0x00020000;
	}

	nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
			     NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
	nv_wo32(obj, 0x14, flags5);
	/* make the updated object visible to the hardware */
	dev_priv->engine.instmem.flush(obj->dev);
}
75 | |||
76 | int | ||
77 | nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, | ||
78 | u64 base, u64 size, struct nouveau_gpuobj **pobj) | ||
79 | { | ||
80 | struct nv50_display *disp = nv50_display(evo->dev); | ||
81 | struct nouveau_gpuobj *obj = NULL; | ||
82 | int ret; | ||
83 | |||
84 | ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj); | ||
85 | if (ret) | ||
86 | return ret; | ||
87 | obj->engine = NVOBJ_ENGINE_DISPLAY; | ||
88 | |||
89 | nv50_evo_dmaobj_init(obj, memtype, base, size); | ||
90 | |||
91 | ret = nouveau_ramht_insert(evo, handle, obj); | ||
92 | if (ret) | ||
93 | goto out; | ||
94 | |||
95 | if (pobj) | ||
96 | nouveau_gpuobj_ref(obj, pobj); | ||
97 | out: | ||
98 | nouveau_gpuobj_ref(NULL, &obj); | ||
99 | return ret; | ||
100 | } | ||
101 | |||
102 | static int | ||
103 | nv50_evo_channel_new(struct drm_device *dev, int chid, | ||
104 | struct nouveau_channel **pevo) | ||
105 | { | ||
106 | struct nv50_display *disp = nv50_display(dev); | ||
107 | struct nouveau_channel *evo; | ||
108 | int ret; | ||
109 | |||
110 | evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); | ||
111 | if (!evo) | ||
112 | return -ENOMEM; | ||
113 | *pevo = evo; | ||
114 | |||
115 | evo->id = chid; | ||
116 | evo->dev = dev; | ||
117 | evo->user_get = 4; | ||
118 | evo->user_put = 0; | ||
119 | |||
120 | ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, | ||
121 | &evo->pushbuf_bo); | ||
122 | if (ret == 0) | ||
123 | ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); | ||
124 | if (ret) { | ||
125 | NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); | ||
126 | nv50_evo_channel_del(pevo); | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | ret = nouveau_bo_map(evo->pushbuf_bo); | ||
131 | if (ret) { | ||
132 | NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); | ||
133 | nv50_evo_channel_del(pevo); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | evo->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
138 | NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); | ||
139 | if (!evo->user) { | ||
140 | NV_ERROR(dev, "Error mapping EVO control regs.\n"); | ||
141 | nv50_evo_channel_del(pevo); | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | |||
145 | /* bind primary evo channel's ramht to the channel */ | ||
146 | if (disp->master && evo != disp->master) | ||
147 | nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL); | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
/* Bring EVO channel 'evo' up on the hardware: point the channel at
 * its push buffer, enable its DMA engine, wait for it to report
 * ready, and prime the software ring-tracking state.
 *
 * Returns 0 on success, -EBUSY if the channel never comes ready, or
 * an error from RING_SPACE().
 */
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id, ret, i;
	u64 pushbuf = evo->pushbuf_bo->bo.offset;
	u32 tmp;

	/* NOTE(review): these two read-modify-writes appear to nudge
	 * the channel out of particular intermediate states; the magic
	 * bits are undocumented — confirm against hardware docs. */
	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo: program the push buffer address and enable
	 * the channel's DMA engine */
	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);

	/* software ring state: 4KiB push buffer in dwords, minus the
	 * two reserved entries, rounded down to a multiple of 8 */
	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	/* pad the start of the ring with no-ops */
	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}
204 | |||
/* Shut EVO channel 'evo' down: mask its error reporting, clear its
 * bit in PDISPLAY_INTR_0, disable its DMA engine, and wait for the
 * hardware to report the channel idle (logging on timeout).
 */
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id;

	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}
220 | |||
221 | static void | ||
222 | nv50_evo_destroy(struct drm_device *dev) | ||
223 | { | ||
224 | struct nv50_display *disp = nv50_display(dev); | ||
225 | int i; | ||
226 | |||
227 | for (i = 0; i < 2; i++) { | ||
228 | if (disp->crtc[i].sem.bo) { | ||
229 | nouveau_bo_unmap(disp->crtc[i].sem.bo); | ||
230 | nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo); | ||
231 | } | ||
232 | nv50_evo_channel_del(&disp->crtc[i].sync); | ||
233 | } | ||
234 | nouveau_gpuobj_ref(NULL, &disp->ntfy); | ||
235 | nv50_evo_channel_del(&disp->master); | ||
236 | } | ||
237 | |||
238 | static int | ||
239 | nv50_evo_create(struct drm_device *dev) | ||
240 | { | ||
241 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
242 | struct nv50_display *disp = nv50_display(dev); | ||
243 | struct nouveau_gpuobj *ramht = NULL; | ||
244 | struct nouveau_channel *evo; | ||
245 | int ret, i, j; | ||
246 | |||
247 | /* create primary evo channel, the one we use for modesetting | ||
248 | * purporses | ||
249 | */ | ||
250 | ret = nv50_evo_channel_new(dev, 0, &disp->master); | ||
251 | if (ret) | ||
252 | return ret; | ||
253 | evo = disp->master; | ||
254 | |||
255 | /* setup object management on it, any other evo channel will | ||
256 | * use this also as there's no per-channel support on the | ||
257 | * hardware | ||
258 | */ | ||
259 | ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, | ||
260 | NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); | ||
261 | if (ret) { | ||
262 | NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); | ||
263 | goto err; | ||
264 | } | ||
265 | |||
266 | ret = drm_mm_init(&evo->ramin_heap, 0, 32768); | ||
267 | if (ret) { | ||
268 | NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); | ||
269 | goto err; | ||
270 | } | ||
271 | |||
272 | ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); | ||
273 | if (ret) { | ||
274 | NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); | ||
275 | goto err; | ||
276 | } | ||
277 | |||
278 | ret = nouveau_ramht_new(dev, ramht, &evo->ramht); | ||
279 | nouveau_gpuobj_ref(NULL, &ramht); | ||
280 | if (ret) | ||
281 | goto err; | ||
282 | |||
283 | /* not sure exactly what this is.. | ||
284 | * | ||
285 | * the first dword of the structure is used by nvidia to wait on | ||
286 | * full completion of an EVO "update" command. | ||
287 | * | ||
288 | * method 0x8c on the master evo channel will fill a lot more of | ||
289 | * this structure with some undefined info | ||
290 | */ | ||
291 | ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0, | ||
292 | NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy); | ||
293 | if (ret) | ||
294 | goto err; | ||
295 | |||
296 | ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, | ||
297 | disp->ntfy->vinst, disp->ntfy->size, NULL); | ||
298 | if (ret) | ||
299 | goto err; | ||
300 | |||
301 | /* create some default objects for the scanout memtypes we support */ | ||
302 | ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, | ||
303 | 0, dev_priv->vram_size, NULL); | ||
304 | if (ret) | ||
305 | goto err; | ||
306 | |||
307 | ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, | ||
308 | 0, dev_priv->vram_size, NULL); | ||
309 | if (ret) | ||
310 | goto err; | ||
311 | |||
312 | ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | | ||
313 | (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), | ||
314 | 0, dev_priv->vram_size, NULL); | ||
315 | if (ret) | ||
316 | goto err; | ||
317 | |||
318 | ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | | ||
319 | (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00), | ||
320 | 0, dev_priv->vram_size, NULL); | ||
321 | if (ret) | ||
322 | goto err; | ||
323 | |||
324 | /* create "display sync" channels and other structures we need | ||
325 | * to implement page flipping | ||
326 | */ | ||
327 | for (i = 0; i < 2; i++) { | ||
328 | struct nv50_display_crtc *dispc = &disp->crtc[i]; | ||
329 | u64 offset; | ||
330 | |||
331 | ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync); | ||
332 | if (ret) | ||
333 | goto err; | ||
334 | |||
335 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | ||
336 | 0, 0x0000, &dispc->sem.bo); | ||
337 | if (!ret) { | ||
338 | ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); | ||
339 | if (!ret) | ||
340 | ret = nouveau_bo_map(dispc->sem.bo); | ||
341 | if (ret) | ||
342 | nouveau_bo_ref(NULL, &dispc->sem.bo); | ||
343 | offset = dispc->sem.bo->bo.offset; | ||
344 | } | ||
345 | |||
346 | if (ret) | ||
347 | goto err; | ||
348 | |||
349 | ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000, | ||
350 | offset, 4096, NULL); | ||
351 | if (ret) | ||
352 | goto err; | ||
353 | |||
354 | ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, | ||
355 | 0, dev_priv->vram_size, NULL); | ||
356 | if (ret) | ||
357 | goto err; | ||
358 | |||
359 | ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | | ||
360 | (dev_priv->chipset < 0xc0 ? | ||
361 | 0x7a00 : 0xfe00), | ||
362 | 0, dev_priv->vram_size, NULL); | ||
363 | if (ret) | ||
364 | goto err; | ||
365 | |||
366 | ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | | ||
367 | (dev_priv->chipset < 0xc0 ? | ||
368 | 0x7000 : 0xfe00), | ||
369 | 0, dev_priv->vram_size, NULL); | ||
370 | if (ret) | ||
371 | goto err; | ||
372 | |||
373 | for (j = 0; j < 4096; j += 4) | ||
374 | nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000); | ||
375 | dispc->sem.offset = 0; | ||
376 | } | ||
377 | |||
378 | return 0; | ||
379 | |||
380 | err: | ||
381 | nv50_evo_destroy(dev); | ||
382 | return ret; | ||
383 | } | ||
384 | |||
385 | int | ||
386 | nv50_evo_init(struct drm_device *dev) | ||
387 | { | ||
388 | struct nv50_display *disp = nv50_display(dev); | ||
389 | int ret, i; | ||
390 | |||
391 | if (!disp->master) { | ||
392 | ret = nv50_evo_create(dev); | ||
393 | if (ret) | ||
394 | return ret; | ||
395 | } | ||
396 | |||
397 | ret = nv50_evo_channel_init(disp->master); | ||
398 | if (ret) | ||
399 | return ret; | ||
400 | |||
401 | for (i = 0; i < 2; i++) { | ||
402 | ret = nv50_evo_channel_init(disp->crtc[i].sync); | ||
403 | if (ret) | ||
404 | return ret; | ||
405 | } | ||
406 | |||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | void | ||
411 | nv50_evo_fini(struct drm_device *dev) | ||
412 | { | ||
413 | struct nv50_display *disp = nv50_display(dev); | ||
414 | int i; | ||
415 | |||
416 | for (i = 0; i < 2; i++) { | ||
417 | if (disp->crtc[i].sync) | ||
418 | nv50_evo_channel_fini(disp->crtc[i].sync); | ||
419 | } | ||
420 | |||
421 | if (disp->master) | ||
422 | nv50_evo_channel_fini(disp->master); | ||
423 | |||
424 | nv50_evo_destroy(dev); | ||
425 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h new file mode 100644 index 00000000000..3860ca62cb1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_evo.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Maarten Maathuis. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__

/* Method offsets and values for the NV50 EVO display engine, pushed
 * through the EVO DMA channels (see nv50_evo.c). */

/* core channel methods */
#define NV50_EVO_UPDATE                              0x00000080
#define NV50_EVO_UNK84                               0x00000084
#define NV50_EVO_UNK84_NOTIFY                        0x40000000
#define NV50_EVO_UNK84_NOTIFY_DISABLED               0x00000000
#define NV50_EVO_UNK84_NOTIFY_ENABLED                0x40000000
#define NV50_EVO_DMA_NOTIFY                          0x00000088
#define NV50_EVO_DMA_NOTIFY_HANDLE                   0xffffffff
#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE              0x00000000
#define NV50_EVO_UNK8C                               0x0000008C

/* per-DAC methods; each DAC's method block is 0x80 bytes apart */
#define NV50_EVO_DAC(n, r)           ((n) * 0x80 + NV50_EVO_DAC_##r)
#define NV50_EVO_DAC_MODE_CTRL                       0x00000400
#define NV50_EVO_DAC_MODE_CTRL_CRTC0                 0x00000001
#define NV50_EVO_DAC_MODE_CTRL_CRTC1                 0x00000002
#define NV50_EVO_DAC_MODE_CTRL2                      0x00000404
#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC               0x00000001
#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC               0x00000002

/* per-SOR methods; each SOR's method block is 0x40 bytes apart */
#define NV50_EVO_SOR(n, r)           ((n) * 0x40 + NV50_EVO_SOR_##r)
#define NV50_EVO_SOR_MODE_CTRL                       0x00000600
#define NV50_EVO_SOR_MODE_CTRL_CRTC0                 0x00000001
#define NV50_EVO_SOR_MODE_CTRL_CRTC1                 0x00000002
#define NV50_EVO_SOR_MODE_CTRL_TMDS                  0x00000100
#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK        0x00000400
#define NV50_EVO_SOR_MODE_CTRL_NHSYNC                0x00001000
#define NV50_EVO_SOR_MODE_CTRL_NVSYNC                0x00002000

/* per-CRTC methods; each CRTC's method block is 0x400 bytes apart */
#define NV50_EVO_CRTC(n, r)         ((n) * 0x400 + NV50_EVO_CRTC_##r)
#define NV84_EVO_CRTC(n, r)         ((n) * 0x400 + NV84_EVO_CRTC_##r)
#define NV50_EVO_CRTC_UNK0800                        0x00000800
#define NV50_EVO_CRTC_CLOCK                          0x00000804
#define NV50_EVO_CRTC_INTERLACE                      0x00000808
#define NV50_EVO_CRTC_DISPLAY_START                  0x00000810
#define NV50_EVO_CRTC_DISPLAY_TOTAL                  0x00000814
#define NV50_EVO_CRTC_SYNC_DURATION                  0x00000818
#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END        0x0000081c
#define NV50_EVO_CRTC_UNK0820                        0x00000820
#define NV50_EVO_CRTC_UNK0824                        0x00000824
#define NV50_EVO_CRTC_UNK082C                        0x0000082c
#define NV50_EVO_CRTC_CLUT_MODE                      0x00000840
/* You can't have a palette in 8 bit mode (=OFF) */
#define NV50_EVO_CRTC_CLUT_MODE_BLANK                0x00000000
#define NV50_EVO_CRTC_CLUT_MODE_OFF                  0x80000000
#define NV50_EVO_CRTC_CLUT_MODE_ON                   0xC0000000
#define NV50_EVO_CRTC_CLUT_OFFSET                    0x00000844
#define NV84_EVO_CRTC_CLUT_DMA                       0x0000085C
#define NV84_EVO_CRTC_CLUT_DMA_HANDLE                0xffffffff
#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE           0x00000000
#define NV50_EVO_CRTC_FB_OFFSET                      0x00000860
#define NV50_EVO_CRTC_FB_SIZE                        0x00000868
#define NV50_EVO_CRTC_FB_CONFIG                      0x0000086c
#define NV50_EVO_CRTC_FB_CONFIG_MODE                 0x00100000
#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE            0x00000000
#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH           0x00100000
#define NV50_EVO_CRTC_FB_DEPTH                       0x00000870
#define NV50_EVO_CRTC_FB_DEPTH_8                     0x00001e00
#define NV50_EVO_CRTC_FB_DEPTH_15                    0x0000e900
#define NV50_EVO_CRTC_FB_DEPTH_16                    0x0000e800
#define NV50_EVO_CRTC_FB_DEPTH_24                    0x0000cf00
#define NV50_EVO_CRTC_FB_DEPTH_30                    0x0000d100
#define NV50_EVO_CRTC_FB_DMA                         0x00000874
#define NV50_EVO_CRTC_FB_DMA_HANDLE                  0xffffffff
#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE             0x00000000
#define NV50_EVO_CRTC_CURSOR_CTRL                    0x00000880
#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE               0x05000000
#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW               0x85000000
#define NV50_EVO_CRTC_CURSOR_OFFSET                  0x00000884
#define NV84_EVO_CRTC_CURSOR_DMA                     0x0000089c
#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE              0xffffffff
#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE         0x00000000
#define NV50_EVO_CRTC_DITHER_CTRL                    0x000008a0
#define NV50_EVO_CRTC_DITHER_CTRL_OFF                0x00000000
#define NV50_EVO_CRTC_DITHER_CTRL_ON                 0x00000011
#define NV50_EVO_CRTC_SCALE_CTRL                     0x000008a4
#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE            0x00000000
#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE              0x00000009
#define NV50_EVO_CRTC_COLOR_CTRL                     0x000008a8
#define NV50_EVO_CRTC_COLOR_CTRL_COLOR               0x00040000
#define NV50_EVO_CRTC_FB_POS                         0x000008c0
#define NV50_EVO_CRTC_REAL_RES                       0x000008c8
#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET            0x000008d4
/* Pack an (x, y) offset into one method value.  Arguments are fully
 * parenthesized so expression arguments (e.g. "a | b") expand
 * correctly; the old macro cast only the first token of each. */
#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
	((((unsigned)(y) << 16) & 0xFFFF0000) | (((unsigned)(x)) & 0x0000FFFF))
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1                     0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2                     0x000008dc
#define NV50_EVO_CRTC_UNK900                         0x00000900
#define NV50_EVO_CRTC_UNK904                         0x00000904

#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c new file mode 100644 index 00000000000..bdd2afe2920 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fb.c | |||
@@ -0,0 +1,294 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
/* Private state of the NV50 FB (memory controller) engine: a single
 * scratch page and its bus address, programmed into register 0x100c08
 * by nv50_fb_init(). */
struct nv50_fb_priv {
	struct page *r100c08_page;	/* scratch page backing store */
	dma_addr_t r100c08;		/* its DMA/bus address */
};
10 | |||
/* Free all nv50_fb_priv state: the tag heap and the scratch page.
 * Also used as the unwind path of a partially failed
 * nv50_fb_create(), so every step is guarded. */
static void
nv50_fb_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nv50_fb_priv *priv = pfb->priv;

	if (drm_mm_initialized(&pfb->tag_heap))
		drm_mm_takedown(&pfb->tag_heap);

	if (priv->r100c08_page) {
		/* unmap before releasing the page backing the mapping */
		pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->r100c08_page);
	}

	kfree(priv);
	pfb->priv = NULL;
}
30 | |||
/* One-time construction of nv50_fb_priv: allocate a zeroed scratch
 * page, map it for bidirectional DMA, and initialise the tag heap
 * sized from the tag count read from register 0x100320.
 *
 * Unwinds via nv50_fb_destroy() on any failure.
 */
static int
nv50_fb_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nv50_fb_priv *priv;
	u32 tagmem;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	/* publish early so nv50_fb_destroy() can unwind partial state */
	pfb->priv = priv;

	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!priv->r100c08_page) {
		nv50_fb_destroy(dev);
		return -ENOMEM;
	}

	priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
		nv50_fb_destroy(dev);
		return -EFAULT;
	}

	tagmem = nv_rd32(dev, 0x100320);
	NV_DEBUG(dev, "%d tags available\n", tagmem);
	ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
	if (ret) {
		nv50_fb_destroy(dev);
		return ret;
	}

	return 0;
}
68 | |||
/* Engine hook: program the FB engine's global registers, creating the
 * private scratch-page state on the first call.
 *
 * Returns 0 on success or the error from nv50_fb_create().
 */
int
nv50_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fb_priv *priv;
	int ret;

	if (!dev_priv->engine.fb.priv) {
		ret = nv50_fb_create(dev);
		if (ret)
			return ret;
	}
	priv = dev_priv->engine.fb.priv;

	/* Not a clue what this is exactly. Without pointing it at a
	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
	 * cause IOMMU "read from address 0" errors (rh#561267)
	 */
	nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);

	/* This is needed to get meaningful information from 100c90
	 * on traps. No idea what these values mean exactly. */
	switch (dev_priv->chipset) {
	case 0x50:
		nv_wr32(dev, 0x100c90, 0x000707ff);
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		nv_wr32(dev, 0x100c90, 0x000d0fff);
		break;
	case 0xaf:
		nv_wr32(dev, 0x100c90, 0x089d1fff);
		break;
	default:
		nv_wr32(dev, 0x100c90, 0x001d07ff);
		break;
	}

	return 0;
}
110 | |||
/* Engine hook: full teardown of the FB engine's private state. */
void
nv50_fb_takedown(struct drm_device *dev)
{
	nv50_fb_destroy(dev);
}
116 | |||
/* Decode tables used by nv50_fb_vm_trap() to turn raw VM fault status
 * fields into readable names.  A non-NULL third field links a client
 * or engine entry to its table of sub-clients. */

/* sub-clients of the DISPATCH client */
static struct nouveau_enum vm_dispatch_subclients[] = {
	{ 0x00000000, "GRCTX", NULL },
	{ 0x00000001, "NOTIFY", NULL },
	{ 0x00000002, "QUERY", NULL },
	{ 0x00000003, "COND", NULL },
	{ 0x00000004, "M2M_IN", NULL },
	{ 0x00000005, "M2M_OUT", NULL },
	{ 0x00000006, "M2M_NOTIFY", NULL },
	{}
};

/* sub-clients of the CCACHE client */
static struct nouveau_enum vm_ccache_subclients[] = {
	{ 0x00000000, "CB", NULL },
	{ 0x00000001, "TIC", NULL },
	{ 0x00000002, "TSC", NULL },
	{}
};

/* sub-clients of the PROP client */
static struct nouveau_enum vm_prop_subclients[] = {
	{ 0x00000000, "RT0", NULL },
	{ 0x00000001, "RT1", NULL },
	{ 0x00000002, "RT2", NULL },
	{ 0x00000003, "RT3", NULL },
	{ 0x00000004, "RT4", NULL },
	{ 0x00000005, "RT5", NULL },
	{ 0x00000006, "RT6", NULL },
	{ 0x00000007, "RT7", NULL },
	{ 0x00000008, "ZETA", NULL },
	{ 0x00000009, "LOCAL", NULL },
	{ 0x0000000a, "GLOBAL", NULL },
	{ 0x0000000b, "STACK", NULL },
	{ 0x0000000c, "DST2D", NULL },
	{}
};

/* sub-clients of the PFIFO engine */
static struct nouveau_enum vm_pfifo_subclients[] = {
	{ 0x00000000, "PUSHBUF", NULL },
	{ 0x00000001, "SEMAPHORE", NULL },
	{}
};

/* sub-clients of the BAR engine */
static struct nouveau_enum vm_bar_subclients[] = {
	{ 0x00000000, "FB", NULL },
	{ 0x00000001, "IN", NULL },
	{}
};

/* faulting client IDs (status field st2) */
static struct nouveau_enum vm_client[] = {
	{ 0x00000000, "STRMOUT", NULL },
	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
	{ 0x00000004, "PFIFO_WRITE", NULL },
	{ 0x00000005, "CCACHE", vm_ccache_subclients },
	{ 0x00000006, "PPPP", NULL },
	{ 0x00000007, "CLIPID", NULL },
	{ 0x00000008, "PFIFO_READ", NULL },
	{ 0x00000009, "VFETCH", NULL },
	{ 0x0000000a, "TEXTURE", NULL },
	{ 0x0000000b, "PROP", vm_prop_subclients },
	{ 0x0000000c, "PVP", NULL },
	{ 0x0000000d, "PBSP", NULL },
	{ 0x0000000e, "PCRYPT", NULL },
	{ 0x0000000f, "PCOUNTER", NULL },
	{ 0x00000011, "PDAEMON", NULL },
	{}
};

/* faulting engine IDs (status field st0) */
static struct nouveau_enum vm_engine[] = {
	{ 0x00000000, "PGRAPH", NULL },
	{ 0x00000001, "PVP", NULL },
	{ 0x00000004, "PEEPHOLE", NULL },
	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
	{ 0x00000006, "BAR", vm_bar_subclients },
	{ 0x00000008, "PPPP", NULL },
	{ 0x00000009, "PBSP", NULL },
	{ 0x0000000a, "PCRYPT", NULL },
	{ 0x0000000b, "PCOUNTER", NULL },
	{ 0x0000000c, "SEMAPHORE_BG", NULL },
	{ 0x0000000d, "PCOPY", NULL },
	{ 0x0000000e, "PDAEMON", NULL },
	{}
};

/* fault reasons (status field st1) */
static struct nouveau_enum vm_fault[] = {
	{ 0x00000000, "PT_NOT_PRESENT", NULL },
	{ 0x00000001, "PT_TOO_SHORT", NULL },
	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
	{ 0x00000004, "PAGE_READ_ONLY", NULL },
	{ 0x00000006, "NULL_DMAOBJ", NULL },
	{ 0x00000007, "WRONG_MEMTYPE", NULL },
	{ 0x0000000b, "VRAM_LIMIT", NULL },
	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
	{}
};
211 | |||
/* Read and decode the most recent VM fault recorded by the memory
 * controller.
 *
 * The 6-word trap record is read through the 0x100c90/0x100c94
 * indexed window; bit 31 of 0x100c90 indicates a valid record and is
 * written back afterwards (NOTE(review): presumably to ack/re-arm
 * fault capture — confirm).  When 'display' is non-zero the faulting
 * channel is looked up and the fault is pretty-printed using the
 * enum tables above.
 */
void
nv50_fb_vm_trap(struct drm_device *dev, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	const struct nouveau_enum *en, *cl;
	unsigned long flags;
	u32 trap[6], idx, chinst;
	u8 st0, st1, st2, st3;
	int i, ch;

	idx = nv_rd32(dev, 0x100c90);
	if (!(idx & 0x80000000))
		return;	/* no valid trap record */
	idx &= 0x00ffffff;

	/* fetch the six status words through the indexed window */
	for (i = 0; i < 6; i++) {
		nv_wr32(dev, 0x100c90, idx | i << 24);
		trap[i] = nv_rd32(dev, 0x100c94);
	}
	nv_wr32(dev, 0x100c90, idx | 0x80000000);

	if (!display)
		return;

	/* lookup channel id */
	chinst = (trap[2] << 16) | trap[1];
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];

		if (!chan || !chan->ramin)
			continue;

		/* match against the channel's instance address */
		if (chinst == chan->ramin->vinst >> 12)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* decode status bits into something more useful; older chips
	 * pack four 4-bit fields, newer ones four 8-bit fields */
	if (dev_priv->chipset < 0xa3 ||
	    dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
		st0 = (trap[0] & 0x0000000f) >> 0;
		st1 = (trap[0] & 0x000000f0) >> 4;
		st2 = (trap[0] & 0x00000f00) >> 8;
		st3 = (trap[0] & 0x0000f000) >> 12;
	} else {
		st0 = (trap[0] & 0x000000ff) >> 0;
		st1 = (trap[0] & 0x0000ff00) >> 8;
		st2 = (trap[0] & 0x00ff0000) >> 16;
		st3 = (trap[0] & 0xff000000) >> 24;
	}

	NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
		(trap[5] & 0x00000100) ? "read" : "write",
		trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);

	/* engine name */
	en = nouveau_enum_find(vm_engine, st0);
	if (en)
		printk("%s/", en->name);
	else
		printk("%02x/", st0);

	/* client name */
	cl = nouveau_enum_find(vm_client, st2);
	if (cl)
		printk("%s/", cl->name);
	else
		printk("%02x/", st2);

	/* sub-client, if either the client or the engine has a table */
	if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
	else cl = NULL;
	if (cl)
		printk("%s", cl->name);
	else
		printk("%02x", st3);

	printk(" reason: ");
	en = nouveau_enum_find(vm_fault, st1);
	if (en)
		printk("%s\n", en->name);
	else
		printk("0x%08x\n", st1);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c new file mode 100644 index 00000000000..c34a074f7ea --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -0,0 +1,504 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | #include "nouveau_vm.h" | ||
32 | |||
/* Rebuild the PFIFO runlist and hand it to the hardware.  Two playlist
 * buffers are alternated: each call writes into the buffer selected by
 * cur_playlist, flips the index for next time, and points the hardware
 * at the freshly-written buffer.
 */
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	cur = pfifo->playlist[pfifo->cur_playlist];
	pfifo->cur_playlist = !pfifo->cur_playlist;

	/* We never schedule channel 0 or 127 */
	for (i = 1, nr = 0; i < 127; i++) {
		/* only channels with a RAMFC (fully created) are runnable */
		if (dev_priv->channels.ptr[i] &&
		    dev_priv->channels.ptr[i]->ramfc) {
			nv_wo32(cur, (nr * 4), i);
			nr++;
		}
	}
	dev_priv->engine.instmem.flush(dev);

	nv_wr32(dev, 0x32f4, cur->vinst >> 12);	/* playlist instance address */
	nv_wr32(dev, 0x32ec, nr);		/* entry count */
	nv_wr32(dev, 0x2500, 0x101);		/* NOTE(review): presumably
						 * triggers runlist reload;
						 * verify against hw docs */
}
60 | |||
61 | static void | ||
62 | nv50_fifo_channel_enable(struct drm_device *dev, int channel) | ||
63 | { | ||
64 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
65 | struct nouveau_channel *chan = dev_priv->channels.ptr[channel]; | ||
66 | uint32_t inst; | ||
67 | |||
68 | NV_DEBUG(dev, "ch%d\n", channel); | ||
69 | |||
70 | if (dev_priv->chipset == 0x50) | ||
71 | inst = chan->ramfc->vinst >> 12; | ||
72 | else | ||
73 | inst = chan->ramfc->vinst >> 8; | ||
74 | |||
75 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | | ||
76 | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); | ||
77 | } | ||
78 | |||
79 | static void | ||
80 | nv50_fifo_channel_disable(struct drm_device *dev, int channel) | ||
81 | { | ||
82 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
83 | uint32_t inst; | ||
84 | |||
85 | NV_DEBUG(dev, "ch%d\n", channel); | ||
86 | |||
87 | if (dev_priv->chipset == 0x50) | ||
88 | inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; | ||
89 | else | ||
90 | inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; | ||
91 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); | ||
92 | } | ||
93 | |||
94 | static void | ||
95 | nv50_fifo_init_reset(struct drm_device *dev) | ||
96 | { | ||
97 | uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; | ||
98 | |||
99 | NV_DEBUG(dev, "\n"); | ||
100 | |||
101 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); | ||
102 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e); | ||
103 | } | ||
104 | |||
/* Install the PFIFO interrupt handler (shared with the nv04 code path)
 * on interrupt source 8, then ack and unmask every PFIFO interrupt bit.
 */
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	/* clear all pending bits, then enable all sources */
	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
114 | |||
115 | static void | ||
116 | nv50_fifo_init_context_table(struct drm_device *dev) | ||
117 | { | ||
118 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
119 | int i; | ||
120 | |||
121 | NV_DEBUG(dev, "\n"); | ||
122 | |||
123 | for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { | ||
124 | if (dev_priv->channels.ptr[i]) | ||
125 | nv50_fifo_channel_enable(dev, i); | ||
126 | else | ||
127 | nv50_fifo_channel_disable(dev, i); | ||
128 | } | ||
129 | |||
130 | nv50_fifo_playlist_update(dev); | ||
131 | } | ||
132 | |||
/* Write a magic value to 0x250c.  Meaning undocumented; the __nv
 * suffix suggests it mirrors what NVIDIA's driver programs — verify.
 */
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}
140 | |||
/* Zero a set of PFIFO state registers and enable the two dummy
 * channels (0 and 127) that the instmem code sets up.
 */
static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* clear assorted PFIFO state (individual meanings undocumented) */
	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);
	/* same value is written to RAMFC+0x44 in nv50_fifo_create_context */
	nv_wr32(dev, 0x2044, 0x01003fff);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0);
	nv50_fifo_channel_enable(dev, 127);
}
158 | |||
/* Bring up the PFIFO engine.  The first call allocates the two
 * playlist buffers; later calls (buffers already present, e.g. after
 * suspend) just flip the current playlist index and re-program the
 * hardware.
 *
 * Returns 0 on success, or a negative errno from gpuobj allocation.
 */
int
nv50_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int ret;

	NV_DEBUG(dev, "\n");

	if (pfifo->playlist[0]) {
		/* resume path: buffers survive, just reinit the hw */
		pfifo->cur_playlist = !pfifo->cur_playlist;
		goto just_reset;
	}

	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &pfifo->playlist[0]);
	if (ret) {
		NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &pfifo->playlist[1]);
	if (ret) {
		/* release the first buffer before failing */
		nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
		NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
		return ret;
	}

just_reset:
	nv50_fifo_init_reset(dev);
	nv50_fifo_init_intr(dev);
	nv50_fifo_init_context_table(dev);
	nv50_fifo_init_regs__nv(dev);
	nv50_fifo_init_regs(dev);
	dev_priv->engine.fifo.enable(dev);
	dev_priv->engine.fifo.reassign(dev, true);

	return 0;
}
201 | |||
202 | void | ||
203 | nv50_fifo_takedown(struct drm_device *dev) | ||
204 | { | ||
205 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
206 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
207 | |||
208 | NV_DEBUG(dev, "\n"); | ||
209 | |||
210 | if (!pfifo->playlist[0]) | ||
211 | return; | ||
212 | |||
213 | nv_wr32(dev, 0x2140, 0x00000000); | ||
214 | nouveau_irq_unregister(dev, 8); | ||
215 | |||
216 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); | ||
217 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); | ||
218 | } | ||
219 | |||
220 | int | ||
221 | nv50_fifo_channel_id(struct drm_device *dev) | ||
222 | { | ||
223 | return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & | ||
224 | NV50_PFIFO_CACHE1_PUSH1_CHID_MASK; | ||
225 | } | ||
226 | |||
/* Create per-channel PFIFO state: the RAMFC context block and the
 * CACHE1 spill buffer, plus a mapping of the channel's USER control
 * area.  Finishes by enabling the channel and adding it to the
 * playlist.
 *
 * On NV50 the RAMFC/cache live at fixed offsets inside the channel's
 * RAMIN (wrapped as "fake" gpuobjs); on later chipsets they are
 * separate allocations within the channel.
 *
 * Returns 0 on success or a negative errno.  NOTE(review): failures
 * after the first allocation do not free earlier allocations here —
 * presumably the caller destroys the channel on error; verify.
 */
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (dev_priv->chipset == 0x50) {
		/* RAMFC at RAMIN+0 (0x100 bytes), cache at RAMIN+0x400 */
		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
					      chan->ramin->vinst, 0x100,
					      NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
					      chan->ramin->vinst + 0x0400,
					      4096, 0, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
					 0, &chan->cache);
		if (ret)
			return ret;
	}
	ramfc = chan->ramfc;

	/* map the channel's USER control registers from BAR0 */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV50_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* initial RAMFC contents: pushbuf object, RAMHT config, IB ring */
	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
	nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
			     (4 << 24) /* SEARCH_FULL */ |
			     (chan->ramht->gpuobj->cinst >> 4));
	nv_wo32(ramfc, 0x44, 0x01003fff);
	nv_wo32(ramfc, 0x60, 0x7fffffff);
	nv_wo32(ramfc, 0x40, 0x00000000);
	nv_wo32(ramfc, 0x7c, 0x30000001);
	nv_wo32(ramfc, 0x78, 0x00000000);
	nv_wo32(ramfc, 0x3c, 0x403f6078);
	nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
	nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);

	if (dev_priv->chipset != 0x50) {
		/* G84+ additionally records id/RAMFC in RAMIN, and the
		 * cache/RAMIN addresses in RAMFC */
		nv_wo32(chan->ramin, 0, chan->id);
		nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);

		nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
		nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
	}

	dev_priv->engine.instmem.flush(dev);

	nv50_fifo_channel_enable(dev, chan->id);
	nv50_fifo_playlist_update(dev);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
301 | |||
/* Destroy a channel's PFIFO state: unload its context if currently
 * active, remove it from the context table and playlist (under the
 * context-switch lock), then release its mapping and gpuobjs.
 */
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc = NULL;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);

	/* Unload the context if it's the currently active one */
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}

	/* This will ensure the channel is seen as disabled. */
	nouveau_gpuobj_ref(chan->ramfc, &ramfc);	/* keep temp ref */
	nouveau_gpuobj_ref(NULL, &chan->ramfc);		/* drop chan's ref */
	nv50_fifo_channel_disable(dev, chan->id);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127);
	nv50_fifo_playlist_update(dev);

	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the channel resources */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}
	/* drop the temp ref; RAMFC is actually freed here */
	nouveau_gpuobj_ref(NULL, &ramfc);
	nouveau_gpuobj_ref(NULL, &chan->cache);
}
344 | |||
/* Restore a channel's FIFO state: copy each saved RAMFC word into its
 * PFIFO register, replay the CACHE1 method/data pairs saved by
 * nv50_fifo_unload_context(), then make the channel current in
 * CACHE1_PUSH1.  Always returns 0.
 */
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc;
	struct nouveau_gpuobj *cache = chan->cache;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* RAMFC offset -> PFIFO register, one word at a time (mirror of
	 * the save table in nv50_fifo_unload_context) */
	nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
	nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
	nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
	nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
	nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
	nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
	nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
	nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
	nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
	nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
	nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
	nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
	nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
	nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
	nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
	nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
	nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
	nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
	nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
	nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
	nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
	nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
	nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
	nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
	nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
	nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
	nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
	nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
	nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
	nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
	nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
	nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
	nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));

	/* replay the CACHE1 entries drained at unload; RAMFC+0x84 holds
	 * the saved pair count */
	cnt = nv_ro32(ramfc, 0x84);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(cache, (ptr * 8) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(cache, (ptr * 8) + 4));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (dev_priv->chipset != 0x50) {
		nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
		nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
		nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
		nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
		nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
	}

	/* switch CACHE1 to this channel; bit 16 presumably marks the
	 * entry valid/DMA mode — verify */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}
412 | |||
/* Save the active channel's FIFO state from PFIFO registers into its
 * RAMFC, drain pending CACHE1 method/data pairs into the channel's
 * cache buffer, then park CACHE1 on the dummy channel (127).
 *
 * Returns 0 when there is nothing to save (dummy channel active) or on
 * success; -EINVAL if the hardware channel id has no software channel.
 */
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	/* channels 0 and last are dummies — nothing to save */
	chid = pfifo->channel_id(dev);
	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
		return 0;

	chan = dev_priv->channels.ptr[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc;
	cache = chan->cache;

	/* PFIFO register -> RAMFC offset (mirror of the restore table
	 * in nv50_fifo_load_context) */
	nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
	nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
	nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
	nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
	nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
	nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
	nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
	nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
	nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
	nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
	nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
	nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
	nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
	nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
	nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
	nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
	nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
	nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
	nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
	nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
	nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
	nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
	nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
	nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
	nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
	nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
	nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
	nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
	nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
	nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
	nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
	nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
	nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));

	/* drain CACHE1 entries between GET and PUT into the cache
	 * buffer; indices are word counts within a 0x7ff byte window */
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(cache, ptr + 0,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(cache, ptr + 4,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff;
		ptr += 8;
	}

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (dev_priv->chipset != 0x50) {
		/* 0x84 = number of drained pairs, replayed at load */
		nv_wo32(ramfc, 0x84, ptr >> 3);
		nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
		nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
		nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
		nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
		nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
	}

	dev_priv->engine.instmem.flush(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}
499 | |||
/* Flush the VM TLBs for the PFIFO engine (engine id 5). */
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
	nv50_vm_flush_engine(dev, 5);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c new file mode 100644 index 00000000000..d4f4206dad7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_hw.h" | ||
28 | |||
29 | #include "nv50_display.h" | ||
30 | |||
31 | static void nv50_gpio_isr(struct drm_device *dev); | ||
32 | static void nv50_gpio_isr_bh(struct work_struct *work); | ||
33 | |||
/* Per-device GPIO engine state: the list of registered interrupt
 * handlers, protected by 'lock'. */
struct nv50_gpio_priv {
	struct list_head handlers;
	spinlock_t lock;
};

/* One registered GPIO interrupt handler.  'work' defers the client
 * callback to process context; 'inhibit' is set while that work is
 * pending so the same line cannot queue the handler twice. */
struct nv50_gpio_handler {
	struct drm_device *dev;
	struct list_head head;		/* entry in nv50_gpio_priv.handlers */
	struct work_struct work;	/* bottom half: nv50_gpio_isr_bh() */
	bool inhibit;			/* true while work is queued/running */

	struct dcb_gpio_entry *gpio;	/* BIOS DCB entry for this line */

	void (*handler)(void *data, int state);	/* client callback */
	void *data;				/* opaque client argument */
};
50 | |||
51 | static int | ||
52 | nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) | ||
53 | { | ||
54 | const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | ||
55 | |||
56 | if (gpio->line >= 32) | ||
57 | return -EINVAL; | ||
58 | |||
59 | *reg = nv50_gpio_reg[gpio->line >> 3]; | ||
60 | *shift = (gpio->line & 7) << 2; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | int | ||
65 | nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) | ||
66 | { | ||
67 | struct dcb_gpio_entry *gpio; | ||
68 | uint32_t r, s, v; | ||
69 | |||
70 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
71 | if (!gpio) | ||
72 | return -ENOENT; | ||
73 | |||
74 | if (nv50_gpio_location(gpio, &r, &s)) | ||
75 | return -EINVAL; | ||
76 | |||
77 | v = nv_rd32(dev, r) >> (s + 2); | ||
78 | return ((v & 1) == (gpio->state[1] & 1)); | ||
79 | } | ||
80 | |||
81 | int | ||
82 | nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) | ||
83 | { | ||
84 | struct dcb_gpio_entry *gpio; | ||
85 | uint32_t r, s, v; | ||
86 | |||
87 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
88 | if (!gpio) | ||
89 | return -ENOENT; | ||
90 | |||
91 | if (nv50_gpio_location(gpio, &r, &s)) | ||
92 | return -EINVAL; | ||
93 | |||
94 | v = nv_rd32(dev, r) & ~(0x3 << s); | ||
95 | v |= (gpio->state[state] ^ 2) << s; | ||
96 | nv_wr32(dev, r, v); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | int | ||
101 | nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, | ||
102 | void (*handler)(void *, int), void *data) | ||
103 | { | ||
104 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
105 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
106 | struct nv50_gpio_priv *priv = pgpio->priv; | ||
107 | struct nv50_gpio_handler *gpioh; | ||
108 | struct dcb_gpio_entry *gpio; | ||
109 | unsigned long flags; | ||
110 | |||
111 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
112 | if (!gpio) | ||
113 | return -ENOENT; | ||
114 | |||
115 | gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL); | ||
116 | if (!gpioh) | ||
117 | return -ENOMEM; | ||
118 | |||
119 | INIT_WORK(&gpioh->work, nv50_gpio_isr_bh); | ||
120 | gpioh->dev = dev; | ||
121 | gpioh->gpio = gpio; | ||
122 | gpioh->handler = handler; | ||
123 | gpioh->data = data; | ||
124 | |||
125 | spin_lock_irqsave(&priv->lock, flags); | ||
126 | list_add(&gpioh->head, &priv->handlers); | ||
127 | spin_unlock_irqrestore(&priv->lock, flags); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
/* Remove every handler matching (tag's GPIO, handler, data).  Matching
 * entries are moved off the live list under the lock, then any
 * in-flight work is flushed before freeing, so the callback cannot run
 * after this returns.
 */
void
nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
			 void (*handler)(void *, int), void *data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct nv50_gpio_priv *priv = pgpio->priv;
	struct nv50_gpio_handler *gpioh, *tmp;
	struct dcb_gpio_entry *gpio;
	LIST_HEAD(tofree);
	unsigned long flags;

	gpio = nouveau_bios_gpio_entry(dev, tag);
	if (!gpio)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
		if (gpioh->gpio != gpio ||
		    gpioh->handler != handler ||
		    gpioh->data != data)
			continue;
		list_move(&gpioh->head, &tofree);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* flush outside the spinlock — flush_work_sync() may sleep */
	list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
		flush_work_sync(&gpioh->work);
		kfree(gpioh);
	}
}
162 | |||
163 | bool | ||
164 | nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) | ||
165 | { | ||
166 | struct dcb_gpio_entry *gpio; | ||
167 | u32 reg, mask; | ||
168 | |||
169 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
170 | if (!gpio) | ||
171 | return false; | ||
172 | |||
173 | reg = gpio->line < 16 ? 0xe050 : 0xe070; | ||
174 | mask = 0x00010001 << (gpio->line & 0xf); | ||
175 | |||
176 | nv_wr32(dev, reg + 4, mask); | ||
177 | reg = nv_mask(dev, reg + 0, mask, on ? mask : 0); | ||
178 | return (reg & mask) == mask; | ||
179 | } | ||
180 | |||
181 | static int | ||
182 | nv50_gpio_create(struct drm_device *dev) | ||
183 | { | ||
184 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
185 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
186 | struct nv50_gpio_priv *priv; | ||
187 | |||
188 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
189 | if (!priv) | ||
190 | return -ENOMEM; | ||
191 | |||
192 | INIT_LIST_HEAD(&priv->handlers); | ||
193 | spin_lock_init(&priv->lock); | ||
194 | pgpio->priv = priv; | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | nv50_gpio_destroy(struct drm_device *dev) | ||
200 | { | ||
201 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
202 | struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; | ||
203 | |||
204 | kfree(pgpio->priv); | ||
205 | pgpio->priv = NULL; | ||
206 | } | ||
207 | |||
/* Initialise the GPIO engine: allocate private state on first call,
 * disable and ack all GPIO interrupts, and install the ISR on
 * interrupt source 21.  Returns 0 or a negative errno.
 */
int
nv50_gpio_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	int ret;

	if (!pgpio->priv) {
		ret = nv50_gpio_create(dev);
		if (ret)
			return ret;
	}

	/* disable, and ack any pending gpio interrupts */
	nv_wr32(dev, 0xe050, 0x00000000);
	nv_wr32(dev, 0xe054, 0xffffffff);
	if (dev_priv->chipset >= 0x90) {
		/* second register pair only on 0x90 and newer */
		nv_wr32(dev, 0xe070, 0x00000000);
		nv_wr32(dev, 0xe074, 0xffffffff);
	}

	nouveau_irq_register(dev, 21, nv50_gpio_isr);
	return 0;
}
232 | |||
/* Shut down the GPIO engine: disable interrupt generation, detach the
 * ISR and free the private state.
 */
void
nv50_gpio_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, 0xe050, 0x00000000);	/* disable bank 0 intrs */
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe070, 0x00000000);	/* bank 1 (0x90+) */
	nouveau_irq_unregister(dev, 21);

	nv50_gpio_destroy(dev);
}
245 | |||
/* Workqueue bottom half for a GPIO interrupt: read the line's current
 * state, invoke the client callback, then clear 'inhibit' so the ISR
 * may queue this handler again.
 */
static void
nv50_gpio_isr_bh(struct work_struct *work)
{
	struct nv50_gpio_handler *gpioh =
		container_of(work, struct nv50_gpio_handler, work);
	struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct nv50_gpio_priv *priv = pgpio->priv;
	unsigned long flags;
	int state;

	state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
	if (state < 0)
		return;	/* NOTE(review): 'inhibit' stays set on this path,
			 * permanently suppressing the handler — verify
			 * that is intended */

	gpioh->handler(gpioh->data, state);

	spin_lock_irqsave(&priv->lock, flags);
	gpioh->inhibit = false;
	spin_unlock_irqrestore(&priv->lock, flags);
}
267 | |||
/* GPIO interrupt top half: collect and ack pending interrupts, then
 * schedule the bottom half for each registered handler whose line
 * fired (unless its work is already queued).
 */
static void
nv50_gpio_isr(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct nv50_gpio_priv *priv = pgpio->priv;
	struct nv50_gpio_handler *gpioh;
	u32 intr0, intr1 = 0;
	u32 hi, lo, ch;

	/* pending & enabled; the second register pair exists on 0x90+ */
	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
	if (dev_priv->chipset >= 0x90)
		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);

	/* each register carries two 16-bit halves per bank; fold them
	 * into a single 32-bit per-line pending mask.  NOTE(review):
	 * exact hi/lo (edge direction?) semantics unverified. */
	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
	ch = hi | lo;

	nv_wr32(dev, 0xe054, intr0);	/* ack what we observed */
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe074, intr1);

	spin_lock(&priv->lock);
	list_for_each_entry(gpioh, &priv->handlers, head) {
		if (!(ch & (1 << gpioh->gpio->line)))
			continue;

		/* skip if this handler's work is already pending */
		if (gpioh->inhibit)
			continue;
		gpioh->inhibit = true;

		schedule_work(&gpioh->work);
	}
	spin_unlock(&priv->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c new file mode 100644 index 00000000000..d43c46caa76 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -0,0 +1,1123 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | #include "nouveau_grctx.h" | ||
32 | #include "nouveau_dma.h" | ||
33 | #include "nouveau_vm.h" | ||
34 | #include "nv50_evo.h" | ||
35 | |||
/* Per-device PGRAPH engine state for NV50-family chips. */
struct nv50_graph_engine {
	struct nouveau_exec_engine base;
	u32 ctxprog[512];	/* ctxprog microcode, uploaded by init_ctxctl */
	u32 ctxprog_size;	/* number of valid words in ctxprog[] */
	u32 grctx_size;		/* bytes per channel graphics context object */
};
42 | |||
43 | static void | ||
44 | nv50_graph_fifo_access(struct drm_device *dev, bool enabled) | ||
45 | { | ||
46 | const uint32_t mask = 0x00010001; | ||
47 | |||
48 | if (enabled) | ||
49 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask); | ||
50 | else | ||
51 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask); | ||
52 | } | ||
53 | |||
/* Return the channel whose context is currently loaded in PGRAPH, or NULL
 * if no context is loaded or the owning channel cannot be found.
 */
static struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context.
	 */
	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	/* CTXCTL_CUR holds the instance address in units of 4KiB pages. */
	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

	/* Match the instance address against every channel's RAMIN. */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];

		if (chan && chan->ramin && chan->ramin->vinst == inst)
			return chan;
	}

	return NULL;
}
81 | |||
/* Load the PGRAPH context at instance address 'inst'.  The register
 * sequence below is order-sensitive; FIFO access is disabled around it
 * and restored at the end.  Always returns 0.
 */
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	uint32_t fifo = nv_rd32(dev, 0x400500);

	/* Disable FIFO access while the context is swapped in. */
	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	/* Pulse 0x400040 (read-back flushes the posted write before it is
	 * cleared again). */
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	/* Mark the context as current only once the engine is idle.
	 * Bit 31 presumably is the "loaded" flag — mirrors
	 * NV50_PGRAPH_CTXCTL_CUR_LOADED. */
	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1<<31));
	nv_wr32(dev, 0x400500, fifo);

	return 0;
}
102 | |||
/* Save and unload the currently-loaded PGRAPH context, if any.
 * Always returns 0 (a no-op when nothing is loaded).
 */
static int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	/* Trigger the context save (order-sensitive register sequence). */
	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	/* Write back without the LOADED bit -> no context current. */
	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}
122 | |||
/* Reset PGRAPH by dropping and re-raising its PMC enable bit.  The second
 * write deliberately re-reads NV03_PMC_ENABLE after the first write.
 * (1 << 21) is an additional unit reset alongside PGRAPH — exact meaning
 * not evident from this file; confirm against hardware docs. */
static void
nv50_graph_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
}
132 | |||
/* Acknowledge any stale PGRAPH interrupts and enable all interrupt
 * sources. */
static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* Clear pending interrupt status before unmasking. */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}
142 | |||
/* Program the PGRAPH error-reporting registers (the 0xc0000000 writes
 * appear throughout this file as "enable/ack trap reporting" values) and
 * re-enable FIFO access.
 */
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* 0x1540: bitmask of present units; bits 0-15 gate the per-TP
	 * registers below (same mask is consulted in the trap handlers). */
	uint32_t units = nv_rd32(dev, 0x1540);
	int i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);

	/* Per-TP trap enables; pre-NVA0 chips use a 4KiB register stride,
	 * NVA0+ a 2KiB stride at different offsets. */
	for (i = 0; i < 16; i++) {
		if (units & 1 << i) {
			if (dev_priv->chipset < 0xa0) {
				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
			} else {
				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
			}
		}
	}

	/* Clear trap status (0x400108 is read as TRAP status in the trap
	 * handler below). */
	nv_wr32(dev, 0x400108, 0xffffffff);

	nv_wr32(dev, 0x400824, 0x00004000);
	nv_wr32(dev, 0x400500, 0x00010001);
}
178 | |||
/* Chipset-specific zcull setup, then clear all eight zcull region
 * descriptors. */
static void
nv50_graph_init_zcull(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	switch (dev_priv->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(dev, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		nv_wr32(dev, 0x402cc0, 0x00000000);
		if (dev_priv->chipset == 0xa0 ||
		    dev_priv->chipset == 0xaa ||
		    dev_priv->chipset == 0xac) {
			nv_wr32(dev, 0x402ca8, 0x00000802);
		} else {
			/* NOTE(review): this repeats the 0x402cc0 write from
			 * just above the if; harmless but looks redundant. */
			nv_wr32(dev, 0x402cc0, 0x00000000);
			nv_wr32(dev, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
	}
}
216 | |||
/* Upload the context-switch microcode (ctxprog) and arm hardware context
 * switching.  Always returns 0.
 */
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	NV_DEBUG(dev, "\n");

	/* Auto-incrementing data port: reset the index, then stream in
	 * every word of the ctxprog. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < pgraph->ctxprog_size; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);

	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
	nv_wr32(dev, 0x400320, 4);
	/* No context loaded initially. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}
235 | |||
/* Engine init entry point: reset PGRAPH, program its registers, set up
 * zcull and the ctxprog, then enable interrupts last (so no IRQs arrive
 * before the engine is ready).  Returns 0 or a negative errno from
 * ctxctl setup.
 */
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
	int ret;

	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_zcull(dev);

	ret = nv50_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	nv50_graph_init_intr(dev);
	return 0;
}
254 | |||
/* Engine teardown/suspend: block FIFO access, wait for PGRAPH to go
 * idle (0x400700 reading 0), then save off the current context.  On a
 * suspend, failure to idle aborts with -EBUSY and FIFO access is
 * restored; otherwise the timeout is ignored.
 */
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
	if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
		nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
		return -EBUSY;
	}
	nv50_graph_unload_context(dev);
	/* Mask all PGRAPH interrupts. */
	nv_wr32(dev, 0x40013c, 0x00000000);
	return 0;
}
267 | |||
/* Allocate and initialise a per-channel graphics context.
 * Creates the grctx gpuobj, points the channel's RAMIN context header at
 * it, fills it with default values via the grctx generator, and registers
 * it as the channel's GR engine context.  Returns 0 or a negative errno.
 */
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *grctx = NULL;
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &grctx);
	if (ret)
		return ret;

	/* Context header lives at a chipset-dependent offset in RAMIN:
	 * limit/base of the grctx plus flag words. */
	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
	nv_wo32(ramin, hdr + 0x08, grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	/* Fill the grctx with initial register values. */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv50_grctx_init(&ctx);

	/* First word of the grctx points back at the channel's RAMIN
	 * (in 4KiB units). */
	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
	return 0;
}
308 | |||
/* Destroy a channel's graphics context: with PFIFO reassignment and
 * PGRAPH FIFO access suspended, unload the context if it is the current
 * one, scrub the RAMIN context header, then drop the grctx reference.
 */
static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin)
		return;

	/* Freeze context switching while we tear the context down. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);
	nv50_graph_fifo_access(dev, false);

	if (nv50_graph_channel(dev) == chan)
		nv50_graph_unload_context(dev);

	/* Zero the 24-byte context header written by context_new. */
	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	nv50_graph_fifo_access(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &grctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
}
344 | |||
/* Create a software object for a graphics class and insert it into the
 * channel's RAMHT under 'handle'.  The 16-byte object holds the class id
 * in its first word.  Returns 0 or a negative errno.
 */
static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	/* RAMHT keeps its own reference; drop ours regardless of result. */
	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
370 | |||
/* Perform a software-driven context switch: save the outgoing context,
 * load the one the hardware requested in CTXCTL_NEXT, and re-enable the
 * CONTEXT_SWITCH interrupt.
 */
static void
nv50_graph_context_switch(struct drm_device *dev)
{
	uint32_t inst;

	nv50_graph_unload_context(dev);

	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
	nv50_graph_do_load_context(dev, inst);

	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}
385 | |||
386 | static int | ||
387 | nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, | ||
388 | u32 class, u32 mthd, u32 data) | ||
389 | { | ||
390 | struct nouveau_gpuobj *gpuobj; | ||
391 | |||
392 | gpuobj = nouveau_ramht_find(chan, data); | ||
393 | if (!gpuobj) | ||
394 | return -ENOENT; | ||
395 | |||
396 | if (nouveau_notifier_offset(gpuobj, NULL)) | ||
397 | return -EINVAL; | ||
398 | |||
399 | chan->nvsw.vblsem = gpuobj; | ||
400 | chan->nvsw.vblsem_offset = ~0; | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static int | ||
405 | nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, | ||
406 | u32 class, u32 mthd, u32 data) | ||
407 | { | ||
408 | if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) | ||
409 | return -ERANGE; | ||
410 | |||
411 | chan->nvsw.vblsem_offset = data >> 2; | ||
412 | return 0; | ||
413 | } | ||
414 | |||
/* NVSW method: record the value to write into the vblank semaphore when
 * it is released.  Always returns 0. */
static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
				   u32 class, u32 mthd, u32 data)
{
	chan->nvsw.vblsem_rval = data;
	return 0;
}
422 | |||
/* NVSW method: arm a vblank-semaphore release on CRTC head 'data'
 * (0 or 1).  Requires a bound semaphore object and offset.  Takes a
 * vblank reference and queues the channel on the vbl_waiting list; the
 * vblank handler performs the actual release.  Returns 0 or -EINVAL.
 */
static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
		return -EINVAL;

	drm_vblank_get(dev, data);

	chan->nvsw.vblsem_head = data;
	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);

	return 0;
}
440 | |||
/* NVSW method: signal completion of a queued page flip on this channel.
 * Always returns 0. */
static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}
448 | |||
449 | |||
/* Flush the VM TLB for engine 0 (PGRAPH).  On NV50 this needs no
 * idle-wait, unlike the NV84+ variant below. */
static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0);
}
455 | |||
/* NV84+ TLB flush: PGRAPH must not have outstanding memory transactions
 * while the TLB is flushed, so stall the FIFO, poll the engine status
 * registers until idle (with a 2s timeout), flush, then unstall.
 */
static void
nv84_graph_tlb_flush(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	/* Stall new work while we wait for the engine to drain. */
	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);

	/* 0x400380/4/8 pack per-unit 3-bit status fields; a field value of
	 * 1 means that unit is still busy (interpretation per this loop —
	 * confirm against hardware docs). */
	start = ptimer->read(dev);
	do {
		idle = true;

		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

	if (timeout) {
		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
	}

	/* Flush anyway, even after a timeout. */
	nv50_vm_flush_engine(dev, 0);

	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
501 | |||
/* MP (shader multiprocessor) execution-trap status codes, decoded by
 * nv50_pgraph_mp_trap. */
static struct nouveau_enum nv50_mp_exec_error_names[] = {
	{ 3, "STACK_UNDERFLOW", NULL },
	{ 4, "QUADON_ACTIVE", NULL },
	{ 8, "TIMEOUT", NULL },
	{ 0x10, "INVALID_OPCODE", NULL },
	{ 0x40, "BREAKPOINT", NULL },
	{}
};
510 | |||
/* M2MF trap status bits (register 0x406800). */
static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};
517 | |||
/* VFETCH trap status bits (register 0x400c04). */
static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};
522 | |||
/* STRMOUT trap status bits (register 0x401800). */
static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};
527 | |||
/* CCACHE trap status bits (register 0x405018). */
static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};
532 | |||
/* There must be a *lot* of these. Will take some time to gather them up. */
/* DATA_ERROR interrupt codes (non-static: shared with other chip files). */
struct nouveau_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};
578 | |||
/* Top-level PGRAPH interrupt status bits (NV03_PGRAPH_INTR). */
static struct nouveau_bitfield nv50_graph_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};
592 | |||
/* Decode and acknowledge MP execution traps for one TP.
 * @tpid:    TP index whose MPs are inspected (up to 4, gated by bits
 *           24-27 of the unit-presence register 0x1540).
 * @display: when non-zero, log each trap's status, PC and opcode.
 * Status is cleared for every MP that reported one.
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		/* Per-MP register window; stride differs pre/post NVA0. */
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* Read of +0x20 discarded — presumably required to
			 * latch PC/opcode; confirm against hw docs. */
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_enum_print(nv50_mp_exec_error_names, status);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		/* Write back +0x10 and clear the status to ack the trap. */
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
632 | |||
/* Decode and acknowledge per-TP traps of a given type across all TPs.
 * @type:         trap class: 6 = texture, 7 = MP, 8 = TPDMA.
 * @ustatus_old:  base address of the ustatus register pre-NVA0 (4KiB
 *                stride); @ustatus_new: base on NVA0+ (2KiB stride).
 * @display:      when non-zero, log decoded faults.
 * @name:         label used in log messages.
 * Each handled ustatus register is re-armed with 0xc0000000.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		/* Skip TPs not present per the unit mask. */
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			/* Fault details live in the words following the
			 * ustatus register; e14:e10 form the faulting
			 * address (printed as %02x%08x below). */
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* Re-arm trap reporting for this TP. */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
732 | |||
733 | static int | ||
734 | nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid) | ||
735 | { | ||
736 | u32 status = nv_rd32(dev, 0x400108); | ||
737 | u32 ustatus; | ||
738 | |||
739 | if (!status && display) { | ||
740 | NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n"); | ||
741 | return 1; | ||
742 | } | ||
743 | |||
744 | /* DISPATCH: Relays commands to other units and handles NOTIFY, | ||
745 | * COND, QUERY. If you get a trap from it, the command is still stuck | ||
746 | * in DISPATCH and you need to do something about it. */ | ||
747 | if (status & 0x001) { | ||
748 | ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; | ||
749 | if (!ustatus && display) { | ||
750 | NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); | ||
751 | } | ||
752 | |||
753 | nv_wr32(dev, 0x400500, 0x00000000); | ||
754 | |||
755 | /* Known to be triggered by screwed up NOTIFY and COND... */ | ||
756 | if (ustatus & 0x00000001) { | ||
757 | u32 addr = nv_rd32(dev, 0x400808); | ||
758 | u32 subc = (addr & 0x00070000) >> 16; | ||
759 | u32 mthd = (addr & 0x00001ffc); | ||
760 | u32 datal = nv_rd32(dev, 0x40080c); | ||
761 | u32 datah = nv_rd32(dev, 0x400810); | ||
762 | u32 class = nv_rd32(dev, 0x400814); | ||
763 | u32 r848 = nv_rd32(dev, 0x400848); | ||
764 | |||
765 | NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n"); | ||
766 | if (display && (addr & 0x80000000)) { | ||
767 | NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " | ||
768 | "subc %d class 0x%04x mthd 0x%04x " | ||
769 | "data 0x%08x%08x " | ||
770 | "400808 0x%08x 400848 0x%08x\n", | ||
771 | chid, inst, subc, class, mthd, datah, | ||
772 | datal, addr, r848); | ||
773 | } else | ||
774 | if (display) { | ||
775 | NV_INFO(dev, "PGRAPH - no stuck command?\n"); | ||
776 | } | ||
777 | |||
778 | nv_wr32(dev, 0x400808, 0); | ||
779 | nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); | ||
780 | nv_wr32(dev, 0x400848, 0); | ||
781 | ustatus &= ~0x00000001; | ||
782 | } | ||
783 | |||
784 | if (ustatus & 0x00000002) { | ||
785 | u32 addr = nv_rd32(dev, 0x40084c); | ||
786 | u32 subc = (addr & 0x00070000) >> 16; | ||
787 | u32 mthd = (addr & 0x00001ffc); | ||
788 | u32 data = nv_rd32(dev, 0x40085c); | ||
789 | u32 class = nv_rd32(dev, 0x400814); | ||
790 | |||
791 | NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n"); | ||
792 | if (display && (addr & 0x80000000)) { | ||
793 | NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " | ||
794 | "subc %d class 0x%04x mthd 0x%04x " | ||
795 | "data 0x%08x 40084c 0x%08x\n", | ||
796 | chid, inst, subc, class, mthd, | ||
797 | data, addr); | ||
798 | } else | ||
799 | if (display) { | ||
800 | NV_INFO(dev, "PGRAPH - no stuck command?\n"); | ||
801 | } | ||
802 | |||
803 | nv_wr32(dev, 0x40084c, 0); | ||
804 | ustatus &= ~0x00000002; | ||
805 | } | ||
806 | |||
807 | if (ustatus && display) { | ||
808 | NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown " | ||
809 | "0x%08x)\n", ustatus); | ||
810 | } | ||
811 | |||
812 | nv_wr32(dev, 0x400804, 0xc0000000); | ||
813 | nv_wr32(dev, 0x400108, 0x001); | ||
814 | status &= ~0x001; | ||
815 | if (!status) | ||
816 | return 0; | ||
817 | } | ||
818 | |||
819 | /* M2MF: Memory to memory copy engine. */ | ||
820 | if (status & 0x002) { | ||
821 | u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; | ||
822 | if (display) { | ||
823 | NV_INFO(dev, "PGRAPH - TRAP_M2MF"); | ||
824 | nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); | ||
825 | printk("\n"); | ||
826 | NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n", | ||
827 | nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808), | ||
828 | nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810)); | ||
829 | |||
830 | } | ||
831 | |||
832 | /* No sane way found yet -- just reset the bugger. */ | ||
833 | nv_wr32(dev, 0x400040, 2); | ||
834 | nv_wr32(dev, 0x400040, 0); | ||
835 | nv_wr32(dev, 0x406800, 0xc0000000); | ||
836 | nv_wr32(dev, 0x400108, 0x002); | ||
837 | status &= ~0x002; | ||
838 | } | ||
839 | |||
840 | /* VFETCH: Fetches data from vertex buffers. */ | ||
841 | if (status & 0x004) { | ||
842 | u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; | ||
843 | if (display) { | ||
844 | NV_INFO(dev, "PGRAPH - TRAP_VFETCH"); | ||
845 | nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); | ||
846 | printk("\n"); | ||
847 | NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n", | ||
848 | nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), | ||
849 | nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10)); | ||
850 | } | ||
851 | |||
852 | nv_wr32(dev, 0x400c04, 0xc0000000); | ||
853 | nv_wr32(dev, 0x400108, 0x004); | ||
854 | status &= ~0x004; | ||
855 | } | ||
856 | |||
857 | /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ | ||
858 | if (status & 0x008) { | ||
859 | ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; | ||
860 | if (display) { | ||
861 | NV_INFO(dev, "PGRAPH - TRAP_STRMOUT"); | ||
862 | nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); | ||
863 | printk("\n"); | ||
864 | NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n", | ||
865 | nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), | ||
866 | nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810)); | ||
867 | |||
868 | } | ||
869 | |||
870 | /* No sane way found yet -- just reset the bugger. */ | ||
871 | nv_wr32(dev, 0x400040, 0x80); | ||
872 | nv_wr32(dev, 0x400040, 0); | ||
873 | nv_wr32(dev, 0x401800, 0xc0000000); | ||
874 | nv_wr32(dev, 0x400108, 0x008); | ||
875 | status &= ~0x008; | ||
876 | } | ||
877 | |||
878 | /* CCACHE: Handles code and c[] caches and fills them. */ | ||
879 | if (status & 0x010) { | ||
880 | ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; | ||
881 | if (display) { | ||
882 | NV_INFO(dev, "PGRAPH - TRAP_CCACHE"); | ||
883 | nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); | ||
884 | printk("\n"); | ||
885 | NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x" | ||
886 | " %08x %08x %08x\n", | ||
887 | nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004), | ||
888 | nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c), | ||
889 | nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014), | ||
890 | nv_rd32(dev, 0x40501c)); | ||
891 | |||
892 | } | ||
893 | |||
894 | nv_wr32(dev, 0x405018, 0xc0000000); | ||
895 | nv_wr32(dev, 0x400108, 0x010); | ||
896 | status &= ~0x010; | ||
897 | } | ||
898 | |||
899 | /* Unknown, not seen yet... 0x402000 is the only trap status reg | ||
900 | * remaining, so try to handle it anyway. Perhaps related to that | ||
901 | * unknown DMA slot on tesla? */ | ||
902 | if (status & 0x20) { | ||
903 | ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; | ||
904 | if (display) | ||
905 | NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus); | ||
906 | nv_wr32(dev, 0x402000, 0xc0000000); | ||
		/* no status modification on purpose */
908 | } | ||
909 | |||
910 | /* TEXTURE: CUDA texturing units */ | ||
911 | if (status & 0x040) { | ||
912 | nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display, | ||
913 | "PGRAPH - TRAP_TEXTURE"); | ||
914 | nv_wr32(dev, 0x400108, 0x040); | ||
915 | status &= ~0x040; | ||
916 | } | ||
917 | |||
918 | /* MP: CUDA execution engines. */ | ||
919 | if (status & 0x080) { | ||
920 | nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display, | ||
921 | "PGRAPH - TRAP_MP"); | ||
922 | nv_wr32(dev, 0x400108, 0x080); | ||
923 | status &= ~0x080; | ||
924 | } | ||
925 | |||
926 | /* TPDMA: Handles TP-initiated uncached memory accesses: | ||
927 | * l[], g[], stack, 2d surfaces, render targets. */ | ||
928 | if (status & 0x100) { | ||
929 | nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display, | ||
930 | "PGRAPH - TRAP_TPDMA"); | ||
931 | nv_wr32(dev, 0x400108, 0x100); | ||
932 | status &= ~0x100; | ||
933 | } | ||
934 | |||
935 | if (status) { | ||
936 | if (display) | ||
937 | NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status); | ||
938 | nv_wr32(dev, 0x400108, status); | ||
939 | } | ||
940 | |||
941 | return 1; | ||
942 | } | ||
943 | |||
944 | int | ||
945 | nv50_graph_isr_chid(struct drm_device *dev, u64 inst) | ||
946 | { | ||
947 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
948 | struct nouveau_channel *chan; | ||
949 | unsigned long flags; | ||
950 | int i; | ||
951 | |||
952 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
953 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
954 | chan = dev_priv->channels.ptr[i]; | ||
955 | if (!chan || !chan->ramin) | ||
956 | continue; | ||
957 | |||
958 | if (inst == chan->ramin->vinst) | ||
959 | break; | ||
960 | } | ||
961 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
962 | return i; | ||
963 | } | ||
964 | |||
/*
 * PGRAPH interrupt service routine (registered for IRQ source 12).
 *
 * Drains the PGRAPH interrupt status register (0x400100): for each
 * pending batch of bits it decodes the trapped class/method/data and the
 * owning channel, dispatches driver-implemented (software) methods,
 * services context-switch requests and traps, acks everything, and logs
 * whatever remains unhandled (rate-limited).
 */
static void
nv50_graph_isr(struct drm_device *dev)
{
	u32 stat;

	/* Keep going until no PGRAPH interrupt bits remain pending. */
	while ((stat = nv_rd32(dev, 0x400100))) {
		/* Instance address of the active channel; resolve to a chid. */
		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
		u32 chid = nv50_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;	/* subchannel */
		u32 mthd = (addr & 0x00001ffc);		/* method offset */
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400814);
		u32 show = stat;	/* bits still worth reporting below */

		/* Trapped method: try to execute it as a software method;
		 * on success, suppress it from the log output. */
		if (stat & 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
						       mthd, data))
				show &= ~0x00000010;
		}

		/* Context-switch request: quiesce (0x400500), ack the bit,
		 * mask it, and perform the switch. */
		if (stat & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, 0x400100, 0x00001000);
			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
			nv50_graph_context_switch(dev);
			stat &= ~0x00001000;
			show &= ~0x00001000;
		}

		/* Rate-limit all remaining log output. */
		show = (show && nouveau_ratelimit()) ? show : 0;

		/* DATA_ERROR: decode the error code from 0x400110. */
		if (show & 0x00100000) {
			u32 ecode = nv_rd32(dev, 0x400110);
			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
			nouveau_enum_print(nv50_data_error_names, ecode);
			printk("\n");
		}

		/* TRAP: hand off to the trap handler; if it fully handled
		 * the trap (returned 0), don't log it again. */
		if (stat & 0x00200000) {
			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
				show &= ~0x00200000;
		}

		/* Ack the serviced bits and re-enable PGRAPH (0x400500). */
		nv_wr32(dev, 0x400100, stat);
		nv_wr32(dev, 0x400500, 0x00010001);

		/* Report anything nobody claimed, with full context. */
		if (show) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv50_graph_intr, show);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
				"class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
			nv50_fb_vm_trap(dev, 1);
		}
	}

	/* Clear the high bit of 0x400824 if set -- purpose not documented
	 * here; mirrors the read-modify-write the hardware state needs. */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
1026 | |||
1027 | static void | ||
1028 | nv50_graph_destroy(struct drm_device *dev, int engine) | ||
1029 | { | ||
1030 | struct nv50_graph_engine *pgraph = nv_engine(dev, engine); | ||
1031 | |||
1032 | NVOBJ_ENGINE_DEL(dev, GR); | ||
1033 | |||
1034 | nouveau_irq_unregister(dev, 12); | ||
1035 | kfree(pgraph); | ||
1036 | } | ||
1037 | |||
/*
 * Create and register the PGRAPH engine for NV50-family chipsets.
 *
 * Builds the context-switching microcode (ctxprog) via nv50_grctx_init(),
 * fills in the engine vfuncs, installs the IRQ handler, and registers the
 * object classes each chipset variant exposes.
 *
 * Returns 0 on success, and also 0 when ctxprog generation fails (see
 * below); -ENOMEM if the engine structure cannot be allocated.
 */
int
nv50_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_graph_engine *pgraph;
	struct nouveau_grctx ctx = {};
	int ret;

	pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	/* Generate the ctxprog directly into pgraph->ctxprog. */
	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = pgraph->ctxprog;
	ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);

	ret = nv50_grctx_init(&ctx);
	if (ret) {
		/* Deliberately non-fatal: nv50_grctx_init() has already
		 * logged that acceleration is being disabled, so report
		 * success without registering the engine. */
		NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
		kfree(pgraph);
		return 0;
	}

	/* Sizes come out of the generator pass above. */
	pgraph->grctx_size = ctx.ctxvals_pos * 4;
	pgraph->ctxprog_size = ctx.ctxprog_len;

	pgraph->base.destroy = nv50_graph_destroy;
	pgraph->base.init = nv50_graph_init;
	pgraph->base.fini = nv50_graph_fini;
	pgraph->base.context_new = nv50_graph_context_new;
	pgraph->base.context_del = nv50_graph_context_del;
	pgraph->base.object_new = nv50_graph_object_new;
	/* NV50/NVAC use the generic flush; everything else the NV84 one. */
	if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
		pgraph->base.tlb_flush = nv50_graph_tlb_flush;
	else
		pgraph->base.tlb_flush = nv84_graph_tlb_flush;

	nouveau_irq_register(dev, 12, nv50_graph_isr);

	/* NVSW really doesn't live here... */
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */

	/* tesla: 3d class, per chipset generation */
	if (dev_priv->chipset == 0x50)
		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
	else
	if (dev_priv->chipset < 0xa0)
		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
	else {
		switch (dev_priv->chipset) {
		case 0xa0:
		case 0xaa:
		case 0xac:
			NVOBJ_CLASS(dev, 0x8397, GR);
			break;
		case 0xa3:
		case 0xa5:
		case 0xa8:
			NVOBJ_CLASS(dev, 0x8597, GR);
			break;
		case 0xaf:
			NVOBJ_CLASS(dev, 0x8697, GR);
			break;
		}
	}

	/* compute */
	NVOBJ_CLASS(dev, 0x50c0, GR);
	/* second compute class on newer chipsets, excluding 0xaa/0xac IGPs */
	if (dev_priv->chipset > 0xa0 &&
	    dev_priv->chipset != 0xaa &&
	    dev_priv->chipset != 0xac)
		NVOBJ_CLASS(dev, 0x85c0, GR);

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c new file mode 100644 index 00000000000..de9abff12b9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c | |||
@@ -0,0 +1,3322 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Marcin Kościelnicki | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
/*
 * ctxprog flag registers.  Each flag is addressed as (word * 32) + bit,
 * and each has named values for its 0 and 1 states.  These are consumed
 * by the cp_set()/cp_bra()/cp_wait() helpers used by the ctxprog
 * generator in this file.
 */
#define CP_FLAG_CLEAR                 0
#define CP_FLAG_SET                   1
#define CP_FLAG_SWAP_DIRECTION        ((0 * 32) + 0)
#define CP_FLAG_SWAP_DIRECTION_LOAD   0
#define CP_FLAG_SWAP_DIRECTION_SAVE   1
#define CP_FLAG_UNK01                 ((0 * 32) + 1)
#define CP_FLAG_UNK01_CLEAR           0
#define CP_FLAG_UNK01_SET             1
#define CP_FLAG_UNK03                 ((0 * 32) + 3)
#define CP_FLAG_UNK03_CLEAR           0
#define CP_FLAG_UNK03_SET             1
#define CP_FLAG_USER_SAVE             ((0 * 32) + 5)
#define CP_FLAG_USER_SAVE_NOT_PENDING 0
#define CP_FLAG_USER_SAVE_PENDING     1
#define CP_FLAG_USER_LOAD             ((0 * 32) + 6)
#define CP_FLAG_USER_LOAD_NOT_PENDING 0
#define CP_FLAG_USER_LOAD_PENDING     1
#define CP_FLAG_UNK0B                 ((0 * 32) + 0xb)
#define CP_FLAG_UNK0B_CLEAR           0
#define CP_FLAG_UNK0B_SET             1
#define CP_FLAG_UNK1D                 ((0 * 32) + 0x1d)
#define CP_FLAG_UNK1D_CLEAR           0
#define CP_FLAG_UNK1D_SET             1
#define CP_FLAG_UNK20                 ((1 * 32) + 0)
#define CP_FLAG_UNK20_CLEAR           0
#define CP_FLAG_UNK20_SET             1
#define CP_FLAG_STATUS                ((2 * 32) + 0)
#define CP_FLAG_STATUS_BUSY           0
#define CP_FLAG_STATUS_IDLE           1
#define CP_FLAG_AUTO_SAVE             ((2 * 32) + 4)
#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
#define CP_FLAG_AUTO_SAVE_PENDING     1
#define CP_FLAG_AUTO_LOAD             ((2 * 32) + 5)
#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
#define CP_FLAG_AUTO_LOAD_PENDING     1
#define CP_FLAG_NEWCTX                ((2 * 32) + 10)
#define CP_FLAG_NEWCTX_BUSY           0
#define CP_FLAG_NEWCTX_DONE           1
#define CP_FLAG_XFER                  ((2 * 32) + 11)
#define CP_FLAG_XFER_IDLE             0
#define CP_FLAG_XFER_BUSY             1
#define CP_FLAG_ALWAYS                ((2 * 32) + 13)
#define CP_FLAG_ALWAYS_FALSE          0
#define CP_FLAG_ALWAYS_TRUE           1
#define CP_FLAG_INTR                  ((2 * 32) + 15)
#define CP_FLAG_INTR_NOT_PENDING      0
#define CP_FLAG_INTR_PENDING          1

/*
 * ctxprog instruction encodings: opcode bits plus the masks/shifts of
 * their operand fields (CTX register+count, branch target/condition,
 * wait/set flag selectors), followed by fixed single-word opcodes.
 */
#define CP_CTX                        0x00100000
#define CP_CTX_COUNT                  0x000f0000
#define CP_CTX_COUNT_SHIFT            16
#define CP_CTX_REG                    0x00003fff
#define CP_LOAD_SR                    0x00200000
#define CP_LOAD_SR_VALUE              0x000fffff
#define CP_BRA                        0x00400000
#define CP_BRA_IP                     0x0001ff00
#define CP_BRA_IP_SHIFT               8
#define CP_BRA_IF_CLEAR               0x00000080
#define CP_BRA_FLAG                   0x0000007f
#define CP_WAIT                       0x00500000
#define CP_WAIT_SET                   0x00000080
#define CP_WAIT_FLAG                  0x0000007f
#define CP_SET                        0x00700000
#define CP_SET_1                      0x00000080
#define CP_SET_FLAG                   0x0000007f
#define CP_NEWCTX                     0x00600004
#define CP_NEXT_TO_SWAP               0x00600005
#define CP_SET_CONTEXT_POINTER        0x00600006
#define CP_SET_XFER_POINTER           0x00600007
#define CP_ENABLE                     0x00600009
#define CP_END                        0x0060000c
#define CP_NEXT_TO_CURRENT            0x0060000d
#define CP_DISABLE1                   0x0090ffff
#define CP_DISABLE2                   0x0091ffff
#define CP_XFER_1                     0x008000ff
#define CP_XFER_2                     0x008800ff
#define CP_SEEK_1                     0x00c000ff
#define CP_SEEK_2                     0x00c800ff
101 | |||
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"

/* Chipset classification helpers (x = dev_priv->chipset).
 * IS_NVA3F: chipsets strictly between 0xa0 and 0xaa, plus 0xaf.
 * IS_NVAAF: chipsets 0xaa through 0xac inclusive.
 * (Presumably the NVA3+ discrete vs. NVAA-family IGP split -- confirm
 * against the chipset lists used elsewhere in this file.) */
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
108 | |||
109 | /* | ||
110 | * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's | ||
111 | * the GPU itself that does context-switching, but it needs a special | ||
112 | * microcode to do it. And it's the driver's task to supply this microcode, | ||
113 | * further known as ctxprog, as well as the initial context values, known | ||
114 | * as ctxvals. | ||
115 | * | ||
116 | * Without ctxprog, you cannot switch contexts. Not even in software, since | ||
117 | * the majority of context [xfer strands] isn't accessible directly. You're | ||
118 | * stuck with a single channel, and you also suffer all the problems resulting | ||
119 | * from missing ctxvals, since you cannot load them. | ||
120 | * | ||
121 | * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to | ||
122 | * run 2d operations, but trying to utilise 3d or CUDA will just lock you up, | ||
123 | * since you don't have... some sort of needed setup. | ||
124 | * | ||
125 | * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since | ||
126 | * it's too much hassle to handle no-ctxprog as a special case. | ||
127 | */ | ||
128 | |||
129 | /* | ||
130 | * How ctxprogs work. | ||
131 | * | ||
132 | * The ctxprog is written in its own kind of microcode, with very small and | ||
133 | * crappy set of available commands. You upload it to a small [512 insns] | ||
134 | * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to | ||
 * switch channel, or when the driver explicitly requests it. Stuff visible
136 | * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands, | ||
137 | * the per-channel context save area in VRAM [known as ctxvals or grctx], | ||
138 | * 4 flags registers, a scratch register, two grctx pointers, plus many | ||
139 | * random poorly-understood details. | ||
140 | * | ||
141 | * When ctxprog runs, it's supposed to check what operations are asked of it, | ||
142 | * save old context if requested, optionally reset PGRAPH and switch to the | ||
143 | * new channel, and load the new context. Context consists of three major | ||
144 | * parts: subset of MMIO registers and two "xfer areas". | ||
145 | */ | ||
146 | |||
147 | /* TODO: | ||
148 | * - document unimplemented bits compared to nvidia | ||
149 | * - NVAx: make a TP subroutine, use it. | ||
150 | * - use 0x4008fc instead of 0x1540? | ||
151 | */ | ||
152 | |||
/* Branch-target labels used while emitting the ctxprog skeleton.
 * Values are arbitrary non-zero tokens; cp_name() defines a label's
 * position and cp_bra() references it.  Starts at 1 so that 0 never
 * collides with a valid label. */
enum cp_label {
	cp_check_load = 1,
	cp_setup_auto_load,
	cp_setup_load,
	cp_setup_save,
	cp_swap_state,
	cp_prepare_exit,
	cp_exit,
};
162 | |||
163 | static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx); | ||
164 | static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx); | ||
165 | static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx); | ||
166 | |||
167 | /* Main function: construct the ctxprog skeleton, call the other functions. */ | ||
168 | |||
/*
 * Build the ctxprog skeleton for the current chipset, emitting it into
 * @ctx via the cp_*() helpers, and delegating the MMIO and xfer-area
 * portions to the construct_* functions.
 *
 * Returns 0 on success, or -ENOSYS when the chipset is not one we know
 * how to build a ctxprog for (acceleration is then left disabled).
 */
int
nv50_grctx_init(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;

	/* Whitelist of chipsets this generator knows how to handle. */
	switch (dev_priv->chipset) {
	case 0x50:
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
	case 0xa0:
	case 0xa3:
	case 0xa5:
	case 0xa8:
	case 0xaa:
	case 0xac:
	case 0xaf:
		break;
	default:
		NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
				   "your NV%x card.\n", dev_priv->chipset);
		NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
				   "the devs.\n");
		return -ENOSYS;
	}
	/* decide whether we're loading/unloading the context */
	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);

	cp_name(ctx, cp_check_load);
	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
	cp_bra (ctx, ALWAYS, TRUE, cp_exit);

	/* setup for context load */
	cp_name(ctx, cp_setup_auto_load);
	cp_out (ctx, CP_DISABLE1);
	cp_out (ctx, CP_DISABLE2);
	cp_out (ctx, CP_ENABLE);
	cp_out (ctx, CP_NEXT_TO_SWAP);
	cp_set (ctx, UNK01, SET);
	cp_name(ctx, cp_setup_load);
	cp_out (ctx, CP_NEWCTX);
	cp_wait(ctx, NEWCTX, BUSY);
	cp_set (ctx, UNK1D, CLEAR);
	cp_set (ctx, SWAP_DIRECTION, LOAD);
	cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);

	/* setup for context save */
	cp_name(ctx, cp_setup_save);
	cp_set (ctx, UNK1D, SET);
	cp_wait(ctx, STATUS, BUSY);
	cp_wait(ctx, INTR, PENDING);
	cp_bra (ctx, STATUS, BUSY, cp_setup_save);
	cp_set (ctx, UNK01, SET);
	cp_set (ctx, SWAP_DIRECTION, SAVE);

	/* general PGRAPH state */
	cp_name(ctx, cp_swap_state);
	cp_set (ctx, UNK03, SET);
	cp_pos (ctx, 0x00004/4);
	cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
	cp_pos (ctx, 0x00100/4);
	/* MMIO registers and the two xfer areas make up the bulk of
	 * the context -- built by the helpers below. */
	nv50_graph_construct_mmio(ctx);
	nv50_graph_construct_xfer1(ctx);
	nv50_graph_construct_xfer2(ctx);

	/* when saving, loop back and check whether a load is also wanted */
	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);

	cp_set (ctx, UNK20, SET);
	cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
	cp_lsr (ctx, ctx->ctxvals_base);
	cp_out (ctx, CP_SET_XFER_POINTER);
	cp_lsr (ctx, 4);
	cp_out (ctx, CP_SEEK_1);
	cp_out (ctx, CP_XFER_1);
	cp_wait(ctx, XFER, BUSY);

	/* pre-exit state updates */
	cp_name(ctx, cp_prepare_exit);
	cp_set (ctx, UNK01, CLEAR);
	cp_set (ctx, UNK03, CLEAR);
	cp_set (ctx, UNK1D, CLEAR);

	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
	cp_out (ctx, CP_NEXT_TO_CURRENT);

	cp_name(ctx, cp_exit);
	cp_set (ctx, USER_SAVE, NOT_PENDING);
	cp_set (ctx, USER_LOAD, NOT_PENDING);
	cp_out (ctx, CP_END);
	ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */

	return 0;
}
268 | |||
269 | /* | ||
270 | * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which | ||
271 | * registers to save/restore and the default values for them. | ||
272 | */ | ||
273 | |||
274 | static void | ||
275 | nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx); | ||
276 | |||
277 | static void | ||
278 | nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | ||
279 | { | ||
280 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
281 | int i, j; | ||
282 | int offset, base; | ||
283 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); | ||
284 | |||
285 | /* 0800: DISPATCH */ | ||
286 | cp_ctx(ctx, 0x400808, 7); | ||
287 | gr_def(ctx, 0x400814, 0x00000030); | ||
288 | cp_ctx(ctx, 0x400834, 0x32); | ||
289 | if (dev_priv->chipset == 0x50) { | ||
290 | gr_def(ctx, 0x400834, 0xff400040); | ||
291 | gr_def(ctx, 0x400838, 0xfff00080); | ||
292 | gr_def(ctx, 0x40083c, 0xfff70090); | ||
293 | gr_def(ctx, 0x400840, 0xffe806a8); | ||
294 | } | ||
295 | gr_def(ctx, 0x400844, 0x00000002); | ||
296 | if (IS_NVA3F(dev_priv->chipset)) | ||
297 | gr_def(ctx, 0x400894, 0x00001000); | ||
298 | gr_def(ctx, 0x4008e8, 0x00000003); | ||
299 | gr_def(ctx, 0x4008ec, 0x00001000); | ||
300 | if (dev_priv->chipset == 0x50) | ||
301 | cp_ctx(ctx, 0x400908, 0xb); | ||
302 | else if (dev_priv->chipset < 0xa0) | ||
303 | cp_ctx(ctx, 0x400908, 0xc); | ||
304 | else | ||
305 | cp_ctx(ctx, 0x400908, 0xe); | ||
306 | |||
307 | if (dev_priv->chipset >= 0xa0) | ||
308 | cp_ctx(ctx, 0x400b00, 0x1); | ||
309 | if (IS_NVA3F(dev_priv->chipset)) { | ||
310 | cp_ctx(ctx, 0x400b10, 0x1); | ||
311 | gr_def(ctx, 0x400b10, 0x0001629d); | ||
312 | cp_ctx(ctx, 0x400b20, 0x1); | ||
313 | gr_def(ctx, 0x400b20, 0x0001629d); | ||
314 | } | ||
315 | |||
316 | nv50_graph_construct_mmio_ddata(ctx); | ||
317 | |||
318 | /* 0C00: VFETCH */ | ||
319 | cp_ctx(ctx, 0x400c08, 0x2); | ||
320 | gr_def(ctx, 0x400c08, 0x0000fe0c); | ||
321 | |||
322 | /* 1000 */ | ||
323 | if (dev_priv->chipset < 0xa0) { | ||
324 | cp_ctx(ctx, 0x401008, 0x4); | ||
325 | gr_def(ctx, 0x401014, 0x00001000); | ||
326 | } else if (!IS_NVA3F(dev_priv->chipset)) { | ||
327 | cp_ctx(ctx, 0x401008, 0x5); | ||
328 | gr_def(ctx, 0x401018, 0x00001000); | ||
329 | } else { | ||
330 | cp_ctx(ctx, 0x401008, 0x5); | ||
331 | gr_def(ctx, 0x401018, 0x00004000); | ||
332 | } | ||
333 | |||
334 | /* 1400 */ | ||
335 | cp_ctx(ctx, 0x401400, 0x8); | ||
336 | cp_ctx(ctx, 0x401424, 0x3); | ||
337 | if (dev_priv->chipset == 0x50) | ||
338 | gr_def(ctx, 0x40142c, 0x0001fd87); | ||
339 | else | ||
340 | gr_def(ctx, 0x40142c, 0x00000187); | ||
341 | cp_ctx(ctx, 0x401540, 0x5); | ||
342 | gr_def(ctx, 0x401550, 0x00001018); | ||
343 | |||
344 | /* 1800: STREAMOUT */ | ||
345 | cp_ctx(ctx, 0x401814, 0x1); | ||
346 | gr_def(ctx, 0x401814, 0x000000ff); | ||
347 | if (dev_priv->chipset == 0x50) { | ||
348 | cp_ctx(ctx, 0x40181c, 0xe); | ||
349 | gr_def(ctx, 0x401850, 0x00000004); | ||
350 | } else if (dev_priv->chipset < 0xa0) { | ||
351 | cp_ctx(ctx, 0x40181c, 0xf); | ||
352 | gr_def(ctx, 0x401854, 0x00000004); | ||
353 | } else { | ||
354 | cp_ctx(ctx, 0x40181c, 0x13); | ||
355 | gr_def(ctx, 0x401864, 0x00000004); | ||
356 | } | ||
357 | |||
358 | /* 1C00 */ | ||
359 | cp_ctx(ctx, 0x401c00, 0x1); | ||
360 | switch (dev_priv->chipset) { | ||
361 | case 0x50: | ||
362 | gr_def(ctx, 0x401c00, 0x0001005f); | ||
363 | break; | ||
364 | case 0x84: | ||
365 | case 0x86: | ||
366 | case 0x94: | ||
367 | gr_def(ctx, 0x401c00, 0x044d00df); | ||
368 | break; | ||
369 | case 0x92: | ||
370 | case 0x96: | ||
371 | case 0x98: | ||
372 | case 0xa0: | ||
373 | case 0xaa: | ||
374 | case 0xac: | ||
375 | gr_def(ctx, 0x401c00, 0x042500df); | ||
376 | break; | ||
377 | case 0xa3: | ||
378 | case 0xa5: | ||
379 | case 0xa8: | ||
380 | case 0xaf: | ||
381 | gr_def(ctx, 0x401c00, 0x142500df); | ||
382 | break; | ||
383 | } | ||
384 | |||
385 | /* 2000 */ | ||
386 | |||
387 | /* 2400 */ | ||
388 | cp_ctx(ctx, 0x402400, 0x1); | ||
389 | if (dev_priv->chipset == 0x50) | ||
390 | cp_ctx(ctx, 0x402408, 0x1); | ||
391 | else | ||
392 | cp_ctx(ctx, 0x402408, 0x2); | ||
393 | gr_def(ctx, 0x402408, 0x00000600); | ||
394 | |||
395 | /* 2800: CSCHED */ | ||
396 | cp_ctx(ctx, 0x402800, 0x1); | ||
397 | if (dev_priv->chipset == 0x50) | ||
398 | gr_def(ctx, 0x402800, 0x00000006); | ||
399 | |||
400 | /* 2C00: ZCULL */ | ||
401 | cp_ctx(ctx, 0x402c08, 0x6); | ||
402 | if (dev_priv->chipset != 0x50) | ||
403 | gr_def(ctx, 0x402c14, 0x01000000); | ||
404 | gr_def(ctx, 0x402c18, 0x000000ff); | ||
405 | if (dev_priv->chipset == 0x50) | ||
406 | cp_ctx(ctx, 0x402ca0, 0x1); | ||
407 | else | ||
408 | cp_ctx(ctx, 0x402ca0, 0x2); | ||
409 | if (dev_priv->chipset < 0xa0) | ||
410 | gr_def(ctx, 0x402ca0, 0x00000400); | ||
411 | else if (!IS_NVA3F(dev_priv->chipset)) | ||
412 | gr_def(ctx, 0x402ca0, 0x00000800); | ||
413 | else | ||
414 | gr_def(ctx, 0x402ca0, 0x00000400); | ||
415 | cp_ctx(ctx, 0x402cac, 0x4); | ||
416 | |||
417 | /* 3000: ENG2D */ | ||
418 | cp_ctx(ctx, 0x403004, 0x1); | ||
419 | gr_def(ctx, 0x403004, 0x00000001); | ||
420 | |||
421 | /* 3400 */ | ||
422 | if (dev_priv->chipset >= 0xa0) { | ||
423 | cp_ctx(ctx, 0x403404, 0x1); | ||
424 | gr_def(ctx, 0x403404, 0x00000001); | ||
425 | } | ||
426 | |||
427 | /* 5000: CCACHE */ | ||
428 | cp_ctx(ctx, 0x405000, 0x1); | ||
429 | switch (dev_priv->chipset) { | ||
430 | case 0x50: | ||
431 | gr_def(ctx, 0x405000, 0x00300080); | ||
432 | break; | ||
433 | case 0x84: | ||
434 | case 0xa0: | ||
435 | case 0xa3: | ||
436 | case 0xa5: | ||
437 | case 0xa8: | ||
438 | case 0xaa: | ||
439 | case 0xac: | ||
440 | case 0xaf: | ||
441 | gr_def(ctx, 0x405000, 0x000e0080); | ||
442 | break; | ||
443 | case 0x86: | ||
444 | case 0x92: | ||
445 | case 0x94: | ||
446 | case 0x96: | ||
447 | case 0x98: | ||
448 | gr_def(ctx, 0x405000, 0x00000080); | ||
449 | break; | ||
450 | } | ||
451 | cp_ctx(ctx, 0x405014, 0x1); | ||
452 | gr_def(ctx, 0x405014, 0x00000004); | ||
453 | cp_ctx(ctx, 0x40501c, 0x1); | ||
454 | cp_ctx(ctx, 0x405024, 0x1); | ||
455 | cp_ctx(ctx, 0x40502c, 0x1); | ||
456 | |||
457 | /* 6000? */ | ||
458 | if (dev_priv->chipset == 0x50) | ||
459 | cp_ctx(ctx, 0x4063e0, 0x1); | ||
460 | |||
461 | /* 6800: M2MF */ | ||
462 | if (dev_priv->chipset < 0x90) { | ||
463 | cp_ctx(ctx, 0x406814, 0x2b); | ||
464 | gr_def(ctx, 0x406818, 0x00000f80); | ||
465 | gr_def(ctx, 0x406860, 0x007f0080); | ||
466 | gr_def(ctx, 0x40689c, 0x007f0080); | ||
467 | } else { | ||
468 | cp_ctx(ctx, 0x406814, 0x4); | ||
469 | if (dev_priv->chipset == 0x98) | ||
470 | gr_def(ctx, 0x406818, 0x00000f80); | ||
471 | else | ||
472 | gr_def(ctx, 0x406818, 0x00001f80); | ||
473 | if (IS_NVA3F(dev_priv->chipset)) | ||
474 | gr_def(ctx, 0x40681c, 0x00000030); | ||
475 | cp_ctx(ctx, 0x406830, 0x3); | ||
476 | } | ||
477 | |||
478 | /* 7000: per-ROP group state */ | ||
479 | for (i = 0; i < 8; i++) { | ||
480 | if (units & (1<<(i+16))) { | ||
481 | cp_ctx(ctx, 0x407000 + (i<<8), 3); | ||
482 | if (dev_priv->chipset == 0x50) | ||
483 | gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); | ||
484 | else if (dev_priv->chipset != 0xa5) | ||
485 | gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); | ||
486 | else | ||
487 | gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); | ||
488 | gr_def(ctx, 0x407004 + (i<<8), 0x89058001); | ||
489 | |||
490 | if (dev_priv->chipset == 0x50) { | ||
491 | cp_ctx(ctx, 0x407010 + (i<<8), 1); | ||
492 | } else if (dev_priv->chipset < 0xa0) { | ||
493 | cp_ctx(ctx, 0x407010 + (i<<8), 2); | ||
494 | gr_def(ctx, 0x407010 + (i<<8), 0x00001000); | ||
495 | gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); | ||
496 | } else { | ||
497 | cp_ctx(ctx, 0x407010 + (i<<8), 3); | ||
498 | gr_def(ctx, 0x407010 + (i<<8), 0x00001000); | ||
499 | if (dev_priv->chipset != 0xa5) | ||
500 | gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); | ||
501 | else | ||
502 | gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); | ||
503 | } | ||
504 | |||
505 | cp_ctx(ctx, 0x407080 + (i<<8), 4); | ||
506 | if (dev_priv->chipset != 0xa5) | ||
507 | gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); | ||
508 | else | ||
509 | gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); | ||
510 | if (dev_priv->chipset == 0x50) | ||
511 | gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); | ||
512 | else | ||
513 | gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); | ||
514 | gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); | ||
515 | |||
516 | if (dev_priv->chipset < 0xa0) | ||
517 | cp_ctx(ctx, 0x407094 + (i<<8), 1); | ||
518 | else if (!IS_NVA3F(dev_priv->chipset)) | ||
519 | cp_ctx(ctx, 0x407094 + (i<<8), 3); | ||
520 | else { | ||
521 | cp_ctx(ctx, 0x407094 + (i<<8), 4); | ||
522 | gr_def(ctx, 0x4070a0 + (i<<8), 1); | ||
523 | } | ||
524 | } | ||
525 | } | ||
526 | |||
527 | cp_ctx(ctx, 0x407c00, 0x3); | ||
528 | if (dev_priv->chipset < 0x90) | ||
529 | gr_def(ctx, 0x407c00, 0x00010040); | ||
530 | else if (dev_priv->chipset < 0xa0) | ||
531 | gr_def(ctx, 0x407c00, 0x00390040); | ||
532 | else | ||
533 | gr_def(ctx, 0x407c00, 0x003d0040); | ||
534 | gr_def(ctx, 0x407c08, 0x00000022); | ||
535 | if (dev_priv->chipset >= 0xa0) { | ||
536 | cp_ctx(ctx, 0x407c10, 0x3); | ||
537 | cp_ctx(ctx, 0x407c20, 0x1); | ||
538 | cp_ctx(ctx, 0x407c2c, 0x1); | ||
539 | } | ||
540 | |||
541 | if (dev_priv->chipset < 0xa0) { | ||
542 | cp_ctx(ctx, 0x407d00, 0x9); | ||
543 | } else { | ||
544 | cp_ctx(ctx, 0x407d00, 0x15); | ||
545 | } | ||
546 | if (dev_priv->chipset == 0x98) | ||
547 | gr_def(ctx, 0x407d08, 0x00380040); | ||
548 | else { | ||
549 | if (dev_priv->chipset < 0x90) | ||
550 | gr_def(ctx, 0x407d08, 0x00010040); | ||
551 | else if (dev_priv->chipset < 0xa0) | ||
552 | gr_def(ctx, 0x407d08, 0x00390040); | ||
553 | else | ||
554 | gr_def(ctx, 0x407d08, 0x003d0040); | ||
555 | gr_def(ctx, 0x407d0c, 0x00000022); | ||
556 | } | ||
557 | |||
558 | /* 8000+: per-TP state */ | ||
559 | for (i = 0; i < 10; i++) { | ||
560 | if (units & (1<<i)) { | ||
561 | if (dev_priv->chipset < 0xa0) | ||
562 | base = 0x408000 + (i<<12); | ||
563 | else | ||
564 | base = 0x408000 + (i<<11); | ||
565 | if (dev_priv->chipset < 0xa0) | ||
566 | offset = base + 0xc00; | ||
567 | else | ||
568 | offset = base + 0x80; | ||
569 | cp_ctx(ctx, offset + 0x00, 1); | ||
570 | gr_def(ctx, offset + 0x00, 0x0000ff0a); | ||
571 | cp_ctx(ctx, offset + 0x08, 1); | ||
572 | |||
573 | /* per-MP state */ | ||
574 | for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) { | ||
575 | if (!(units & (1 << (j+24)))) continue; | ||
576 | if (dev_priv->chipset < 0xa0) | ||
577 | offset = base + 0x200 + (j<<7); | ||
578 | else | ||
579 | offset = base + 0x100 + (j<<7); | ||
580 | cp_ctx(ctx, offset, 0x20); | ||
581 | gr_def(ctx, offset + 0x00, 0x01800000); | ||
582 | gr_def(ctx, offset + 0x04, 0x00160000); | ||
583 | gr_def(ctx, offset + 0x08, 0x01800000); | ||
584 | gr_def(ctx, offset + 0x18, 0x0003ffff); | ||
585 | switch (dev_priv->chipset) { | ||
586 | case 0x50: | ||
587 | gr_def(ctx, offset + 0x1c, 0x00080000); | ||
588 | break; | ||
589 | case 0x84: | ||
590 | gr_def(ctx, offset + 0x1c, 0x00880000); | ||
591 | break; | ||
592 | case 0x86: | ||
593 | gr_def(ctx, offset + 0x1c, 0x008c0000); | ||
594 | break; | ||
595 | case 0x92: | ||
596 | case 0x96: | ||
597 | case 0x98: | ||
598 | gr_def(ctx, offset + 0x1c, 0x118c0000); | ||
599 | break; | ||
600 | case 0x94: | ||
601 | gr_def(ctx, offset + 0x1c, 0x10880000); | ||
602 | break; | ||
603 | case 0xa0: | ||
604 | case 0xa5: | ||
605 | gr_def(ctx, offset + 0x1c, 0x310c0000); | ||
606 | break; | ||
607 | case 0xa3: | ||
608 | case 0xa8: | ||
609 | case 0xaa: | ||
610 | case 0xac: | ||
611 | case 0xaf: | ||
612 | gr_def(ctx, offset + 0x1c, 0x300c0000); | ||
613 | break; | ||
614 | } | ||
615 | gr_def(ctx, offset + 0x40, 0x00010401); | ||
616 | if (dev_priv->chipset == 0x50) | ||
617 | gr_def(ctx, offset + 0x48, 0x00000040); | ||
618 | else | ||
619 | gr_def(ctx, offset + 0x48, 0x00000078); | ||
620 | gr_def(ctx, offset + 0x50, 0x000000bf); | ||
621 | gr_def(ctx, offset + 0x58, 0x00001210); | ||
622 | if (dev_priv->chipset == 0x50) | ||
623 | gr_def(ctx, offset + 0x5c, 0x00000080); | ||
624 | else | ||
625 | gr_def(ctx, offset + 0x5c, 0x08000080); | ||
626 | if (dev_priv->chipset >= 0xa0) | ||
627 | gr_def(ctx, offset + 0x68, 0x0000003e); | ||
628 | } | ||
629 | |||
630 | if (dev_priv->chipset < 0xa0) | ||
631 | cp_ctx(ctx, base + 0x300, 0x4); | ||
632 | else | ||
633 | cp_ctx(ctx, base + 0x300, 0x5); | ||
634 | if (dev_priv->chipset == 0x50) | ||
635 | gr_def(ctx, base + 0x304, 0x00007070); | ||
636 | else if (dev_priv->chipset < 0xa0) | ||
637 | gr_def(ctx, base + 0x304, 0x00027070); | ||
638 | else if (!IS_NVA3F(dev_priv->chipset)) | ||
639 | gr_def(ctx, base + 0x304, 0x01127070); | ||
640 | else | ||
641 | gr_def(ctx, base + 0x304, 0x05127070); | ||
642 | |||
643 | if (dev_priv->chipset < 0xa0) | ||
644 | cp_ctx(ctx, base + 0x318, 1); | ||
645 | else | ||
646 | cp_ctx(ctx, base + 0x320, 1); | ||
647 | if (dev_priv->chipset == 0x50) | ||
648 | gr_def(ctx, base + 0x318, 0x0003ffff); | ||
649 | else if (dev_priv->chipset < 0xa0) | ||
650 | gr_def(ctx, base + 0x318, 0x03ffffff); | ||
651 | else | ||
652 | gr_def(ctx, base + 0x320, 0x07ffffff); | ||
653 | |||
654 | if (dev_priv->chipset < 0xa0) | ||
655 | cp_ctx(ctx, base + 0x324, 5); | ||
656 | else | ||
657 | cp_ctx(ctx, base + 0x328, 4); | ||
658 | |||
659 | if (dev_priv->chipset < 0xa0) { | ||
660 | cp_ctx(ctx, base + 0x340, 9); | ||
661 | offset = base + 0x340; | ||
662 | } else if (!IS_NVA3F(dev_priv->chipset)) { | ||
663 | cp_ctx(ctx, base + 0x33c, 0xb); | ||
664 | offset = base + 0x344; | ||
665 | } else { | ||
666 | cp_ctx(ctx, base + 0x33c, 0xd); | ||
667 | offset = base + 0x344; | ||
668 | } | ||
669 | gr_def(ctx, offset + 0x0, 0x00120407); | ||
670 | gr_def(ctx, offset + 0x4, 0x05091507); | ||
671 | if (dev_priv->chipset == 0x84) | ||
672 | gr_def(ctx, offset + 0x8, 0x05100202); | ||
673 | else | ||
674 | gr_def(ctx, offset + 0x8, 0x05010202); | ||
675 | gr_def(ctx, offset + 0xc, 0x00030201); | ||
676 | if (dev_priv->chipset == 0xa3) | ||
677 | cp_ctx(ctx, base + 0x36c, 1); | ||
678 | |||
679 | cp_ctx(ctx, base + 0x400, 2); | ||
680 | gr_def(ctx, base + 0x404, 0x00000040); | ||
681 | cp_ctx(ctx, base + 0x40c, 2); | ||
682 | gr_def(ctx, base + 0x40c, 0x0d0c0b0a); | ||
683 | gr_def(ctx, base + 0x410, 0x00141210); | ||
684 | |||
685 | if (dev_priv->chipset < 0xa0) | ||
686 | offset = base + 0x800; | ||
687 | else | ||
688 | offset = base + 0x500; | ||
689 | cp_ctx(ctx, offset, 6); | ||
690 | gr_def(ctx, offset + 0x0, 0x000001f0); | ||
691 | gr_def(ctx, offset + 0x4, 0x00000001); | ||
692 | gr_def(ctx, offset + 0x8, 0x00000003); | ||
693 | if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset)) | ||
694 | gr_def(ctx, offset + 0xc, 0x00008000); | ||
695 | gr_def(ctx, offset + 0x14, 0x00039e00); | ||
696 | cp_ctx(ctx, offset + 0x1c, 2); | ||
697 | if (dev_priv->chipset == 0x50) | ||
698 | gr_def(ctx, offset + 0x1c, 0x00000040); | ||
699 | else | ||
700 | gr_def(ctx, offset + 0x1c, 0x00000100); | ||
701 | gr_def(ctx, offset + 0x20, 0x00003800); | ||
702 | |||
703 | if (dev_priv->chipset >= 0xa0) { | ||
704 | cp_ctx(ctx, base + 0x54c, 2); | ||
705 | if (!IS_NVA3F(dev_priv->chipset)) | ||
706 | gr_def(ctx, base + 0x54c, 0x003fe006); | ||
707 | else | ||
708 | gr_def(ctx, base + 0x54c, 0x003fe007); | ||
709 | gr_def(ctx, base + 0x550, 0x003fe000); | ||
710 | } | ||
711 | |||
712 | if (dev_priv->chipset < 0xa0) | ||
713 | offset = base + 0xa00; | ||
714 | else | ||
715 | offset = base + 0x680; | ||
716 | cp_ctx(ctx, offset, 1); | ||
717 | gr_def(ctx, offset, 0x00404040); | ||
718 | |||
719 | if (dev_priv->chipset < 0xa0) | ||
720 | offset = base + 0xe00; | ||
721 | else | ||
722 | offset = base + 0x700; | ||
723 | cp_ctx(ctx, offset, 2); | ||
724 | if (dev_priv->chipset < 0xa0) | ||
725 | gr_def(ctx, offset, 0x0077f005); | ||
726 | else if (dev_priv->chipset == 0xa5) | ||
727 | gr_def(ctx, offset, 0x6cf7f007); | ||
728 | else if (dev_priv->chipset == 0xa8) | ||
729 | gr_def(ctx, offset, 0x6cfff007); | ||
730 | else if (dev_priv->chipset == 0xac) | ||
731 | gr_def(ctx, offset, 0x0cfff007); | ||
732 | else | ||
733 | gr_def(ctx, offset, 0x0cf7f007); | ||
734 | if (dev_priv->chipset == 0x50) | ||
735 | gr_def(ctx, offset + 0x4, 0x00007fff); | ||
736 | else if (dev_priv->chipset < 0xa0) | ||
737 | gr_def(ctx, offset + 0x4, 0x003f7fff); | ||
738 | else | ||
739 | gr_def(ctx, offset + 0x4, 0x02bf7fff); | ||
740 | cp_ctx(ctx, offset + 0x2c, 1); | ||
741 | if (dev_priv->chipset == 0x50) { | ||
742 | cp_ctx(ctx, offset + 0x50, 9); | ||
743 | gr_def(ctx, offset + 0x54, 0x000003ff); | ||
744 | gr_def(ctx, offset + 0x58, 0x00000003); | ||
745 | gr_def(ctx, offset + 0x5c, 0x00000003); | ||
746 | gr_def(ctx, offset + 0x60, 0x000001ff); | ||
747 | gr_def(ctx, offset + 0x64, 0x0000001f); | ||
748 | gr_def(ctx, offset + 0x68, 0x0000000f); | ||
749 | gr_def(ctx, offset + 0x6c, 0x0000000f); | ||
750 | } else if (dev_priv->chipset < 0xa0) { | ||
751 | cp_ctx(ctx, offset + 0x50, 1); | ||
752 | cp_ctx(ctx, offset + 0x70, 1); | ||
753 | } else { | ||
754 | cp_ctx(ctx, offset + 0x50, 1); | ||
755 | cp_ctx(ctx, offset + 0x60, 5); | ||
756 | } | ||
757 | } | ||
758 | } | ||
759 | } | ||
760 | |||
761 | static void | ||
762 | dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { | ||
763 | int i; | ||
764 | if (val && ctx->mode == NOUVEAU_GRCTX_VALS) | ||
765 | for (i = 0; i < num; i++) | ||
766 | nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val); | ||
767 | ctx->ctxvals_pos += num; | ||
768 | } | ||
769 | |||
/*
 * nv50_graph_construct_mmio_ddata - emit the default "ddata" value block.
 *
 * Emits a chipset-dependent sequence of default context values via
 * dd_emit(), then rewinds to the start of the block and associates the
 * whole range with a single cp_ctx() region (0x404800 on the NVA3F
 * family, 0x405400 elsewhere).
 *
 * NOTE(review): the emission ORDER is the contract here -- each dd_emit
 * call corresponds positionally to a hardware context slot, so lines must
 * not be reordered, merged or split.  The trailing comments give the
 * value's bitmask and (where known) its guessed register/method name;
 * UNKxxxx entries are unidentified state.
 */
static void
nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int base, num;
	/* remember where the block starts so it can be replayed below */
	base = ctx->ctxvals_pos;

	/* tesla state */
	dd_emit(ctx, 1, 0);		/* 00000001 UNK0F90 */
	dd_emit(ctx, 1, 0);		/* 00000001 UNK135C */

	/* SRC_TIC state */
	dd_emit(ctx, 1, 0);		/* 00000007 SRC_TILE_MODE_Z */
	dd_emit(ctx, 1, 2);		/* 00000007 SRC_TILE_MODE_Y */
	dd_emit(ctx, 1, 1);		/* 00000001 SRC_LINEAR #1 */
	dd_emit(ctx, 1, 0);		/* 000000ff SRC_ADDRESS_HIGH */
	dd_emit(ctx, 1, 0);		/* 00000001 SRC_SRGB */
	if (dev_priv->chipset >= 0x94)
		dd_emit(ctx, 1, 0);	/* 00000003 eng2d UNK0258 */
	dd_emit(ctx, 1, 1);		/* 00000fff SRC_DEPTH */
	dd_emit(ctx, 1, 0x100);		/* 0000ffff SRC_HEIGHT */

	/* turing state */
	dd_emit(ctx, 1, 0);		/* 0000000f TEXTURES_LOG2 */
	dd_emit(ctx, 1, 0);		/* 0000000f SAMPLERS_LOG2 */
	dd_emit(ctx, 1, 0);		/* 000000ff CB_DEF_ADDRESS_HIGH */
	dd_emit(ctx, 1, 0);		/* ffffffff CB_DEF_ADDRESS_LOW */
	dd_emit(ctx, 1, 0);		/* ffffffff SHARED_SIZE */
	dd_emit(ctx, 1, 2);		/* ffffffff REG_MODE */
	dd_emit(ctx, 1, 1);		/* 0000ffff BLOCK_ALLOC_THREADS */
	dd_emit(ctx, 1, 1);		/* 00000001 LANES32 */
	dd_emit(ctx, 1, 0);		/* 000000ff UNK370 */
	dd_emit(ctx, 1, 0);		/* 000000ff USER_PARAM_UNK */
	dd_emit(ctx, 1, 0);		/* 000000ff USER_PARAM_COUNT */
	dd_emit(ctx, 1, 1);		/* 000000ff UNK384 bits 8-15 */
	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
	dd_emit(ctx, 1, 0);		/* 0000ffff CB_ADDR_INDEX */
	dd_emit(ctx, 1, 1);		/* 000007ff BLOCKDIM_X */
	dd_emit(ctx, 1, 1);		/* 000007ff BLOCKDIM_XMY */
	dd_emit(ctx, 1, 0);		/* 00000001 BLOCKDIM_XMY_OVERFLOW */
	dd_emit(ctx, 1, 1);		/* 0003ffff BLOCKDIM_XMYMZ */
	dd_emit(ctx, 1, 1);		/* 000007ff BLOCKDIM_Y */
	dd_emit(ctx, 1, 1);		/* 0000007f BLOCKDIM_Z */
	dd_emit(ctx, 1, 4);		/* 000000ff CP_REG_ALLOC_TEMP */
	dd_emit(ctx, 1, 1);		/* 00000001 BLOCKDIM_DIRTY */
	if (IS_NVA3F(dev_priv->chipset))
		dd_emit(ctx, 1, 0);	/* 00000003 UNK03E8 */
	dd_emit(ctx, 1, 1);		/* 0000007f BLOCK_ALLOC_HALFWARPS */
	dd_emit(ctx, 1, 1);		/* 00000007 LOCAL_WARPS_NO_CLAMP */
	dd_emit(ctx, 1, 7);		/* 00000007 LOCAL_WARPS_LOG_ALLOC */
	dd_emit(ctx, 1, 1);		/* 00000007 STACK_WARPS_NO_CLAMP */
	dd_emit(ctx, 1, 7);		/* 00000007 STACK_WARPS_LOG_ALLOC */
	dd_emit(ctx, 1, 1);		/* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
	dd_emit(ctx, 1, 1);		/* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
	dd_emit(ctx, 1, 1);		/* 000007ff BLOCK_ALLOC_THREADS */

	/* compat 2d state (original NV50 only) */
	if (dev_priv->chipset == 0x50) {
		dd_emit(ctx, 4, 0);	/* 0000ffff clip X, Y, W, H */

		dd_emit(ctx, 1, 1);	/* ffffffff chroma COLOR_FORMAT */

		dd_emit(ctx, 1, 1);	/* ffffffff pattern COLOR_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff pattern SHAPE */
		dd_emit(ctx, 1, 1);	/* ffffffff pattern PATTERN_SELECT */

		dd_emit(ctx, 1, 0xa);	/* ffffffff surf2d SRC_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff surf2d DMA_SRC */
		dd_emit(ctx, 1, 0);	/* 000000ff surf2d SRC_ADDRESS_HIGH */
		dd_emit(ctx, 1, 0);	/* ffffffff surf2d SRC_ADDRESS_LOW */
		dd_emit(ctx, 1, 0x40);	/* 0000ffff surf2d SRC_PITCH */
		dd_emit(ctx, 1, 0);	/* 0000000f surf2d SRC_TILE_MODE_Z */
		dd_emit(ctx, 1, 2);	/* 0000000f surf2d SRC_TILE_MODE_Y */
		dd_emit(ctx, 1, 0x100);	/* ffffffff surf2d SRC_HEIGHT */
		dd_emit(ctx, 1, 1);	/* 00000001 surf2d SRC_LINEAR */
		dd_emit(ctx, 1, 0x100);	/* ffffffff surf2d SRC_WIDTH */

		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect CLIP_B_X */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect CLIP_B_Y */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect CLIP_C_X */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect CLIP_C_Y */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect CLIP_D_X */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect CLIP_D_Y */
		dd_emit(ctx, 1, 1);	/* ffffffff gdirect COLOR_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff gdirect OPERATION */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect POINT_X */
		dd_emit(ctx, 1, 0);	/* 0000ffff gdirect POINT_Y */

		dd_emit(ctx, 1, 0);	/* 0000ffff blit SRC_Y */
		dd_emit(ctx, 1, 0);	/* ffffffff blit OPERATION */

		dd_emit(ctx, 1, 0);	/* ffffffff ifc OPERATION */

		dd_emit(ctx, 1, 0);	/* ffffffff iifc INDEX_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff iifc LUT_OFFSET */
		dd_emit(ctx, 1, 4);	/* ffffffff iifc COLOR_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff iifc OPERATION */
	}

	/* m2mf state */
	dd_emit(ctx, 1, 0);		/* ffffffff m2mf LINE_COUNT */
	dd_emit(ctx, 1, 0);		/* ffffffff m2mf LINE_LENGTH_IN */
	dd_emit(ctx, 2, 0);		/* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
	dd_emit(ctx, 1, 1);		/* ffffffff m2mf TILING_DEPTH_OUT */
	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_HEIGHT_OUT */
	dd_emit(ctx, 1, 0);		/* ffffffff m2mf TILING_POSITION_OUT_Z */
	dd_emit(ctx, 1, 1);		/* 00000001 m2mf LINEAR_OUT */
	dd_emit(ctx, 2, 0);		/* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_PITCH_OUT */
	dd_emit(ctx, 1, 1);		/* ffffffff m2mf TILING_DEPTH_IN */
	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_HEIGHT_IN */
	dd_emit(ctx, 1, 0);		/* ffffffff m2mf TILING_POSITION_IN_Z */
	dd_emit(ctx, 1, 1);		/* 00000001 m2mf LINEAR_IN */
	dd_emit(ctx, 2, 0);		/* 0000ffff m2mf TILING_POSITION_IN_X, Y */
	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_PITCH_IN */

	/* more compat 2d state (original NV50 only) */
	if (dev_priv->chipset == 0x50) {
		dd_emit(ctx, 1, 1);	/* ffffffff line COLOR_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff line OPERATION */

		dd_emit(ctx, 1, 1);	/* ffffffff triangle COLOR_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff triangle OPERATION */

		dd_emit(ctx, 1, 0);	/* 0000000f sifm TILE_MODE_Z */
		dd_emit(ctx, 1, 2);	/* 0000000f sifm TILE_MODE_Y */
		dd_emit(ctx, 1, 0);	/* 000000ff sifm FORMAT_FILTER */
		dd_emit(ctx, 1, 1);	/* 000000ff sifm FORMAT_ORIGIN */
		dd_emit(ctx, 1, 0);	/* 0000ffff sifm SRC_PITCH */
		dd_emit(ctx, 1, 1);	/* 00000001 sifm SRC_LINEAR */
		dd_emit(ctx, 1, 0);	/* 000000ff sifm SRC_OFFSET_HIGH */
		dd_emit(ctx, 1, 0);	/* ffffffff sifm SRC_OFFSET */
		dd_emit(ctx, 1, 0);	/* 0000ffff sifm SRC_HEIGHT */
		dd_emit(ctx, 1, 0);	/* 0000ffff sifm SRC_WIDTH */
		dd_emit(ctx, 1, 3);	/* ffffffff sifm COLOR_FORMAT */
		dd_emit(ctx, 1, 0);	/* ffffffff sifm OPERATION */

		dd_emit(ctx, 1, 0);	/* ffffffff sifc OPERATION */
	}

	/* tesla state */
	dd_emit(ctx, 1, 0);		/* 0000000f GP_TEXTURES_LOG2 */
	dd_emit(ctx, 1, 0);		/* 0000000f GP_SAMPLERS_LOG2 */
	dd_emit(ctx, 1, 0);		/* 000000ff */
	dd_emit(ctx, 1, 0);		/* ffffffff */
	dd_emit(ctx, 1, 4);		/* 000000ff UNK12B0_0 */
	dd_emit(ctx, 1, 0x70);		/* 000000ff UNK12B0_1 */
	dd_emit(ctx, 1, 0x80);		/* 000000ff UNK12B0_3 */
	dd_emit(ctx, 1, 0);		/* 000000ff UNK12B0_2 */
	dd_emit(ctx, 1, 0);		/* 0000000f FP_TEXTURES_LOG2 */
	dd_emit(ctx, 1, 0);		/* 0000000f FP_SAMPLERS_LOG2 */
	if (IS_NVA3F(dev_priv->chipset)) {
		dd_emit(ctx, 1, 0);	/* ffffffff */
		dd_emit(ctx, 1, 0);	/* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
	} else {
		dd_emit(ctx, 1, 0);	/* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
	}
	dd_emit(ctx, 1, 0xc);		/* 000000ff SEMANTIC_COLOR.BFC0_ID */
	if (dev_priv->chipset != 0x50)
		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_COLOR.CLMP_EN */
	dd_emit(ctx, 1, 8);		/* 000000ff SEMANTIC_COLOR.COLR_NR */
	dd_emit(ctx, 1, 0x14);		/* 000000ff SEMANTIC_COLOR.FFC0_ID */
	if (dev_priv->chipset == 0x50) {
		dd_emit(ctx, 1, 0);	/* 000000ff SEMANTIC_LAYER */
		dd_emit(ctx, 1, 0);	/* 00000001 */
	} else {
		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_PTSZ.ENABLE */
		dd_emit(ctx, 1, 0x29);	/* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
		dd_emit(ctx, 1, 0x27);	/* 000000ff SEMANTIC_PRIM */
		dd_emit(ctx, 1, 0x26);	/* 000000ff SEMANTIC_LAYER */
		dd_emit(ctx, 1, 8);	/* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
		dd_emit(ctx, 1, 4);	/* 000000ff SEMANTIC_CLIP.CLIP_LO */
		dd_emit(ctx, 1, 0x27);	/* 000000ff UNK0FD4 */
		dd_emit(ctx, 1, 0);	/* 00000001 UNK1900 */
	}
	/* identity render-target map: slot i -> RT i */
	dd_emit(ctx, 1, 0);		/* 00000007 RT_CONTROL_MAP0 */
	dd_emit(ctx, 1, 1);		/* 00000007 RT_CONTROL_MAP1 */
	dd_emit(ctx, 1, 2);		/* 00000007 RT_CONTROL_MAP2 */
	dd_emit(ctx, 1, 3);		/* 00000007 RT_CONTROL_MAP3 */
	dd_emit(ctx, 1, 4);		/* 00000007 RT_CONTROL_MAP4 */
	dd_emit(ctx, 1, 5);		/* 00000007 RT_CONTROL_MAP5 */
	dd_emit(ctx, 1, 6);		/* 00000007 RT_CONTROL_MAP6 */
	dd_emit(ctx, 1, 7);		/* 00000007 RT_CONTROL_MAP7 */
	dd_emit(ctx, 1, 1);		/* 0000000f RT_CONTROL_COUNT */
	dd_emit(ctx, 8, 0);		/* 00000001 RT_HORIZ_UNK */
	dd_emit(ctx, 8, 0);		/* ffffffff RT_ADDRESS_LOW */
	dd_emit(ctx, 1, 0xcf);		/* 000000ff RT_FORMAT */
	dd_emit(ctx, 7, 0);		/* 000000ff RT_FORMAT */
	if (dev_priv->chipset != 0x50)
		dd_emit(ctx, 3, 0);	/* 1, 1, 1 */
	else
		dd_emit(ctx, 2, 0);	/* 1, 1 */
	dd_emit(ctx, 1, 0);		/* ffffffff GP_ENABLE */
	dd_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
	dd_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
	dd_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	if (IS_NVA3F(dev_priv->chipset)) {
		dd_emit(ctx, 1, 3);	/* 00000003 */
		dd_emit(ctx, 1, 0);	/* 00000001 UNK1418. Alone. */
	}
	if (dev_priv->chipset != 0x50)
		dd_emit(ctx, 1, 3);	/* 00000003 UNK15AC */
	dd_emit(ctx, 1, 1);		/* ffffffff RASTERIZE_ENABLE */
	dd_emit(ctx, 1, 0);		/* 00000001 FP_CONTROL.EXPORTS_Z */
	if (dev_priv->chipset != 0x50)
		dd_emit(ctx, 1, 0);	/* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
	dd_emit(ctx, 1, 0x12);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT */
	dd_emit(ctx, 1, 0x10);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
	dd_emit(ctx, 1, 0xc);		/* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
	dd_emit(ctx, 1, 1);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
	dd_emit(ctx, 1, 0);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
	dd_emit(ctx, 1, 0);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
	dd_emit(ctx, 1, 0);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
	dd_emit(ctx, 1, 4);		/* 000000ff FP_RESULT_COUNT */
	dd_emit(ctx, 1, 2);		/* ffffffff REG_MODE */
	dd_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
	if (dev_priv->chipset >= 0xa0)
		dd_emit(ctx, 1, 0);	/* ffffffff */
	dd_emit(ctx, 1, 0);		/* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
	dd_emit(ctx, 1, 0);		/* ffffffff STRMOUT_ENABLE */
	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
	dd_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE*/
	if (dev_priv->chipset != 0x50)
		dd_emit(ctx, 8, 0);	/* 00000001 */
	if (dev_priv->chipset >= 0xa0) {
		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.COMP */
		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.SIZE */
		dd_emit(ctx, 1, 2);	/* 00000007 VTX_ATTR_DEFINE.TYPE */
		dd_emit(ctx, 1, 0);	/* 000000ff VTX_ATTR_DEFINE.ATTR */
	}
	dd_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	dd_emit(ctx, 1, 0x14);		/* 0000001f ZETA_FORMAT */
	dd_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
	dd_emit(ctx, 1, 0);		/* 0000000f VP_TEXTURES_LOG2 */
	dd_emit(ctx, 1, 0);		/* 0000000f VP_SAMPLERS_LOG2 */
	if (IS_NVA3F(dev_priv->chipset))
		dd_emit(ctx, 1, 0);	/* 00000001 */
	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_BACK */
	if (dev_priv->chipset >= 0xa0)
		dd_emit(ctx, 1, 0);	/* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
	dd_emit(ctx, 1, 0);		/* 0000ffff CB_ADDR_INDEX */
	if (dev_priv->chipset >= 0xa0)
		dd_emit(ctx, 1, 0);	/* 00000003 */
	dd_emit(ctx, 1, 0);		/* 00000001 CULL_FACE_ENABLE */
	dd_emit(ctx, 1, 1);		/* 00000003 CULL_FACE */
	dd_emit(ctx, 1, 0);		/* 00000001 FRONT_FACE */
	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_FRONT */
	dd_emit(ctx, 1, 0x1000);	/* 00007fff UNK141C */
	if (dev_priv->chipset != 0x50) {
		dd_emit(ctx, 1, 0xe00);		/* 7fff */
		dd_emit(ctx, 1, 0x1000);	/* 7fff */
		dd_emit(ctx, 1, 0x1e00);	/* 7fff */
	}
	dd_emit(ctx, 1, 0);		/* 00000001 BEGIN_END_ACTIVE */
	dd_emit(ctx, 1, 1);		/* 00000001 POLYGON_MODE_??? */
	dd_emit(ctx, 1, 1);		/* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
	dd_emit(ctx, 1, 1);		/* 000000ff FP_REG_ALLOC_TEMP... without /4? */
	dd_emit(ctx, 1, 1);		/* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
	dd_emit(ctx, 1, 1);		/* 00000001 */
	dd_emit(ctx, 1, 0);		/* 00000001 */
	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
	dd_emit(ctx, 1, 0x200);		/* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
	if (IS_NVA3F(dev_priv->chipset))
		dd_emit(ctx, 1, 0x200);
	dd_emit(ctx, 1, 0);		/* 00000001 */
	if (dev_priv->chipset < 0xa0) {
		dd_emit(ctx, 1, 1);	/* 00000001 */
		dd_emit(ctx, 1, 0x70);	/* 000000ff */
		dd_emit(ctx, 1, 0x80);	/* 000000ff */
		dd_emit(ctx, 1, 0);	/* 000000ff */
		dd_emit(ctx, 1, 0);	/* 00000001 */
		dd_emit(ctx, 1, 1);	/* 00000001 */
		dd_emit(ctx, 1, 0x70);	/* 000000ff */
		dd_emit(ctx, 1, 0x80);	/* 000000ff */
		dd_emit(ctx, 1, 0);	/* 000000ff */
	} else {
		dd_emit(ctx, 1, 1);	/* 00000001 */
		dd_emit(ctx, 1, 0xf0);	/* 000000ff */
		dd_emit(ctx, 1, 0xff);	/* 000000ff */
		dd_emit(ctx, 1, 0);	/* 000000ff */
		dd_emit(ctx, 1, 0);	/* 00000001 */
		dd_emit(ctx, 1, 1);	/* 00000001 */
		dd_emit(ctx, 1, 0xf0);	/* 000000ff */
		dd_emit(ctx, 1, 0xff);	/* 000000ff */
		dd_emit(ctx, 1, 0);	/* 000000ff */
		dd_emit(ctx, 1, 9);	/* 0000003f UNK114C.COMP,SIZE */
	}

	/* eng2d state */
	dd_emit(ctx, 1, 0);		/* 00000001 eng2d COLOR_KEY_ENABLE */
	dd_emit(ctx, 1, 0);		/* 00000007 eng2d COLOR_KEY_FORMAT */
	dd_emit(ctx, 1, 1);		/* ffffffff eng2d DST_DEPTH */
	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d DST_FORMAT */
	dd_emit(ctx, 1, 0);		/* ffffffff eng2d DST_LAYER */
	dd_emit(ctx, 1, 1);		/* 00000001 eng2d DST_LINEAR */
	dd_emit(ctx, 1, 0);		/* 00000007 eng2d PATTERN_COLOR_FORMAT */
	dd_emit(ctx, 1, 0);		/* 00000007 eng2d OPERATION */
	dd_emit(ctx, 1, 0);		/* 00000003 eng2d PATTERN_SELECT */
	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d SIFC_FORMAT */
	dd_emit(ctx, 1, 0);		/* 00000001 eng2d SIFC_BITMAP_ENABLE */
	dd_emit(ctx, 1, 2);		/* 00000003 eng2d SIFC_BITMAP_UNK808 */
	dd_emit(ctx, 1, 0);		/* ffffffff eng2d BLIT_DU_DX_FRACT */
	dd_emit(ctx, 1, 1);		/* ffffffff eng2d BLIT_DU_DX_INT */
	dd_emit(ctx, 1, 0);		/* ffffffff eng2d BLIT_DV_DY_FRACT */
	dd_emit(ctx, 1, 1);		/* ffffffff eng2d BLIT_DV_DY_INT */
	dd_emit(ctx, 1, 0);		/* 00000001 eng2d BLIT_CONTROL_FILTER */
	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d DRAW_COLOR_FORMAT */
	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d SRC_FORMAT */
	dd_emit(ctx, 1, 1);		/* 00000001 eng2d SRC_LINEAR #2 */

	/* Rewind to the start of the block and register the whole range
	 * with cp_ctx; the MMIO base differs on the NVA3F family. */
	num = ctx->ctxvals_pos - base;
	ctx->ctxvals_pos = base;
	if (IS_NVA3F(dev_priv->chipset))
		cp_ctx(ctx, 0x404800, num);
	else
		cp_ctx(ctx, 0x405400, num);
}
1090 | |||
1091 | /* | ||
1092 | * xfer areas. These are a pain. | ||
1093 | * | ||
1094 | * There are 2 xfer areas: the first one is big and contains all sorts of | ||
1095 | * stuff, the second is small and contains some per-TP context. | ||
1096 | * | ||
1097 | * Each area is split into 8 "strands". The areas, when saved to grctx, | ||
1098 | * are made of 8-word blocks. Each block contains a single word from | ||
1099 | * each strand. The strands are independent of each other, their | ||
1100 | * addresses are unrelated to each other, and data in them is closely | ||
1101 | * packed together. The strand layout varies a bit between cards: here | ||
1102 | * and there, a single word is thrown out in the middle and the whole | ||
1103 | * strand is offset by a bit from corresponding one on another chipset. | ||
1104 | * For this reason, addresses of stuff in strands are almost useless. | ||
1105 | * Knowing sequence of stuff and size of gaps between them is much more | ||
1106 | * useful, and that's how we build the strands in our generator. | ||
1107 | * | ||
1108 | * NVA0 takes this mess to a whole new level by cutting the old strands | ||
1109 | * into a few dozen pieces [known as genes], rearranging them randomly, | ||
1110 | * and putting them back together to make new strands. Hopefully these | ||
1111 | * genes correspond more or less directly to the same PGRAPH subunits | ||
1112 | * as in 400040 register. | ||
1113 | * | ||
1114 | * The most common value in default context is 0, and when the genes | ||
 * are separated by 0's, gene boundaries are quite speculative...
1116 | * some of them can be clearly deduced, others can be guessed, and yet | ||
1117 | * others won't be resolved without figuring out the real meaning of | ||
1118 | * given ctxval. For the same reason, ending point of each strand | ||
1119 | * is unknown. Except for strand 0, which is the longest strand and | ||
1120 | * its end corresponds to end of the whole xfer. | ||
1121 | * | ||
1122 | * An unsolved mystery is the seek instruction: it takes an argument | ||
1123 | * in bits 8-18, and that argument is clearly the place in strands to | ||
1124 | * seek to... but the offsets don't seem to correspond to offsets as | ||
1125 | * seen in grctx. Perhaps there's another, real, not randomly-changing | ||
1126 | * addressing in strands, and the xfer insn just happens to skip over | ||
1127 | * the unused bits? NV10-NV30 PIPE comes to mind... | ||
1128 | * | ||
1129 | * As far as I know, there's no way to access the xfer areas directly | ||
1130 | * without the help of ctxprog. | ||
1131 | */ | ||
1132 | |||
1133 | static void | ||
1134 | xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { | ||
1135 | int i; | ||
1136 | if (val && ctx->mode == NOUVEAU_GRCTX_VALS) | ||
1137 | for (i = 0; i < num; i++) | ||
1138 | nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val); | ||
1139 | ctx->ctxvals_pos += num << 3; | ||
1140 | } | ||
1141 | |||
1142 | /* Gene declarations... */ | ||
1143 | |||
1144 | static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx); | ||
1145 | static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx); | ||
1146 | static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx); | ||
1147 | static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx); | ||
1148 | static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx); | ||
1149 | static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx); | ||
1150 | static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx); | ||
1151 | static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx); | ||
1152 | static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx); | ||
1153 | static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx); | ||
1154 | static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx); | ||
1155 | static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx); | ||
1156 | static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx); | ||
1157 | static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx); | ||
1158 | static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx); | ||
1159 | static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx); | ||
1160 | static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx); | ||
1161 | static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx); | ||
1162 | |||
1163 | static void | ||
1164 | nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) | ||
1165 | { | ||
1166 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
1167 | int i; | ||
1168 | int offset; | ||
1169 | int size = 0; | ||
1170 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); | ||
1171 | |||
1172 | offset = (ctx->ctxvals_pos+0x3f)&~0x3f; | ||
1173 | ctx->ctxvals_base = offset; | ||
1174 | |||
1175 | if (dev_priv->chipset < 0xa0) { | ||
1176 | /* Strand 0 */ | ||
1177 | ctx->ctxvals_pos = offset; | ||
1178 | nv50_graph_construct_gene_dispatch(ctx); | ||
1179 | nv50_graph_construct_gene_m2mf(ctx); | ||
1180 | nv50_graph_construct_gene_unk24xx(ctx); | ||
1181 | nv50_graph_construct_gene_clipid(ctx); | ||
1182 | nv50_graph_construct_gene_zcull(ctx); | ||
1183 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1184 | size = (ctx->ctxvals_pos-offset)/8; | ||
1185 | |||
1186 | /* Strand 1 */ | ||
1187 | ctx->ctxvals_pos = offset + 0x1; | ||
1188 | nv50_graph_construct_gene_vfetch(ctx); | ||
1189 | nv50_graph_construct_gene_eng2d(ctx); | ||
1190 | nv50_graph_construct_gene_csched(ctx); | ||
1191 | nv50_graph_construct_gene_ropm1(ctx); | ||
1192 | nv50_graph_construct_gene_ropm2(ctx); | ||
1193 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1194 | size = (ctx->ctxvals_pos-offset)/8; | ||
1195 | |||
1196 | /* Strand 2 */ | ||
1197 | ctx->ctxvals_pos = offset + 0x2; | ||
1198 | nv50_graph_construct_gene_ccache(ctx); | ||
1199 | nv50_graph_construct_gene_unk1cxx(ctx); | ||
1200 | nv50_graph_construct_gene_strmout(ctx); | ||
1201 | nv50_graph_construct_gene_unk14xx(ctx); | ||
1202 | nv50_graph_construct_gene_unk10xx(ctx); | ||
1203 | nv50_graph_construct_gene_unk34xx(ctx); | ||
1204 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1205 | size = (ctx->ctxvals_pos-offset)/8; | ||
1206 | |||
1207 | /* Strand 3: per-ROP group state */ | ||
1208 | ctx->ctxvals_pos = offset + 3; | ||
1209 | for (i = 0; i < 6; i++) | ||
1210 | if (units & (1 << (i + 16))) | ||
1211 | nv50_graph_construct_gene_ropc(ctx); | ||
1212 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1213 | size = (ctx->ctxvals_pos-offset)/8; | ||
1214 | |||
1215 | /* Strands 4-7: per-TP state */ | ||
1216 | for (i = 0; i < 4; i++) { | ||
1217 | ctx->ctxvals_pos = offset + 4 + i; | ||
1218 | if (units & (1 << (2 * i))) | ||
1219 | nv50_graph_construct_xfer_tp(ctx); | ||
1220 | if (units & (1 << (2 * i + 1))) | ||
1221 | nv50_graph_construct_xfer_tp(ctx); | ||
1222 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1223 | size = (ctx->ctxvals_pos-offset)/8; | ||
1224 | } | ||
1225 | } else { | ||
1226 | /* Strand 0 */ | ||
1227 | ctx->ctxvals_pos = offset; | ||
1228 | nv50_graph_construct_gene_dispatch(ctx); | ||
1229 | nv50_graph_construct_gene_m2mf(ctx); | ||
1230 | nv50_graph_construct_gene_unk34xx(ctx); | ||
1231 | nv50_graph_construct_gene_csched(ctx); | ||
1232 | nv50_graph_construct_gene_unk1cxx(ctx); | ||
1233 | nv50_graph_construct_gene_strmout(ctx); | ||
1234 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1235 | size = (ctx->ctxvals_pos-offset)/8; | ||
1236 | |||
1237 | /* Strand 1 */ | ||
1238 | ctx->ctxvals_pos = offset + 1; | ||
1239 | nv50_graph_construct_gene_unk10xx(ctx); | ||
1240 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1241 | size = (ctx->ctxvals_pos-offset)/8; | ||
1242 | |||
1243 | /* Strand 2 */ | ||
1244 | ctx->ctxvals_pos = offset + 2; | ||
1245 | if (dev_priv->chipset == 0xa0) | ||
1246 | nv50_graph_construct_gene_unk14xx(ctx); | ||
1247 | nv50_graph_construct_gene_unk24xx(ctx); | ||
1248 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1249 | size = (ctx->ctxvals_pos-offset)/8; | ||
1250 | |||
1251 | /* Strand 3 */ | ||
1252 | ctx->ctxvals_pos = offset + 3; | ||
1253 | nv50_graph_construct_gene_vfetch(ctx); | ||
1254 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1255 | size = (ctx->ctxvals_pos-offset)/8; | ||
1256 | |||
1257 | /* Strand 4 */ | ||
1258 | ctx->ctxvals_pos = offset + 4; | ||
1259 | nv50_graph_construct_gene_ccache(ctx); | ||
1260 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1261 | size = (ctx->ctxvals_pos-offset)/8; | ||
1262 | |||
1263 | /* Strand 5 */ | ||
1264 | ctx->ctxvals_pos = offset + 5; | ||
1265 | nv50_graph_construct_gene_ropm2(ctx); | ||
1266 | nv50_graph_construct_gene_ropm1(ctx); | ||
1267 | /* per-ROP context */ | ||
1268 | for (i = 0; i < 8; i++) | ||
1269 | if (units & (1<<(i+16))) | ||
1270 | nv50_graph_construct_gene_ropc(ctx); | ||
1271 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1272 | size = (ctx->ctxvals_pos-offset)/8; | ||
1273 | |||
1274 | /* Strand 6 */ | ||
1275 | ctx->ctxvals_pos = offset + 6; | ||
1276 | nv50_graph_construct_gene_zcull(ctx); | ||
1277 | nv50_graph_construct_gene_clipid(ctx); | ||
1278 | nv50_graph_construct_gene_eng2d(ctx); | ||
1279 | if (units & (1 << 0)) | ||
1280 | nv50_graph_construct_xfer_tp(ctx); | ||
1281 | if (units & (1 << 1)) | ||
1282 | nv50_graph_construct_xfer_tp(ctx); | ||
1283 | if (units & (1 << 2)) | ||
1284 | nv50_graph_construct_xfer_tp(ctx); | ||
1285 | if (units & (1 << 3)) | ||
1286 | nv50_graph_construct_xfer_tp(ctx); | ||
1287 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1288 | size = (ctx->ctxvals_pos-offset)/8; | ||
1289 | |||
1290 | /* Strand 7 */ | ||
1291 | ctx->ctxvals_pos = offset + 7; | ||
1292 | if (dev_priv->chipset == 0xa0) { | ||
1293 | if (units & (1 << 4)) | ||
1294 | nv50_graph_construct_xfer_tp(ctx); | ||
1295 | if (units & (1 << 5)) | ||
1296 | nv50_graph_construct_xfer_tp(ctx); | ||
1297 | if (units & (1 << 6)) | ||
1298 | nv50_graph_construct_xfer_tp(ctx); | ||
1299 | if (units & (1 << 7)) | ||
1300 | nv50_graph_construct_xfer_tp(ctx); | ||
1301 | if (units & (1 << 8)) | ||
1302 | nv50_graph_construct_xfer_tp(ctx); | ||
1303 | if (units & (1 << 9)) | ||
1304 | nv50_graph_construct_xfer_tp(ctx); | ||
1305 | } else { | ||
1306 | nv50_graph_construct_gene_unk14xx(ctx); | ||
1307 | } | ||
1308 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
1309 | size = (ctx->ctxvals_pos-offset)/8; | ||
1310 | } | ||
1311 | |||
1312 | ctx->ctxvals_pos = offset + size * 8; | ||
1313 | ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; | ||
1314 | cp_lsr (ctx, offset); | ||
1315 | cp_out (ctx, CP_SET_XFER_POINTER); | ||
1316 | cp_lsr (ctx, size); | ||
1317 | cp_out (ctx, CP_SEEK_1); | ||
1318 | cp_out (ctx, CP_XFER_1); | ||
1319 | cp_wait(ctx, XFER, BUSY); | ||
1320 | } | ||
1321 | |||
1322 | /* | ||
1323 | * non-trivial demagiced parts of ctx init go here | ||
1324 | */ | ||
1325 | |||
/*
 * Emit the initial state of the PGRAPH dispatch unit at the start of
 * strand 0 of the context image.  All values emitted here are zero; only
 * the *counts* vary with the chipset generation (0x50, pre-NVA3F, NVA3F).
 * The /* SEEK */ markers note where the register offset jumps in the
 * underlying state layout (reverse-engineered; see envytools hwdocs).
 */
static void
nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
{
	/* start of strand 0 */
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* SEEK */
	/* header slots: 5 on NV50, 4 on NVA3F-class, 6 on everything else */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 5, 0);
	else if (!IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 6, 0);
	else
		xf_emit(ctx, 4, 0);
	/* SEEK */
	/* the PGRAPH's internal FIFO */
	/* 8 entries of 3 words on NV50, 0x100 entries elsewhere */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 8*3, 0);
	else
		xf_emit(ctx, 0x100*3, 0);
	/* and another bonus slot?!? */
	xf_emit(ctx, 3, 0);
	/* and YET ANOTHER bonus slot? */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 3, 0);
	/* SEEK */
	/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
	xf_emit(ctx, 9, 0);
	/* SEEK */
	xf_emit(ctx, 9, 0);
	/* SEEK */
	xf_emit(ctx, 9, 0);
	/* SEEK */
	xf_emit(ctx, 9, 0);
	/* SEEK */
	/* only pre-NV90 chipsets carry these four words */
	if (dev_priv->chipset < 0x90)
		xf_emit(ctx, 4, 0);
	/* SEEK */
	xf_emit(ctx, 2, 0);
	/* SEEK */
	xf_emit(ctx, 6*2, 0);
	xf_emit(ctx, 2, 0);
	/* SEEK */
	xf_emit(ctx, 2, 0);
	/* SEEK */
	xf_emit(ctx, 6*2, 0);
	xf_emit(ctx, 2, 0);
	/* SEEK */
	/* trailing block grows with chipset generation: 0x1c/0x1e/0x22 words */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 0x1c, 0);
	else if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x1e, 0);
	else
		xf_emit(ctx, 0x22, 0);
	/* SEEK */
	xf_emit(ctx, 0x15, 0);
}
1381 | |||
/*
 * Emit the default M2MF (memory-to-memory format engine) method state,
 * placed on strand 0 right after the dispatch gene.  Most registers
 * default to 0; tiling parameters get non-zero hardware defaults.
 * Chipsets below 0x92 (and 0x98) carry smaller internal M2MF buffers,
 * selected via 'smallm2mf'.
 */
static void
nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
{
	/* Strand 0, right after dispatch */
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int smallm2mf = 0;
	/* small-buffer variant for early/low-end chipsets */
	if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
		smallm2mf = 1;
	/* SEEK */
	xf_emit (ctx, 1, 0);		/* DMA_NOTIFY instance >> 4 */
	xf_emit (ctx, 1, 0);		/* DMA_BUFFER_IN instance >> 4 */
	xf_emit (ctx, 1, 0);		/* DMA_BUFFER_OUT instance >> 4 */
	xf_emit (ctx, 1, 0);		/* OFFSET_IN */
	xf_emit (ctx, 1, 0);		/* OFFSET_OUT */
	xf_emit (ctx, 1, 0);		/* PITCH_IN */
	xf_emit (ctx, 1, 0);		/* PITCH_OUT */
	xf_emit (ctx, 1, 0);		/* LINE_LENGTH */
	xf_emit (ctx, 1, 0);		/* LINE_COUNT */
	xf_emit (ctx, 1, 0x21);		/* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
	xf_emit (ctx, 1, 1);		/* LINEAR_IN */
	xf_emit (ctx, 1, 0x2);		/* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
	xf_emit (ctx, 1, 0x100);	/* TILING_PITCH_IN */
	xf_emit (ctx, 1, 0x100);	/* TILING_HEIGHT_IN */
	xf_emit (ctx, 1, 1);		/* TILING_DEPTH_IN */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_IN_Z */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_IN */
	xf_emit (ctx, 1, 1);		/* LINEAR_OUT */
	xf_emit (ctx, 1, 0x2);		/* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
	xf_emit (ctx, 1, 0x100);	/* TILING_PITCH_OUT */
	xf_emit (ctx, 1, 0x100);	/* TILING_HEIGHT_OUT */
	xf_emit (ctx, 1, 1);		/* TILING_DEPTH_OUT */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_OUT_Z */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_OUT */
	xf_emit (ctx, 1, 0);		/* OFFSET_IN_HIGH */
	xf_emit (ctx, 1, 0);		/* OFFSET_OUT_HIGH */
	/* SEEK */
	/* first internal buffer: quarter size on smallm2mf chipsets */
	if (smallm2mf)
		xf_emit(ctx, 0x40, 0);	/* 20 * ffffffff, 3ffff */
	else
		xf_emit(ctx, 0x100, 0);	/* 80 * ffffffff, 3ffff */
	xf_emit(ctx, 4, 0);		/* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
	/* SEEK */
	/* second internal buffer: half size on smallm2mf chipsets */
	if (smallm2mf)
		xf_emit(ctx, 0x400, 0);	/* ffffffff */
	else
		xf_emit(ctx, 0x800, 0);	/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
	/* SEEK */
	xf_emit(ctx, 0x40, 0);		/* 20 * bits ffffffff, 3ffff */
	xf_emit(ctx, 0x6, 0);		/* 1f, 0, 1f, 0, 1f, 0 */
}
1433 | |||
/*
 * Emit the constant-cache (ccache) gene: constant-buffer bindings,
 * DMA object slots, and shader program base addresses/start IDs for
 * VP/GP/FP.  Values are hardware reset defaults; only the size of one
 * chipset-dependent block (0x25..0x2b words) varies, per the switch.
 */
static void
nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0);		/* RO */
	xf_emit(ctx, 0x800, 0);		/* ffffffff */
	/* chipset-dependent block of read-only/unknown words */
	switch (dev_priv->chipset) {
	case 0x50:
	case 0x92:
	case 0xa0:
		xf_emit(ctx, 0x2b, 0);
		break;
	case 0x84:
		xf_emit(ctx, 0x29, 0);
		break;
	case 0x94:
	case 0x96:
	case 0xa3:
		xf_emit(ctx, 0x27, 0);
		break;
	case 0x86:
	case 0x98:
	case 0xa5:
	case 0xa8:
	case 0xaa:
	case 0xac:
	case 0xaf:
		xf_emit(ctx, 0x25, 0);
		break;
	}
	/* CB bindings, 0x80 of them. first word is address >> 8, second is
	 * size >> 4 | valid << 24 */
	xf_emit(ctx, 0x100, 0);		/* ffffffff CB_DEF */
	xf_emit(ctx, 1, 0);		/* 0000007f CB_ADDR_BUFFER */
	xf_emit(ctx, 1, 0);		/* 0 */
	xf_emit(ctx, 0x30, 0);		/* ff SET_PROGRAM_CB */
	xf_emit(ctx, 1, 0);		/* 3f last SET_PROGRAM_CB */
	xf_emit(ctx, 4, 0);		/* RO */
	xf_emit(ctx, 0x100, 0);		/* ffffffff */
	xf_emit(ctx, 8, 0);		/* 1f, 0, 0, ... */
	xf_emit(ctx, 8, 0);		/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 3 */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_CODE_CB */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TIC */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TSC */
	xf_emit(ctx, 1, 0);		/* 00000001 LINKED_TSC */
	xf_emit(ctx, 1, 0);		/* 000000ff TIC_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff TIC_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
	xf_emit(ctx, 1, 0);		/* 000000ff TSC_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff TSC_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
	xf_emit(ctx, 1, 0);		/* 000000ff VP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff VP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00ffffff VP_START_ID */
	xf_emit(ctx, 1, 0);		/* 000000ff CB_DEF_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff CB_DEF_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);		/* 000000ff GP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff GP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00ffffff GP_START_ID */
	xf_emit(ctx, 1, 0);		/* 000000ff FP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff FP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00ffffff FP_START_ID */
}
1501 | |||
/*
 * Emit the "unk10xx" gene (end of area 2 on pre-NVA0, area 1 on NVAx):
 * VP/GP result-map sizes, semantic-color mapping and related shader
 * plumbing, plus 8 large per-chipset-sized zero blocks whose exact
 * hardware meaning is unknown (reverse-engineered defaults).
 */
static void
nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;
	/* end of area 2 on pre-NVA0, area 1 on NVAx */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	/* NV50 has a narrower field here (3ff) than later chipsets (7ff) */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);
	else
		xf_emit(ctx, 1, 0x7ff);	/* 000007ff */
	xf_emit(ctx, 1, 0);		/* 111/113 */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	/* 8 repetitions of a chipset-sized zero block plus 8 trailer words */
	for (i = 0; i < 8; i++) {
		switch (dev_priv->chipset) {
		case 0x50:
		case 0x86:
		case 0x98:
		case 0xaa:
		case 0xac:
			xf_emit(ctx, 0xa0, 0);	/* ffffffff */
			break;
		case 0x84:
		case 0x92:
		case 0x94:
		case 0x96:
			xf_emit(ctx, 0x120, 0);
			break;
		case 0xa5:
		case 0xa8:
			xf_emit(ctx, 0x100, 0);	/* ffffffff */
			break;
		case 0xa0:
		case 0xa3:
		case 0xaf:
			xf_emit(ctx, 0x400, 0);	/* ffffffff */
			break;
		}
		xf_emit(ctx, 4, 0);	/* 3f, 0, 0, 0 */
		xf_emit(ctx, 4, 0);	/* ffffffff */
	}
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_TEMP */
	xf_emit(ctx, 1, 1);		/* 00000001 RASTERIZE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0x27);		/* 000000ff UNK0FD4 */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
}
1561 | |||
/*
 * Emit the "unk34xx" gene (end of area 2 on pre-NVA0, area 1 on NVAx):
 * viewport clip rectangles, polygon stipple pattern, window offset and
 * a few unknown registers.  NVA0+ and NVA3F-class chipsets carry extra
 * trailing words.
 */
static void
nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* end of area 2 on pre-NVA0, area 1 on NVAx */
	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
	xf_emit(ctx, 1, 0);		/* 00000003 VIEWPORT_CLIP_MODE */
	xf_emit(ctx, 0x10, 0x04000000);	/* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_STIPPLE_ENABLE */
	xf_emit(ctx, 0x20, 0);		/* ffffffff POLYGON_STIPPLE */
	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0);		/* 00000007 */
	xf_emit(ctx, 1, 0x1fe21);	/* 0001ffff tesla UNK0FAC */
	/* extra word only present on NVA0 and later */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x0fac6881);
	/* NVA3F-class chipsets add four more words */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 3, 0);
	}
}
1586 | |||
/*
 * Emit the "unk14xx" gene (middle of area 2 on pre-NVA0, beginning of
 * area 2 on NVA0, area 7 on >NVA0): viewport transform state
 * (scale/translate arrays), clipping control, polygon mode, line
 * state and assorted semantic mappings.  Several registers appear more
 * than once because the same method state is mirrored at multiple
 * places in the context image.  NV50 (0x50) lacks a handful of words
 * that later chipsets carry, hence the chipset checks.
 */
static void
nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 5, 0);		/* ffffffff */
		xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
		xf_emit(ctx, 1, 0);		/* 00000001 */
		xf_emit(ctx, 1, 0);		/* 000003ff */
		xf_emit(ctx, 1, 0x804);		/* 00000fff SEMANTIC_CLIP */
		xf_emit(ctx, 1, 0);		/* 00000001 */
		xf_emit(ctx, 2, 4);		/* 7f, ff */
		xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	}
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 000000ff VP_CLIP_DISTANCE_ENABLE */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 3ff */
	xf_emit(ctx, 1, 0);		/* 000000ff tesla UNK1940 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0D7C */
	xf_emit(ctx, 1, 0x804);		/* 00000fff SEMANTIC_CLIP */
	xf_emit(ctx, 1, 1);		/* 00000001 VIEWPORT_TRANSFORM_EN */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0x7f);	/* 000000ff tesla UNK0FFC */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 1);		/* 00000001 SHADE_MODEL */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0D7C */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0F8C */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 1);		/* 00000001 VIEWPORT_TRANSFORM_EN */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 4, 0);		/* ffffffff NOPERSPECTIVE_BITMAP */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);		/* 0000000f */
	/* NV50 field is 10 bits wide; later chipsets widen it to 11 bits */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);	/* 000003ff tesla UNK0D68 */
	else
		xf_emit(ctx, 1, 0x7ff);	/* 000007ff tesla UNK0D68 */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 0x30, 0);		/* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
	xf_emit(ctx, 3, 0);		/* f, 0, 0 */
	xf_emit(ctx, 3, 0);		/* ffffffff last VIEWPORT_SCALE? */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 1);		/* 00000001 VIEWPORT_TRANSFORM_EN */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1924 */
	xf_emit(ctx, 1, 0x10);		/* 000000ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000001 */
	xf_emit(ctx, 0x30, 0);		/* ffffffff VIEWPORT_TRANSLATE */
	xf_emit(ctx, 3, 0);		/* f, 0, 0 */
	xf_emit(ctx, 3, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 2, 0x88);		/* 000001ff tesla UNK19D8 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1924 */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);		/* 0000000f */
	xf_emit(ctx, 1, 0x3f800000);	/* ffffffff LINE_WIDTH (float 1.0) */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0);	/* 00000001 */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0x10);		/* 000000ff VIEW_VOLUME_CLIP_CTRL */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 0);	/* ffffffff */
		xf_emit(ctx, 1, 0);	/* 00000001 */
		xf_emit(ctx, 1, 0);	/* 000003ff */
	}
	xf_emit(ctx, 0x20, 0);		/* 10xbits ffffffff, 3fffff. SCISSOR_* */
	xf_emit(ctx, 1, 0);		/* f */
	xf_emit(ctx, 1, 0);		/* 0? */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 003fffff */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0x52);		/* 000001ff SEMANTIC_PTSZ */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0);		/* 0000000f */
}
1697 | |||
/*
 * Emit the zcull gene (end of strand 0 on pre-NVA0, beginning of
 * strand 6 on NVAx): depth/stencil test state, depth bounds, depth
 * ranges, MSAA masks and related ZETA configuration defaults.
 */
static void
nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
	/* SEEK */
	xf_emit(ctx, 1, 0x3f);		/* 0000003f UNK1590 */
	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_REF */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff tesla UNK0D6C */
	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
	xf_emit(ctx, 1, 0);		/* 00000001 CLIPID_ENABLE */
	xf_emit(ctx, 2, 0);		/* ffffffff DEPTH_BOUNDS */
	xf_emit(ctx, 1, 0);		/* 00000001 */
	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
	xf_emit(ctx, 1, 0);		/* 0000ffff */
	xf_emit(ctx, 1, 0);		/* 00000001 UNK0FB0 */
	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0);		/* 000000ff CLEAR_STENCIL */
	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_REF */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
	xf_emit(ctx, 1, 0);		/* 00000007 */
	/* register only present on post-NV50 chipsets */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1108 */
	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0x1001);	/* 00001fff ZETA_ARRAY_MODE */
	/* SEEK */
	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR (float 1.0) */
	xf_emit(ctx, 1, 0x10);		/* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
	xf_emit(ctx, 1, 3);		/* 00000003 FP_CTRL_UNK196C */
	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1968 */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 0fffffff tesla UNK1104 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK151C */
}
1758 | |||
/*
 * Emit the CLIPID gene: clip-ID region rectangles, buffer address and
 * dimensions, and the DMA object slot.  No chipset variation here —
 * the layout is identical across the whole NV50 family in this driver.
 */
static void
nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
{
	/* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000007 UNK0FB4 */
	/* SEEK */
	xf_emit(ctx, 4, 0);		/* 07ffffff CLIPID_REGION_HORIZ */
	xf_emit(ctx, 4, 0);		/* 07ffffff CLIPID_REGION_VERT */
	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff UNK1508 */
	xf_emit(ctx, 1, 0);		/* 00000001 CLIPID_ENABLE */
	xf_emit(ctx, 1, 0x80);		/* 00003fff CLIPID_WIDTH */
	xf_emit(ctx, 1, 0);		/* 000000ff CLIPID_ID */
	xf_emit(ctx, 1, 0);		/* 000000ff CLIPID_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff CLIPID_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x80);		/* 00003fff CLIPID_HEIGHT */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_CLIPID */
}
1778 | |||
/*
 * Emit the "unk24xx" gene (middle of strand 0 on pre-NVA0 [after m2mf],
 * end of strand 2 on NVAx): two large result-map-related blocks (laid
 * out differently on NVA3F-class chipsets), point-sprite and
 * interpolant control, and 10 repetitions of a 0x60-word table.
 */
static void
nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;
	/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
	/* SEEK */
	xf_emit(ctx, 0x33, 0);
	/* SEEK */
	xf_emit(ctx, 2, 0);
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	/* SEEK */
	/* two big blocks; NVA3F-class packs them without intervening SEEKs
	 * and appends 9 extra words to each */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 4, 0);	/* RO */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */
		xf_emit(ctx, 9, 0);	/* ffffffff, 7ff */

		xf_emit(ctx, 4, 0);	/* RO */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */
		xf_emit(ctx, 9, 0);	/* ffffffff, 7ff */
	} else {
		xf_emit(ctx, 0xc, 0);	/* RO */
		/* SEEK */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */

		/* SEEK */
		xf_emit(ctx, 0xc, 0);	/* RO */
		/* SEEK */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */
	}
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 1);		/* 00000001 */
	/* SEEK */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 2, 4);	/* 000000ff */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_ENABLE */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0x27);		/* 000000ff SEMANTIC_PRIM_ID */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0000000f */
	xf_emit(ctx, 1, 1);		/* 00000001 */
	/* 10 copies of a 0x60-word table (purpose unknown) */
	for (i = 0; i < 10; i++) {
		/* SEEK */
		xf_emit(ctx, 0x40, 0);	/* ffffffff */
		xf_emit(ctx, 0x10, 0);	/* 3, 0, 0.... */
		xf_emit(ctx, 0x10, 0);	/* ffffffff */
	}
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_CTRL */
	xf_emit(ctx, 1, 1);		/* 00000001 */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ffffffff NOPERSPECTIVE_BITMAP */
	xf_emit(ctx, 0x10, 0);		/* 00ffffff POINT_COORD_REPLACE_MAP */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 000003ff */
}
1861 | |||
1862 | static void | ||
1863 | nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx) | ||
1864 | { | ||
1865 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
1866 | int acnt = 0x10, rep, i; | ||
1867 | /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */ | ||
1868 | if (IS_NVA3F(dev_priv->chipset)) | ||
1869 | acnt = 0x20; | ||
1870 | /* SEEK */ | ||
1871 | if (dev_priv->chipset >= 0xa0) { | ||
1872 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */ | ||
1873 | xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */ | ||
1874 | } | ||
1875 | xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */ | ||
1876 | xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */ | ||
1877 | xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */ | ||
1878 | xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */ | ||
1879 | xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ | ||
1880 | xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ | ||
1881 | xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATR_MASK_UNK0DD0 */ | ||
1882 | xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ | ||
1883 | xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */ | ||
1884 | xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */ | ||
1885 | xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */ | ||
1886 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ | ||
1887 | /* SEEK */ | ||
1888 | if (IS_NVA3F(dev_priv->chipset)) | ||
1889 | xf_emit(ctx, 0xb, 0); /* RO */ | ||
1890 | else if (dev_priv->chipset >= 0xa0) | ||
1891 | xf_emit(ctx, 0x9, 0); /* RO */ | ||
1892 | else | ||
1893 | xf_emit(ctx, 0x8, 0); /* RO */ | ||
1894 | /* SEEK */ | ||
1895 | xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */ | ||
1896 | xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ | ||
1897 | xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ | ||
1898 | xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ | ||
1899 | /* SEEK */ | ||
1900 | xf_emit(ctx, 0xc, 0); /* RO */ | ||
1901 | /* SEEK */ | ||
1902 | xf_emit(ctx, 1, 0); /* 7f/ff */ | ||
1903 | xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ | ||
1904 | xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ | ||
1905 | xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ | ||
1906 | xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */ | ||
1907 | xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */ | ||
1908 | xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ | ||
1909 | if (dev_priv->chipset == 0x50) | ||
1910 | xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */ | ||
1911 | else | ||
1912 | xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */ | ||
1913 | if (dev_priv->chipset == 0xa8) | ||
1914 | xf_emit(ctx, 1, 0x1e00); /* 7fff */ | ||
1915 | /* SEEK */ | ||
1916 | xf_emit(ctx, 0xc, 0); /* RO or close */ | ||
1917 | /* SEEK */ | ||
1918 | xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ | ||
1919 | xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ | ||
1920 | xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ | ||
1921 | if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0) | ||
1922 | xf_emit(ctx, 2, 0); /* ffffffff */ | ||
1923 | else | ||
1924 | xf_emit(ctx, 1, 0); /* ffffffff */ | ||
1925 | xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */ | ||
1926 | /* SEEK */ | ||
1927 | if (IS_NVA3F(dev_priv->chipset)) { | ||
1928 | xf_emit(ctx, 0x10, 0); /* 0? */ | ||
1929 | xf_emit(ctx, 2, 0); /* weird... */ | ||
1930 | xf_emit(ctx, 2, 0); /* RO */ | ||
1931 | } else { | ||
1932 | xf_emit(ctx, 8, 0); /* 0? */ | ||
1933 | xf_emit(ctx, 1, 0); /* weird... */ | ||
1934 | xf_emit(ctx, 2, 0); /* RO */ | ||
1935 | } | ||
1936 | /* SEEK */ | ||
1937 | xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */ | ||
1938 | xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */ | ||
1939 | xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */ | ||
1940 | if (dev_priv->chipset >= 0xa0) | ||
1941 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */ | ||
1942 | /* SEEK */ | ||
1943 | xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ | ||
1944 | xf_emit(ctx, 1, 0); /* f/1f */ | ||
1945 | /* SEEK */ | ||
1946 | xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ | ||
1947 | xf_emit(ctx, 1, 0); /* f/1f */ | ||
1948 | /* SEEK */ | ||
1949 | xf_emit(ctx, acnt, 0); /* RO */ | ||
1950 | xf_emit(ctx, 2, 0); /* RO */ | ||
1951 | /* SEEK */ | ||
1952 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */ | ||
1953 | xf_emit(ctx, 1, 0); /* RO */ | ||
1954 | /* SEEK */ | ||
1955 | xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */ | ||
1956 | xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */ | ||
1957 | xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */ | ||
1958 | xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */ | ||
1959 | /* SEEK */ | ||
1960 | xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */ | ||
1961 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1962 | /* SEEK */ | ||
1963 | xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */ | ||
1964 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1965 | /* SEEK */ | ||
1966 | xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */ | ||
1967 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1968 | /* SEEK */ | ||
1969 | xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */ | ||
1970 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1971 | /* SEEK */ | ||
1972 | xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */ | ||
1973 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1974 | /* SEEK */ | ||
1975 | xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */ | ||
1976 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1977 | /* SEEK */ | ||
1978 | if (IS_NVA3F(dev_priv->chipset)) { | ||
1979 | xf_emit(ctx, acnt, 0); /* f */ | ||
1980 | xf_emit(ctx, 3, 0); /* f/1f */ | ||
1981 | } | ||
1982 | /* SEEK */ | ||
1983 | if (IS_NVA3F(dev_priv->chipset)) | ||
1984 | xf_emit(ctx, 2, 0); /* RO */ | ||
1985 | else | ||
1986 | xf_emit(ctx, 5, 0); /* RO */ | ||
1987 | /* SEEK */ | ||
1988 | xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */ | ||
1989 | /* SEEK */ | ||
1990 | if (dev_priv->chipset < 0xa0) { | ||
1991 | xf_emit(ctx, 0x41, 0); /* RO */ | ||
1992 | /* SEEK */ | ||
1993 | xf_emit(ctx, 0x11, 0); /* RO */ | ||
1994 | } else if (!IS_NVA3F(dev_priv->chipset)) | ||
1995 | xf_emit(ctx, 0x50, 0); /* RO */ | ||
1996 | else | ||
1997 | xf_emit(ctx, 0x58, 0); /* RO */ | ||
1998 | /* SEEK */ | ||
1999 | xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ | ||
2000 | xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ | ||
2001 | xf_emit(ctx, 1, 1); /* 1 UNK0DEC */ | ||
2002 | /* SEEK */ | ||
2003 | xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */ | ||
2004 | xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */ | ||
2005 | /* SEEK */ | ||
2006 | if (IS_NVA3F(dev_priv->chipset)) | ||
2007 | xf_emit(ctx, 0x1d, 0); /* RO */ | ||
2008 | else | ||
2009 | xf_emit(ctx, 0x16, 0); /* RO */ | ||
2010 | /* SEEK */ | ||
2011 | xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ | ||
2012 | xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ | ||
2013 | /* SEEK */ | ||
2014 | if (dev_priv->chipset < 0xa0) | ||
2015 | xf_emit(ctx, 8, 0); /* RO */ | ||
2016 | else if (IS_NVA3F(dev_priv->chipset)) | ||
2017 | xf_emit(ctx, 0xc, 0); /* RO */ | ||
2018 | else | ||
2019 | xf_emit(ctx, 7, 0); /* RO */ | ||
2020 | /* SEEK */ | ||
2021 | xf_emit(ctx, 0xa, 0); /* RO */ | ||
2022 | if (dev_priv->chipset == 0xa0) | ||
2023 | rep = 0xc; | ||
2024 | else | ||
2025 | rep = 4; | ||
2026 | for (i = 0; i < rep; i++) { | ||
2027 | /* SEEK */ | ||
2028 | if (IS_NVA3F(dev_priv->chipset)) | ||
2029 | xf_emit(ctx, 0x20, 0); /* ffffffff */ | ||
2030 | xf_emit(ctx, 0x200, 0); /* ffffffff */ | ||
2031 | xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */ | ||
2032 | xf_emit(ctx, 4, 0); /* ffffffff */ | ||
2033 | } | ||
2034 | /* SEEK */ | ||
2035 | xf_emit(ctx, 1, 0); /* 113/111 */ | ||
2036 | xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ | ||
2037 | xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ | ||
2038 | xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ | ||
2039 | xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ | ||
2040 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ | ||
2041 | /* SEEK */ | ||
2042 | if (IS_NVA3F(dev_priv->chipset)) | ||
2043 | xf_emit(ctx, 7, 0); /* weird... */ | ||
2044 | else | ||
2045 | xf_emit(ctx, 5, 0); /* weird... */ | ||
2046 | } | ||
2047 | |||
/*
 * Emit the default state for the 2D engine (eng2d) section of the
 * initial NV50-family graphics context image.
 *
 * Each xf_emit(ctx, count, value) writes `count` words of `value` at
 * the current position in the context image; the trailing comment on
 * each call gives the register's valid-bit mask and, where known from
 * reverse engineering, the object method it backs.  "SEEK" comments
 * mark discontinuities (a jump to a different offset) in the image
 * layout.  Statement order and word counts are therefore part of the
 * hardware contract — do not reorder or merge these calls.
 */
static void
nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
	/* SEEK */
	xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
	xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
	xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
	if (dev_priv->chipset < 0xa0) {
		/* this is useless on everything but the original NV50,
		 * guess they forgot to nuke it. Or just didn't bother. */
		xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
		xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */
		xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */
	}
	xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
	xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
	xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */
	xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */
	xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */
	/* SIFC (sized image from CPU) source/destination parameters;
	 * DU/DV integer parts default to 1 (identity scale). */
	xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */
	xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */
	xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */
	xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */
	xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */
	xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */
	xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */
	xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */
	xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */
	xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */
	xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
	xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */
	xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
	xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */
	xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */
	/* BLIT defaults: 1:1 scale (DU_DX/DV_DY int = 1), 1x1 dest. */
	xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */
	xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */
	xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
	xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
	xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
	xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
	xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */
	xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */
	xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */
	xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */
	xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */
	xf_emit(ctx, 1, 0); /* 00000001 UNK888 */
	xf_emit(ctx, 1, 4); /* 0000003f UNK884 */
	xf_emit(ctx, 1, 0); /* 00000007 UNK880 */
	xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */
	xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */
	xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */
	xf_emit(ctx, 1, 0); /* 00000001 UNK260 */
	xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */
	/* SEEK */
	xf_emit(ctx, 0x10, 0);
	/* SEEK */
	xf_emit(ctx, 0x27, 0);
}
2109 | |||
/*
 * Emit the default compute-scheduler (csched) state for the initial
 * NV50-family graphics context image: grid/block dimensions, the
 * 0x40-word USER_PARAM array, and a per-chipset block of zeros.
 *
 * NOTE(review): the differing counts in the chipset switch presumably
 * track per-chipset unit counts (TPs/MPs) — to be confirmed against
 * envytools documentation.  The emission order and word counts define
 * the context-image layout; do not reorder or merge calls.
 */
static void
nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
	/* SEEK */
	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0); /* 000003ff */
	/* SEEK */
	xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */
	xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */
	xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */
	xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */
	xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */
	xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */
	/* grid/block launch defaults: 1x1 grid, 1x1x1 block */
	xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */
	xf_emit(ctx, 1, 0); /* ffffffff */
	xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
	xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
	xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
	xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
	xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
	xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
	/* SEEK */
	xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
	/* Per-chipset block; sizes differ per chipset (see NOTE above). */
	switch (dev_priv->chipset) {
	case 0x50:
	case 0x92:
		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x80, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */
		break;
	case 0x84:
		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x60, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
		break;
	case 0x94:
	case 0x96:
		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x40, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */
		break;
	case 0x86:
	case 0x98:
		xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
		xf_emit(ctx, 0x10, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
		break;
	case 0xa0:
		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0xf0, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */
		break;
	case 0xa3:
		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x60, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
		break;
	case 0xa5:
	case 0xaf:
		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x30, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */
		break;
	case 0xaa:
		xf_emit(ctx, 0x12, 0);
		break;
	case 0xa8:
	case 0xac:
		xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
		xf_emit(ctx, 0x10, 0); /* fff */
		xf_emit(ctx, 2, 0); /* ff, fff */
		xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
		break;
	}
	xf_emit(ctx, 1, 0); /* 0000000f */
	xf_emit(ctx, 1, 0); /* 00000000 */
	xf_emit(ctx, 1, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 0000001f */
	xf_emit(ctx, 4, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
	xf_emit(ctx, 1, 0); /* ffffffff */
	xf_emit(ctx, 4, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
	xf_emit(ctx, 1, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 000000ff */
}
2208 | |||
/*
 * Emit default state for an unidentified unit ("unk1cxx" — the name
 * refers to an unidentified register range, not a known engine).
 * The values written are rasterizer-adjacent defaults: line/polygon
 * modes, stipple, color masks, RT formats, depth ranges, viewports.
 *
 * NOTE(review): the trailing zero block could belong here or to the
 * STRMOUT gene (see XXX below) — layout is only known empirically.
 * Counts and order define the context-image layout; do not reorder.
 */
static void
nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
	xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
	xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */
	xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
	xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */
	xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */
	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
	xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
	xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */
	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
	xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
	xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
	xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */
	xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
	xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
	xf_emit(ctx, 1, 0); /* 00000001 */
	xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 0); /* 3ff */
		xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
	}
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
	/* 16 viewports' depth ranges: near = 0.0f, far = 1.0f */
	xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
	xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
	xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */
	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */
	xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */
	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x1c, 0); /* RO */
	else if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x9, 0);
	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
		xf_emit(ctx, 1, 0); /* 3ff */
	}
	/* XXX: the following block could belong either to unk1cxx, or
	 * to STRMOUT. Rather hard to tell. */
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x25, 0);
	else
		xf_emit(ctx, 0x3b, 0);
}
2304 | |||
/*
 * Emit default stream-output (transform feedback, STRMOUT) state for
 * the initial NV50-family graphics context image: buffer control,
 * per-buffer addresses/attribute counts, query/DMA object slots and
 * the 0x20-word STRMOUT_MAP.  NVA0+ parts carry two extra 4-word
 * blocks (UNK1A8C/UNK1780).  Counts and order define the image
 * layout; do not reorder or merge calls.
 */
static void
nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
	xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
	xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
		xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
	}
	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
	else
		xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	/* SEEK — second copy of the buffer state at a different offset,
	 * this time including the per-buffer address registers. */
	xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
	xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
	xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
	xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
	xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
		xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
	}
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
	xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
	xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
	xf_emit(ctx, 2, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	/* SEEK */
	xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */
	xf_emit(ctx, 1, 0); /* 0000000f */
	xf_emit(ctx, 1, 0); /* 00000000? */
	xf_emit(ctx, 2, 0); /* ffffffff */
}
2346 | |||
/*
 * Emit the first ROP-related ("ropm1" — presumably per-ROP/memory
 * partition state, to be confirmed) default block of the initial
 * NV50-family graphics context image.  NVA3+ parts (IS_NVA3F) carry
 * one extra register (UNK1968).
 */
static void
nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 0); /* 00000007 */
	xf_emit(ctx, 1, 0); /* 000003ff */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
2359 | |||
/*
 * Emit the second ROP-related ("ropm2") default block of the initial
 * NV50-family graphics context image: query/DMA slots, RT_CONTROL and
 * the same UNK0D64/UNK0DF4 magic pair as ropm1.  NVA3+ parts
 * (IS_NVA3F) carry one extra register (UNK1968).  Counts and order
 * define the image layout; do not reorder or merge calls.
 */
static void
nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* SEEK */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 2, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
	xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
	xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0); /* 7 */
	/* SEEK */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
	xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
	xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
	xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, 0); /* 00000007 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
2385 | |||
2386 | static void | ||
2387 | nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) | ||
2388 | { | ||
2389 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
2390 | int magic2; | ||
2391 | if (dev_priv->chipset == 0x50) { | ||
2392 | magic2 = 0x00003e60; | ||
2393 | } else if (!IS_NVA3F(dev_priv->chipset)) { | ||
2394 | magic2 = 0x001ffe67; | ||
2395 | } else { | ||
2396 | magic2 = 0x00087e67; | ||
2397 | } | ||
2398 | xf_emit(ctx, 1, 0); /* f/7 MUTISAMPLE_SAMPLES_LOG2 */ | ||
2399 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ | ||
2400 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ | ||
2401 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ | ||
2402 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ | ||
2403 | xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ | ||
2404 | xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ | ||
2405 | xf_emit(ctx, 1, 0); /* ffff0ff3 */ | ||
2406 | xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ | ||
2407 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ | ||
2408 | xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ | ||
2409 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ | ||
2410 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ | ||
2411 | if (IS_NVA3F(dev_priv->chipset)) | ||
2412 | xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ | ||
2413 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ | ||
2414 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ | ||
2415 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ | ||
2416 | xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ | ||
2417 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ | ||
2418 | if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset)) | ||
2419 | xf_emit(ctx, 1, 0x15); /* 000000ff */ | ||
2420 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ | ||
2421 | xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ | ||
2422 | xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */ | ||
2423 | xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ | ||
2424 | xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ | ||
2425 | xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ | ||
2426 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ | ||
2427 | if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { | ||
2428 | xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */ | ||
2429 | xf_emit(ctx, 1, 4); /* 7 */ | ||
2430 | xf_emit(ctx, 1, 0x400); /* fffffff */ | ||
2431 | xf_emit(ctx, 1, 0x300); /* ffff */ | ||
2432 | xf_emit(ctx, 1, 0x1001); /* 1fff */ | ||
2433 | if (dev_priv->chipset != 0xa0) { | ||
2434 | if (IS_NVA3F(dev_priv->chipset)) | ||
2435 | xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */ | ||
2436 | else | ||
2437 | xf_emit(ctx, 1, 0x15); /* ff */ | ||
2438 | } | ||
2439 | } | ||
2440 | xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ | ||
2441 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ | ||
2442 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ | ||
2443 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ | ||
2444 | xf_emit(ctx, 1, 0); /* ffff0ff3 */ | ||
2445 | xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ | ||
2446 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ | ||
2447 | xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ | ||
2448 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ | ||
2449 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ | ||
2450 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ | ||
2451 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ | ||
2452 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ | ||
2453 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ | ||
2454 | xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ | ||
2455 | xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ | ||
2456 | xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ | ||
2457 | xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ | ||
2458 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ | ||
2459 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ | ||
2460 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ | ||
2461 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ | ||
2462 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ | ||
2463 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ | ||
2464 | xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ | ||
2465 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ | ||
2466 | xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ | ||
2467 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ | ||
2468 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ | ||
2469 | xf_emit(ctx, 1, 0); /* 0000000f */ | ||
2470 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ | ||
2471 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ | ||
2472 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ | ||
2473 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ | ||
2474 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ | ||
2475 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ | ||
2476 | xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ | ||
2477 | xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ | ||
2478 | xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ | ||
2479 | xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ | ||
2480 | xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ | ||
2481 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ | ||
2482 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ | ||
2483 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ | ||
2484 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ | ||
2485 | xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ | ||
2486 | xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ | ||
2487 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ | ||
2488 | xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ | ||
2489 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ | ||
2490 | xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ | ||
2491 | xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ | ||
2492 | xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ | ||
2493 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ | ||
2494 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ | ||
2495 | xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ | ||
2496 | xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ | ||
2497 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ | ||
2498 | xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ | ||
2499 | xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ | ||
2500 | xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ | ||
2501 | xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ | ||
2502 | xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ | ||
2503 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ | ||
2504 | xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */ | ||
2505 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ | ||
2506 | xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ | ||
2507 | xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ | ||
2508 | xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ | ||
2509 | xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ | ||
2510 | xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ | ||
2511 | xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ | ||
2512 | if (dev_priv->chipset >= 0xa0) { | ||
2513 | xf_emit(ctx, 2, 0); | ||
2514 | xf_emit(ctx, 1, 0x1001); | ||
2515 | xf_emit(ctx, 0xb, 0); | ||
2516 | } else { | ||
2517 | xf_emit(ctx, 1, 0); /* 00000007 */ | ||
2518 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ | ||
2519 | xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ | ||
2520 | xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ | ||
2521 | xf_emit(ctx, 1, 0); /* ffff0ff3 */ | ||
2522 | } | ||
2523 | xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ | ||
2524 | xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ | ||
2525 | xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ | ||
2526 | xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ | ||
2527 | xf_emit(ctx, 1, 0x11); /* 3f/7f */ | ||
2528 | xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ | ||
2529 | if (dev_priv->chipset != 0x50) { | ||
2530 | xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ | ||
2531 | xf_emit(ctx, 1, 0); /* 000000ff */ | ||
2532 | } | ||
2533 | xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ | ||
2534 | xf_emit(ctx, 1, 0); /* ff/3ff */ | ||
2535 | xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ | ||
2536 | xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */ | ||
2537 | xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ | ||
2538 | xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ | ||
2539 | xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ | ||
2540 | xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ | ||
2541 | xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ | ||
2542 | xf_emit(ctx, 1, 0); /* 00000001 */ | ||
2543 | xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ | ||
2544 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ | ||
2545 | xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ | ||
2546 | if (IS_NVA3F(dev_priv->chipset)) { | ||
2547 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ | ||
2548 | xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ | ||
2549 | xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ | ||
2550 | xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ | ||
2551 | xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ | ||
2552 | xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ | ||
2553 | xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ | ||
2554 | xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ | ||
2555 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ | ||
2556 | xf_emit(ctx, 2, 0); /* 00000001 */ | ||
2557 | xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ | ||
2558 | xf_emit(ctx, 1, 0); /* 0000000f */ | ||
2559 | xf_emit(ctx, 1, 0); /* 00000003 */ | ||
2560 | xf_emit(ctx, 1, 0); /* ffffffff */ | ||
2561 | xf_emit(ctx, 2, 0); /* 00000001 */ | ||
2562 | xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ | ||
2563 | xf_emit(ctx, 1, 0); /* 00000001 */ | ||
2564 | xf_emit(ctx, 1, 0); /* 000003ff */ | ||
2565 | } else if (dev_priv->chipset >= 0xa0) { | ||
2566 | xf_emit(ctx, 2, 0); /* 00000001 */ | ||
2567 | xf_emit(ctx, 1, 0); /* 00000007 */ | ||
2568 | xf_emit(ctx, 1, 0); /* 00000003 */ | ||
2569 | xf_emit(ctx, 1, 0); /* ffffffff */ | ||
2570 | xf_emit(ctx, 2, 0); /* 00000001 */ | ||
2571 | } else { | ||
2572 | xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ | ||
2573 | xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */ | ||
2574 | xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ | ||
2575 | } | ||
2576 | xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ | ||
2577 | xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */ | ||
2578 | xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ | ||
2579 | if (dev_priv->chipset >= 0xa0) | ||
2580 | xf_emit(ctx, 2, 0); /* 00000001 */ | ||
2581 | xf_emit(ctx, 1, 0); /* 000003ff */ | ||
2582 | xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ | ||
2583 | xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ | ||
2584 | xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ | ||
2585 | xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ | ||
2586 | xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ | ||
2587 | xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ | ||
2588 | xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ | ||
2589 | xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ | ||
2590 | xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ | ||
2591 | xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ | ||
2592 | xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ | ||
2593 | if (dev_priv->chipset >= 0xa0) | ||
2594 | xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */ | ||
2595 | if (IS_NVA3F(dev_priv->chipset)) { | ||
2596 | xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ | ||
2597 | xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ | ||
2598 | xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ | ||
2599 | xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ | ||
2600 | xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ | ||
2601 | xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ | ||
2602 | xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ | ||
2603 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */ | ||
2604 | xf_emit(ctx, 1, 0); /* 00000001 */ | ||
2605 | xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ | ||
2606 | } | ||
2607 | xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ | ||
2608 | xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ | ||
2609 | xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */ | ||
2610 | xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */ | ||
2611 | xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */ | ||
2612 | xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */ | ||
2613 | xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */ | ||
2614 | xf_emit(ctx, 1, 0); /* 000000ff ROP */ | ||
2615 | xf_emit(ctx, 1, 0); /* ffffffff BETA1 */ | ||
2616 | xf_emit(ctx, 1, 0); /* ffffffff BETA4 */ | ||
2617 | xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ | ||
2618 | xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */ | ||
2619 | } | ||
2620 | |||
/*
 * Emit the initial state for the per-TP unit at the "unk84xx" area of the
 * context image (exact unit purpose unknown -- the register comments below
 * name VP/GP result-map and allocation registers, so it appears related to
 * vertex/geometry program output routing).
 *
 * NOTE(review): each xf_emit() call fills a fixed slot in the context
 * image; the number, order, and sizes of these calls must match the
 * hardware's context layout exactly.  Do not reorder or coalesce them.
 */
static void
nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic3;
	/* Chipset-dependent initial value for tesla UNK141C (emitted twice
	 * below, only on chipsets where it is nonzero). */
	switch (dev_priv->chipset) {
	case 0x50:
		magic3 = 0x1000;
		break;
	case 0x86:
	case 0x98:
	case 0xa8:
	case 0xaa:
	case 0xac:
	case 0xaf:
		magic3 = 0x1e00;
		break;
	default:
		magic3 = 0;
	}
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
	/* The size of this zero-filled region differs per chipset family. */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x1f, 0); /* ffffffff */
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 0x0f, 0); /* ffffffff */
	else
		xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
	xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
	xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x03020100); /* ffffffff */
	else
		xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */
	xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	if (magic3)
		xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */
	xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0); /* 111/113 */
	xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */
	xf_emit(ctx, 1, 0); /* 0000001f */
	xf_emit(ctx, 1, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */
	xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
	if (magic3)
		xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */
	xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0); /* 111/113 */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
	xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */
	xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0); /* 111/113 */
	/* Large chipset-dependent scratch area; the sizes below are the
	 * per-chipset context-image region sizes, not tunables. */
	if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
		xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
	else if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
	else if (!IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x210, 0); /* ffffffff */
	else
		xf_emit(ctx, 0x410, 0); /* ffffffff */
	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
	xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
}
2712 | |||
/*
 * Emit the initial state for the per-TP "tprop" area of the context image.
 * Judging from the register names below (RT_*, BLEND_*, STENCIL_*, ZETA_*,
 * DST_*), this block covers render-target / ROP-related per-TP state --
 * presumably the raster output property unit; verify against envytools docs.
 *
 * NOTE(review): each xf_emit() call fills a fixed slot in the context
 * image; the number, order, and sizes of these calls must match the
 * hardware's context layout exactly.  Do not reorder or coalesce them.
 */
static void
nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic1, magic2;
	/* Chipset-dependent initial values: magic1 goes into tesla UNK0D68,
	 * magic2 into tesla UNK0F78 (emitted at several points below). */
	if (dev_priv->chipset == 0x50) {
		magic1 = 0x3ff;
		magic2 = 0x00003e60;
	} else if (!IS_NVA3F(dev_priv->chipset)) {
		magic1 = 0x7ff;
		magic2 = 0x001ffe67;
	} else {
		magic1 = 0x7ff;
		magic2 = 0x00087e67;
	}
	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
	xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */
	xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
	xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */
	xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
	xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */
	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
	xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
	xf_emit(ctx, 1, 0); /* 7 */
	xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */
	xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */
	xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */
	xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
		xf_emit(ctx, 1, 0); /* 00000003 */
		xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
	} else if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
		xf_emit(ctx, 1, 0); /* 00000003 */
	} else {
		xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
	}
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
	xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
	if (IS_NVA3F(dev_priv->chipset)) {
		/* NVA3+ has per-RT independent blend state. */
		xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
		xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
		xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */
		xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */
		xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */
		xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */
		xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
	}
	xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
	xf_emit(ctx, 1, 0); /* 7 */
	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
	xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
	xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
	xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
	xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */
	xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0); /* ff */
	else
		xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000007 */
	xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
	xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
	xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
	xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000007 */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */
	xf_emit(ctx, 1, 0); /* 7 */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */
	xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */
	xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
	xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */
	xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */
	xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */
	xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */
	xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */
	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
	xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */
	xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */
	xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */
	xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
	xf_emit(ctx, 1, 0); /* 0000ffff */
	xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0); /* 00000007 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 2, 0); /* ffff, ff/3ff */
	xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0); /* 00000007 */
	xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */
	xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */
	xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */
	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */
	xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */
	xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0); /* 00000001 */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
	xf_emit(ctx, 1, 0); /* 7 */
	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	}
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
	xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
	xf_emit(ctx, 1, 0); /* 00000007 */
	xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
		xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
	}
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
	/* NVA0+ carries an extra trailing sub-block in this area; the
	 * register functions here are mostly unidentified (sizes only). */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
		xf_emit(ctx, 1, 0xfac6881); /* fffffff */
		xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
		xf_emit(ctx, 1, 4); /* 7 */
		xf_emit(ctx, 1, 0); /* 1 */
		xf_emit(ctx, 2, 1); /* 1 */
		xf_emit(ctx, 2, 0); /* 7, f */
		xf_emit(ctx, 1, 1); /* 1 */
		xf_emit(ctx, 1, 0); /* 7/f */
		if (IS_NVA3F(dev_priv->chipset))
			xf_emit(ctx, 0x9, 0); /* 1 */
		else
			xf_emit(ctx, 0x8, 0); /* 1 */
		xf_emit(ctx, 1, 0); /* ffff0ff3 */
		xf_emit(ctx, 8, 1); /* 1 */
		xf_emit(ctx, 1, 0x11); /* 7f */
		xf_emit(ctx, 7, 0); /* 7f */
		xf_emit(ctx, 1, 0xfac6881); /* fffffff */
		xf_emit(ctx, 1, 0xf); /* f */
		xf_emit(ctx, 7, 0); /* f */
		xf_emit(ctx, 1, 0x11); /* 7f */
		xf_emit(ctx, 1, 1); /* 1 */
		xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
		if (IS_NVA3F(dev_priv->chipset)) {
			xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
			xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
		}
	}
}
3012 | |||
/*
 * Emit the initial state for the per-TP texture-unit area of the context
 * image: blit scaling factors, the default source TIC (texture image
 * control) words, and the texture/source DMA object slots.
 *
 * NOTE(review): each xf_emit() call fills a fixed slot in the context
 * image; the number, order, and sizes of these calls must match the
 * hardware's context layout exactly.  Do not reorder or coalesce them.
 */
static void
nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0); /* 3 */
	xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
	xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
	xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
	xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
	else
		xf_emit(ctx, 2, 0); /* 3ff, 1 */
	/* Default SRC_TIC_0..7: initial texture image control words. */
	xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */
	xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */
	xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */
	xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */
	xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */
	xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
	xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
	xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
	if (dev_priv->chipset == 0x50) {
		xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
		xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
		xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
		xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
	} else if (!IS_NVAAF(dev_priv->chipset)) {
		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
		xf_emit(ctx, 1, 0); /* 00000003 */
		xf_emit(ctx, 1, 0); /* 000003ff */
		xf_emit(ctx, 1, 0); /* 00000003 */
		xf_emit(ctx, 1, 0); /* 000003ff */
		xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */
		xf_emit(ctx, 1, 0); /* 00000003 */
		xf_emit(ctx, 1, 0); /* 000003ff */
	} else {
		/* NVAA/NVAC/NVAF: same-size region, registers unidentified. */
		xf_emit(ctx, 0x6, 0);
	}
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */
	xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */
}
3058 | |||
/*
 * Emit the initial state for the per-TP unit at the "unk8cxx" area of the
 * context image (exact unit purpose unknown).  The register comments below
 * name line/polygon smoothing, line stipple, and depth-related state, so
 * this is presumably rasterization-adjacent -- verify against envytools.
 *
 * NOTE(review): each xf_emit() call fills a fixed slot in the context
 * image; the number, order, and sizes of these calls must match the
 * hardware's context layout exactly.  Do not reorder or coalesce them.
 */
static void
nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
	xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0); /* ffff0ff3 */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
	xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
	xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
	/* Four words forming the byte sequence 0x00..0xf8 in steps of 8. */
	xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */
	xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */
	xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */
	xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */
	xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
}
3097 | |||
3098 | static void | ||
3099 | nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) | ||
3100 | { | ||
3101 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
3102 | if (dev_priv->chipset < 0xa0) { | ||
3103 | nv50_graph_construct_xfer_unk84xx(ctx); | ||
3104 | nv50_graph_construct_xfer_tprop(ctx); | ||
3105 | nv50_graph_construct_xfer_tex(ctx); | ||
3106 | nv50_graph_construct_xfer_unk8cxx(ctx); | ||
3107 | } else { | ||
3108 | nv50_graph_construct_xfer_tex(ctx); | ||
3109 | nv50_graph_construct_xfer_tprop(ctx); | ||
3110 | nv50_graph_construct_xfer_unk8cxx(ctx); | ||
3111 | nv50_graph_construct_xfer_unk84xx(ctx); | ||
3112 | } | ||
3113 | } | ||
3114 | |||
/* Emit the MP (shader multiprocessor) portion of a TP's context image.
 * First a small per-MP block is emitted once for each MP present on the
 * chipset (1-3 depending on variant), then a single shared block of
 * shader/ROP-related state.  The values and their order are magic
 * numbers recovered by reverse engineering; the trailing comments give
 * the observed writable bitmask and, where known, the register name.
 * Do not reorder anything here. */
static void
nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i, mpcnt = 2;
	/* number of MPs per TP varies by chipset */
	switch (dev_priv->chipset) {
	case 0x98:
	case 0xaa:
		mpcnt = 1;
		break;
	case 0x50:
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0xa8:
	case 0xac:
		mpcnt = 2;
		break;
	case 0xa0:
	case 0xa3:
	case 0xa5:
	case 0xaf:
		mpcnt = 3;
		break;
	}
	/* per-MP state block */
	for (i = 0; i < mpcnt; i++) {
		xf_emit(ctx, 1, 0); /* ff */
		xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
		xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
		xf_emit(ctx, 1, 0x04000400); /* ffffffff */
		if (dev_priv->chipset >= 0xa0)
			xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
		xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
		xf_emit(ctx, 1, 0); /* ff/3ff */
		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
		if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
			xf_emit(ctx, 1, 0xe00); /* 7fff */
			xf_emit(ctx, 1, 0x1e00); /* 7fff */
		}
		xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
		xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
		xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
		if (dev_priv->chipset == 0x50)
			xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
		xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
		xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
		xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
		xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
		if (IS_NVAAF(dev_priv->chipset))
			xf_emit(ctx, 0xb, 0); /* RO */
		else if (dev_priv->chipset >= 0xa0)
			xf_emit(ctx, 0xc, 0); /* RO */
		else
			xf_emit(ctx, 0xa, 0); /* RO */
	}
	/* shared (non-per-MP) state follows */
	xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
	}
	xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
	xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
	xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
	xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
	xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
	xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
	xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */
	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, 0); /* ff/3ff */
	xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
	xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */
	xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
	xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
	xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
	xf_emit(ctx, 1, 0); /* 00000007 */
	xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
	xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
	xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */
	xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
	xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
	if (IS_NVA3F(dev_priv->chipset)) {
		/* NVA3+ independent (per-RT) blend state */
		xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
		xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
		xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
		xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
		xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
		xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
		xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
		xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
	}
	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
	xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
	/* XXX: demagic this part some day */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 0x3a0, 0);
	else if (dev_priv->chipset < 0x94)
		xf_emit(ctx, 0x3a2, 0);
	else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
		xf_emit(ctx, 0x39f, 0);
	else
		xf_emit(ctx, 0x3a3, 0);
	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 0); /* 7 OPERATION */
	xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */
	xf_emit(ctx, 0x2d, 0);
}
3246 | |||
3247 | static void | ||
3248 | nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) | ||
3249 | { | ||
3250 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
3251 | int i; | ||
3252 | uint32_t offset; | ||
3253 | uint32_t units = nv_rd32 (ctx->dev, 0x1540); | ||
3254 | int size = 0; | ||
3255 | |||
3256 | offset = (ctx->ctxvals_pos+0x3f)&~0x3f; | ||
3257 | |||
3258 | if (dev_priv->chipset < 0xa0) { | ||
3259 | for (i = 0; i < 8; i++) { | ||
3260 | ctx->ctxvals_pos = offset + i; | ||
3261 | /* that little bugger belongs to csched. No idea | ||
3262 | * what it's doing here. */ | ||
3263 | if (i == 0) | ||
3264 | xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ | ||
3265 | if (units & (1 << i)) | ||
3266 | nv50_graph_construct_xfer_mpc(ctx); | ||
3267 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
3268 | size = (ctx->ctxvals_pos-offset)/8; | ||
3269 | } | ||
3270 | } else { | ||
3271 | /* Strand 0: TPs 0, 1 */ | ||
3272 | ctx->ctxvals_pos = offset; | ||
3273 | /* that little bugger belongs to csched. No idea | ||
3274 | * what it's doing here. */ | ||
3275 | xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ | ||
3276 | if (units & (1 << 0)) | ||
3277 | nv50_graph_construct_xfer_mpc(ctx); | ||
3278 | if (units & (1 << 1)) | ||
3279 | nv50_graph_construct_xfer_mpc(ctx); | ||
3280 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
3281 | size = (ctx->ctxvals_pos-offset)/8; | ||
3282 | |||
3283 | /* Strand 1: TPs 2, 3 */ | ||
3284 | ctx->ctxvals_pos = offset + 1; | ||
3285 | if (units & (1 << 2)) | ||
3286 | nv50_graph_construct_xfer_mpc(ctx); | ||
3287 | if (units & (1 << 3)) | ||
3288 | nv50_graph_construct_xfer_mpc(ctx); | ||
3289 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
3290 | size = (ctx->ctxvals_pos-offset)/8; | ||
3291 | |||
3292 | /* Strand 2: TPs 4, 5, 6 */ | ||
3293 | ctx->ctxvals_pos = offset + 2; | ||
3294 | if (units & (1 << 4)) | ||
3295 | nv50_graph_construct_xfer_mpc(ctx); | ||
3296 | if (units & (1 << 5)) | ||
3297 | nv50_graph_construct_xfer_mpc(ctx); | ||
3298 | if (units & (1 << 6)) | ||
3299 | nv50_graph_construct_xfer_mpc(ctx); | ||
3300 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
3301 | size = (ctx->ctxvals_pos-offset)/8; | ||
3302 | |||
3303 | /* Strand 3: TPs 7, 8, 9 */ | ||
3304 | ctx->ctxvals_pos = offset + 3; | ||
3305 | if (units & (1 << 7)) | ||
3306 | nv50_graph_construct_xfer_mpc(ctx); | ||
3307 | if (units & (1 << 8)) | ||
3308 | nv50_graph_construct_xfer_mpc(ctx); | ||
3309 | if (units & (1 << 9)) | ||
3310 | nv50_graph_construct_xfer_mpc(ctx); | ||
3311 | if ((ctx->ctxvals_pos-offset)/8 > size) | ||
3312 | size = (ctx->ctxvals_pos-offset)/8; | ||
3313 | } | ||
3314 | ctx->ctxvals_pos = offset + size * 8; | ||
3315 | ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; | ||
3316 | cp_lsr (ctx, offset); | ||
3317 | cp_out (ctx, CP_SET_XFER_POINTER); | ||
3318 | cp_lsr (ctx, size); | ||
3319 | cp_out (ctx, CP_SEEK_2); | ||
3320 | cp_out (ctx, CP_XFER_2); | ||
3321 | cp_wait(ctx, XFER, BUSY); | ||
3322 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c new file mode 100644 index 00000000000..a7c12c94a5a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -0,0 +1,428 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | |||
31 | #include "nouveau_drv.h" | ||
32 | #include "nouveau_vm.h" | ||
33 | |||
34 | #define BAR1_VM_BASE 0x0020000000ULL | ||
35 | #define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1) | ||
36 | #define BAR3_VM_BASE 0x0000000000ULL | ||
37 | #define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3) | ||
38 | |||
39 | struct nv50_instmem_priv { | ||
40 | uint32_t save1700[5]; /* 0x1700->0x1710 */ | ||
41 | |||
42 | struct nouveau_gpuobj *bar1_dmaobj; | ||
43 | struct nouveau_gpuobj *bar3_dmaobj; | ||
44 | }; | ||
45 | |||
46 | static void | ||
47 | nv50_channel_del(struct nouveau_channel **pchan) | ||
48 | { | ||
49 | struct nouveau_channel *chan; | ||
50 | |||
51 | chan = *pchan; | ||
52 | *pchan = NULL; | ||
53 | if (!chan) | ||
54 | return; | ||
55 | |||
56 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | ||
57 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | ||
58 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | ||
59 | if (drm_mm_initialized(&chan->ramin_heap)) | ||
60 | drm_mm_takedown(&chan->ramin_heap); | ||
61 | nouveau_gpuobj_ref(NULL, &chan->ramin); | ||
62 | kfree(chan); | ||
63 | } | ||
64 | |||
65 | static int | ||
66 | nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, | ||
67 | struct nouveau_channel **pchan) | ||
68 | { | ||
69 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
70 | u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; | ||
71 | u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; | ||
72 | struct nouveau_channel *chan; | ||
73 | int ret, i; | ||
74 | |||
75 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | ||
76 | if (!chan) | ||
77 | return -ENOMEM; | ||
78 | chan->dev = dev; | ||
79 | |||
80 | ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); | ||
81 | if (ret) { | ||
82 | nv50_channel_del(&chan); | ||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size); | ||
87 | if (ret) { | ||
88 | nv50_channel_del(&chan); | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : | ||
93 | chan->ramin->pinst + pgd, | ||
94 | chan->ramin->vinst + pgd, | ||
95 | 0x4000, NVOBJ_FLAG_ZERO_ALLOC, | ||
96 | &chan->vm_pd); | ||
97 | if (ret) { | ||
98 | nv50_channel_del(&chan); | ||
99 | return ret; | ||
100 | } | ||
101 | |||
102 | for (i = 0; i < 0x4000; i += 8) { | ||
103 | nv_wo32(chan->vm_pd, i + 0, 0x00000000); | ||
104 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); | ||
105 | } | ||
106 | |||
107 | ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); | ||
108 | if (ret) { | ||
109 | nv50_channel_del(&chan); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : | ||
114 | chan->ramin->pinst + fc, | ||
115 | chan->ramin->vinst + fc, 0x100, | ||
116 | NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc); | ||
117 | if (ret) { | ||
118 | nv50_channel_del(&chan); | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | *pchan = chan; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | int | ||
127 | nv50_instmem_init(struct drm_device *dev) | ||
128 | { | ||
129 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
130 | struct nv50_instmem_priv *priv; | ||
131 | struct nouveau_channel *chan; | ||
132 | struct nouveau_vm *vm; | ||
133 | int ret, i; | ||
134 | u32 tmp; | ||
135 | |||
136 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
137 | if (!priv) | ||
138 | return -ENOMEM; | ||
139 | dev_priv->engine.instmem.priv = priv; | ||
140 | |||
141 | /* Save state, will restore at takedown. */ | ||
142 | for (i = 0x1700; i <= 0x1710; i += 4) | ||
143 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); | ||
144 | |||
145 | /* Global PRAMIN heap */ | ||
146 | ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); | ||
147 | if (ret) { | ||
148 | NV_ERROR(dev, "Failed to init RAMIN heap\n"); | ||
149 | goto error; | ||
150 | } | ||
151 | |||
152 | /* BAR3 */ | ||
153 | ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE, | ||
154 | &dev_priv->bar3_vm); | ||
155 | if (ret) | ||
156 | goto error; | ||
157 | |||
158 | ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8, | ||
159 | 0x1000, NVOBJ_FLAG_DONT_MAP | | ||
160 | NVOBJ_FLAG_ZERO_ALLOC, | ||
161 | &dev_priv->bar3_vm->pgt[0].obj[0]); | ||
162 | if (ret) | ||
163 | goto error; | ||
164 | dev_priv->bar3_vm->pgt[0].refcount[0] = 1; | ||
165 | |||
166 | nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]); | ||
167 | |||
168 | ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan); | ||
169 | if (ret) | ||
170 | goto error; | ||
171 | dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan; | ||
172 | |||
173 | ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE, | ||
174 | NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, | ||
175 | NV_MEM_TYPE_VM, NV_MEM_COMP_VM, | ||
176 | &priv->bar3_dmaobj); | ||
177 | if (ret) | ||
178 | goto error; | ||
179 | |||
180 | nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); | ||
181 | nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); | ||
182 | nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4)); | ||
183 | |||
184 | dev_priv->engine.instmem.flush(dev); | ||
185 | dev_priv->ramin_available = true; | ||
186 | |||
187 | tmp = nv_ro32(chan->ramin, 0); | ||
188 | nv_wo32(chan->ramin, 0, ~tmp); | ||
189 | if (nv_ro32(chan->ramin, 0) != ~tmp) { | ||
190 | NV_ERROR(dev, "PRAMIN readback failed\n"); | ||
191 | ret = -EIO; | ||
192 | goto error; | ||
193 | } | ||
194 | nv_wo32(chan->ramin, 0, tmp); | ||
195 | |||
196 | /* BAR1 */ | ||
197 | ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm); | ||
198 | if (ret) | ||
199 | goto error; | ||
200 | |||
201 | ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd); | ||
202 | if (ret) | ||
203 | goto error; | ||
204 | nouveau_vm_ref(NULL, &vm, NULL); | ||
205 | |||
206 | ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE, | ||
207 | NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, | ||
208 | NV_MEM_TYPE_VM, NV_MEM_COMP_VM, | ||
209 | &priv->bar1_dmaobj); | ||
210 | if (ret) | ||
211 | goto error; | ||
212 | |||
213 | nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4)); | ||
214 | for (i = 0; i < 8; i++) | ||
215 | nv_wr32(dev, 0x1900 + (i*4), 0); | ||
216 | |||
217 | /* Create shared channel VM, space is reserved at the beginning | ||
218 | * to catch "NULL pointer" references | ||
219 | */ | ||
220 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, | ||
221 | &dev_priv->chan_vm); | ||
222 | if (ret) | ||
223 | return ret; | ||
224 | |||
225 | return 0; | ||
226 | |||
227 | error: | ||
228 | nv50_instmem_takedown(dev); | ||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | void | ||
233 | nv50_instmem_takedown(struct drm_device *dev) | ||
234 | { | ||
235 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
236 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | ||
237 | struct nouveau_channel *chan = dev_priv->channels.ptr[0]; | ||
238 | int i; | ||
239 | |||
240 | NV_DEBUG(dev, "\n"); | ||
241 | |||
242 | if (!priv) | ||
243 | return; | ||
244 | |||
245 | dev_priv->ramin_available = false; | ||
246 | |||
247 | nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); | ||
248 | |||
249 | for (i = 0x1700; i <= 0x1710; i += 4) | ||
250 | nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); | ||
251 | |||
252 | nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj); | ||
253 | nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj); | ||
254 | |||
255 | nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd); | ||
256 | dev_priv->channels.ptr[127] = 0; | ||
257 | nv50_channel_del(&dev_priv->channels.ptr[0]); | ||
258 | |||
259 | nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); | ||
260 | nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); | ||
261 | |||
262 | if (drm_mm_initialized(&dev_priv->ramin_heap)) | ||
263 | drm_mm_takedown(&dev_priv->ramin_heap); | ||
264 | |||
265 | dev_priv->engine.instmem.priv = NULL; | ||
266 | kfree(priv); | ||
267 | } | ||
268 | |||
/* Suspend hook: mark PRAMIN unavailable so nothing accesses instance
 * memory while the device is asleep.  No hardware state is saved here;
 * nv50_instmem_resume() reprograms the BAR registers from priv. */
int
nv50_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	dev_priv->ramin_available = false;
	return 0;
}
277 | |||
/* Resume hook: reprogram the BAR configuration registers from the
 * still-live priv/channel state, then mark PRAMIN available again. */
void
nv50_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
					     NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
					    NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
					    NV50_PUNK_BAR3_CTXDMA_VALID);

	/* same 0x1900 clear as done at init time */
	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i*4), 0);

	dev_priv->ramin_available = true;
}
301 | |||
/* Per-gpuobj bookkeeping for NV50 instance memory allocations. */
struct nv50_gpuobj_node {
	struct nouveau_mem *vram;	/* backing VRAM allocation */
	struct nouveau_vma chan_vma;	/* channel VM mapping (set only for NVOBJ_FLAG_VM objects) */
	u32 align;			/* alignment requested at get() time */
};
307 | |||
308 | int | ||
309 | nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, | ||
310 | u32 size, u32 align) | ||
311 | { | ||
312 | struct drm_device *dev = gpuobj->dev; | ||
313 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
314 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
315 | struct nv50_gpuobj_node *node = NULL; | ||
316 | int ret; | ||
317 | |||
318 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
319 | if (!node) | ||
320 | return -ENOMEM; | ||
321 | node->align = align; | ||
322 | |||
323 | size = (size + 4095) & ~4095; | ||
324 | align = max(align, (u32)4096); | ||
325 | |||
326 | ret = vram->get(dev, size, align, 0, 0, &node->vram); | ||
327 | if (ret) { | ||
328 | kfree(node); | ||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | gpuobj->vinst = node->vram->offset; | ||
333 | |||
334 | if (gpuobj->flags & NVOBJ_FLAG_VM) { | ||
335 | u32 flags = NV_MEM_ACCESS_RW; | ||
336 | if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) | ||
337 | flags |= NV_MEM_ACCESS_SYS; | ||
338 | |||
339 | ret = nouveau_vm_get(chan->vm, size, 12, flags, | ||
340 | &node->chan_vma); | ||
341 | if (ret) { | ||
342 | vram->put(dev, &node->vram); | ||
343 | kfree(node); | ||
344 | return ret; | ||
345 | } | ||
346 | |||
347 | nouveau_vm_map(&node->chan_vma, node->vram); | ||
348 | gpuobj->linst = node->chan_vma.offset; | ||
349 | } | ||
350 | |||
351 | gpuobj->size = size; | ||
352 | gpuobj->node = node; | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | void | ||
357 | nv50_instmem_put(struct nouveau_gpuobj *gpuobj) | ||
358 | { | ||
359 | struct drm_device *dev = gpuobj->dev; | ||
360 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
361 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
362 | struct nv50_gpuobj_node *node; | ||
363 | |||
364 | node = gpuobj->node; | ||
365 | gpuobj->node = NULL; | ||
366 | |||
367 | if (node->chan_vma.node) { | ||
368 | nouveau_vm_unmap(&node->chan_vma); | ||
369 | nouveau_vm_put(&node->chan_vma); | ||
370 | } | ||
371 | vram->put(dev, &node->vram); | ||
372 | kfree(node); | ||
373 | } | ||
374 | |||
375 | int | ||
376 | nv50_instmem_map(struct nouveau_gpuobj *gpuobj) | ||
377 | { | ||
378 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; | ||
379 | struct nv50_gpuobj_node *node = gpuobj->node; | ||
380 | int ret; | ||
381 | |||
382 | ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, | ||
383 | NV_MEM_ACCESS_RW, &node->vram->bar_vma); | ||
384 | if (ret) | ||
385 | return ret; | ||
386 | |||
387 | nouveau_vm_map(&node->vram->bar_vma, node->vram); | ||
388 | gpuobj->pinst = node->vram->bar_vma.offset; | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | void | ||
393 | nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) | ||
394 | { | ||
395 | struct nv50_gpuobj_node *node = gpuobj->node; | ||
396 | |||
397 | if (node->vram->bar_vma.node) { | ||
398 | nouveau_vm_unmap(&node->vram->bar_vma); | ||
399 | nouveau_vm_put(&node->vram->bar_vma); | ||
400 | } | ||
401 | } | ||
402 | |||
/* Flush pending PRAMIN writes on NV50 and wait for the hardware to
 * signal completion (register 0x330c; bit 1 presumably means "busy" —
 * the poll waits for it to clear).  vm_lock serialises against other
 * users of the flush register. */
void
nv50_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x00330c, 0x00000001);
	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
415 | |||
/* NV84+ variant of the PRAMIN flush: identical sequence to the NV50
 * version but the flush register moved to 0x070000. */
void
nv84_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x070000, 0x00000001);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
428 | |||
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c new file mode 100644 index 00000000000..e0a9c3faa20 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_mc.c | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | |||
/* Master-control init for NV50: set every bit in PMC_ENABLE, taking
 * all engines out of reset.  Never fails. */
int
nv50_mc_init(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
	return 0;
}
37 | |||
/* Master-control teardown: intentionally empty — nothing to undo. */
void nv50_mc_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c new file mode 100644 index 00000000000..b57a2d180ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_ramht.h" | ||
28 | |||
/* NV50 PMPEG engine object; currently nothing beyond the common
 * exec-engine base (vfuncs filled in by nv50_mpeg_create()). */
struct nv50_mpeg_engine {
	struct nouveau_exec_engine base;
};
32 | |||
33 | static inline u32 | ||
34 | CTX_PTR(struct drm_device *dev, u32 offset) | ||
35 | { | ||
36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
37 | |||
38 | if (dev_priv->chipset == 0x50) | ||
39 | offset += 0x0260; | ||
40 | else | ||
41 | offset += 0x0060; | ||
42 | |||
43 | return offset; | ||
44 | } | ||
45 | |||
/* Allocate a PMPEG context for @chan and wire it into the channel's
 * RAMIN context-pointer area.  Stores the new gpuobj in
 * chan->engctx[engine].  Returns 0 or a nouveau_gpuobj_new() error. */
static int
nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* 512-byte zeroed context object */
	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	/* context-pointer DMA object in RAMIN: class word, limit, base */
	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);

	/* initial values inside the context image; meaning unknown but
	 * 0x00801ec1 matches what nv50_mpeg_init() writes to 0x00b300 */
	nv_wo32(ctx, 0x70, 0x00801ec1);
	nv_wo32(ctx, 0x7c, 0x0000037c);
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
76 | |||
/* Destroy @chan's PMPEG context.  If the hardware currently points at
 * this channel's instance, detach it first (under the context-switch
 * lock, with bit 0 of 0x00b32c — presumably a ctx-switch enable —
 * dropped around the update), then clear the RAMIN context pointers
 * and release the context object. */
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	unsigned long flags;
	u32 inst, i;

	if (!chan->ramin)
		return;

	/* instance address as the hardware reports it (valid bit set) */
	inst = chan->ramin->vinst >> 12;
	inst |= 0x80000000;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	if (nv_rd32(dev, 0x00b318) == inst)
		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* wipe the six context-pointer words written at context_new */
	for (i = 0x00; i <= 0x14; i += 4)
		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = NULL;
}
104 | |||
105 | static int | ||
106 | nv50_mpeg_object_new(struct nouveau_channel *chan, int engine, | ||
107 | u32 handle, u16 class) | ||
108 | { | ||
109 | struct drm_device *dev = chan->dev; | ||
110 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
111 | struct nouveau_gpuobj *obj = NULL; | ||
112 | int ret; | ||
113 | |||
114 | ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); | ||
115 | if (ret) | ||
116 | return ret; | ||
117 | obj->engine = 2; | ||
118 | obj->class = class; | ||
119 | |||
120 | nv_wo32(obj, 0x00, class); | ||
121 | nv_wo32(obj, 0x04, 0x00000000); | ||
122 | nv_wo32(obj, 0x08, 0x00000000); | ||
123 | nv_wo32(obj, 0x0c, 0x00000000); | ||
124 | dev_priv->engine.instmem.flush(dev); | ||
125 | |||
126 | ret = nouveau_ramht_insert(chan, handle, obj); | ||
127 | nouveau_gpuobj_ref(NULL, &obj); | ||
128 | return ret; | ||
129 | } | ||
130 | |||
/* Flush the VM TLBs for the PMPEG engine (VM engine id 0x08). */
static void
nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0x08);
}
136 | |||
/* Bring up the PMPEG engine: program its (mostly undocumented) control
 * registers, enable interrupts, and wait for the engine to report
 * idle.  Returns -EBUSY if 0x00b200 bit 0 never clears. */
static int
nv50_mpeg_init(struct drm_device *dev, int engine)
{
	nv_wr32(dev, 0x00b32c, 0x00000000);
	nv_wr32(dev, 0x00b314, 0x00000100);
	nv_wr32(dev, 0x00b0e0, 0x0000001a);

	nv_wr32(dev, 0x00b220, 0x00000044);
	nv_wr32(dev, 0x00b300, 0x00801ec1); /* same value context_new seeds at ctx+0x70 */
	nv_wr32(dev, 0x00b390, 0x00000000);
	nv_wr32(dev, 0x00b394, 0x00000000);
	nv_wr32(dev, 0x00b398, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);

	/* ack any stale interrupts, then unmask all of them */
	nv_wr32(dev, 0x00b100, 0xffffffff);
	nv_wr32(dev, 0x00b140, 0xffffffff);

	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
161 | |||
/* Shut the PMPEG engine down: clear bit 0 of 0x00b32c (presumably the
 * ctx-switch enable, mirroring context_del) and mask all interrupts.
 * Context save for suspend/resume is not implemented yet. */
static int
nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
	/*XXX: context save for s/r */
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	nv_wr32(dev, 0x00b140, 0x00000000);
	return 0;
}
170 | |||
/* PMPEG interrupt handler: silently acknowledge the expected
 * object-bind interrupt, log anything else (rate-limited), ack the
 * status bits, and report a VM trap on FB unit 1. */
static void
nv50_mpeg_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x00b100);
	u32 type = nv_rd32(dev, 0x00b230);
	u32 mthd = nv_rd32(dev, 0x00b234);
	u32 data = nv_rd32(dev, 0x00b238);
	u32 show = stat;	/* bits still worth logging after filtering */

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nv_wr32(dev, 0x00b308, 0x00000100);
			show &= ~0x01000000;
		}
	}

	if (show && nouveau_ratelimit()) {
		NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
			stat, type, mthd, data);
	}

	/* ack all pending bits */
	nv_wr32(dev, 0x00b100, stat);
	nv_wr32(dev, 0x00b230, 0x00000001);
	nv50_fb_vm_trap(dev, 1);
}
197 | |||
/* Combined VPE interrupt handler for NV50: dispatches PMPEG interrupts
 * to nv50_mpeg_isr(), and logs/acks PMSRCH (0x00b800) status.  Note
 * the status register is read twice; the second read's value is the
 * one logged and written back (0xb800 is the same register as
 * 0x00b800, just written without the leading zeroes). */
static void
nv50_vpe_isr(struct drm_device *dev)
{
	if (nv_rd32(dev, 0x00b100))
		nv50_mpeg_isr(dev);

	if (nv_rd32(dev, 0x00b800)) {
		u32 stat = nv_rd32(dev, 0x00b800);
		NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
		nv_wr32(dev, 0xb800, stat);
	}
}
210 | |||
/* Tear down the PMPEG engine wrapper: unhook the IRQ handler
 * registered by nv50_mpeg_create(), deregister the engine, and free
 * the engine struct. */
static void
nv50_mpeg_destroy(struct drm_device *dev, int engine)
{
	struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 0);

	NVOBJ_ENGINE_DEL(dev, MPEG);
	kfree(pmpeg);
}
221 | |||
/* Register the PMPEG engine: allocate the engine struct, fill in the
 * vfuncs, hook the interrupt line, and expose the chipset-appropriate
 * object class (0x3174 + the full VPE ISR on NV50, 0x8274 on later
 * chipsets). */
int
nv50_mpeg_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_mpeg_engine *pmpeg;

	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
	if (!pmpeg)
		return -ENOMEM;

	pmpeg->base.destroy = nv50_mpeg_destroy;
	pmpeg->base.init = nv50_mpeg_init;
	pmpeg->base.fini = nv50_mpeg_fini;
	pmpeg->base.context_new = nv50_mpeg_context_new;
	pmpeg->base.context_del = nv50_mpeg_context_del;
	pmpeg->base.object_new = nv50_mpeg_object_new;
	pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;

	if (dev_priv->chipset == 0x50) {
		/* NV50 shares the IRQ with PMSRCH, handled by the VPE ISR */
		nouveau_irq_register(dev, 0, nv50_vpe_isr);
		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
		NVOBJ_CLASS(dev, 0x3174, MPEG);
#if 0
		NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
		NVOBJ_CLASS(dev, 0x4075, ME);
#endif
	} else {
		nouveau_irq_register(dev, 0, nv50_mpeg_isr);
		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
		NVOBJ_CLASS(dev, 0x8274, MPEG);
	}

	return 0;

}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c new file mode 100644 index 00000000000..ffe8b483b7b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Maarten Maathuis. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm_crtc_helper.h" | ||
29 | |||
30 | #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) | ||
31 | #include "nouveau_reg.h" | ||
32 | #include "nouveau_drv.h" | ||
33 | #include "nouveau_dma.h" | ||
34 | #include "nouveau_encoder.h" | ||
35 | #include "nouveau_connector.h" | ||
36 | #include "nouveau_crtc.h" | ||
37 | #include "nv50_display.h" | ||
38 | |||
/*
 * Detach the SOR from the CRTC it is currently driving.
 *
 * Blanks the bound CRTC, then pushes two methods into the EVO master
 * channel: MODE_CTRL = 0 for this SOR (output off) followed by an
 * UPDATE to latch the change.  Finally clears the cached crtc pointer
 * and records the encoder as powered down.
 */
static void
nv50_sor_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	/* Not bound to a CRTC: nothing to disconnect. */
	if (!nv_encoder->crtc)
		return;
	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);

	NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);

	/* Two methods, one data word each = 4 ring entries. */
	ret = RING_SPACE(evo, 4);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING (evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING (evo, 0);

	nv_encoder->crtc = NULL;
	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
66 | |||
/*
 * drm_encoder_helper_funcs.dpms for NV50 SOR outputs.
 *
 * One OR can back several DCB encoder entries (TMDS/LVDS/DP), so the
 * hardware is left untouched while any *other* encoder sharing this OR
 * is still in DPMS_ON.  Otherwise the DPMS_CTRL register is rewritten
 * with the PENDING bit set and the STATE register polled until the
 * transition completes.  DP outputs additionally get a DPCD power-state
 * write to the sink and, on power-up, a fresh link training.
 */
static void
nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder *enc;
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);

	nv_encoder->last_dpms = mode;
	/* If any other SOR-class encoder on the same OR is active, defer. */
	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nvenc = nouveau_encoder(enc);

		if (nvenc == nv_encoder ||
		    (nvenc->dcb->type != OUTPUT_TMDS &&
		     nvenc->dcb->type != OUTPUT_LVDS &&
		     nvenc->dcb->type != OUTPUT_DP) ||
		    nvenc->dcb->or != nv_encoder->dcb->or)
			continue;

		if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
			return;
	}

	/* Wait for any previous DPMS request to finish before touching CTRL. */
	if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
	}

	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));

	if (mode == DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
	else
		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;

	/* Kick the transition and wait for the state machine to settle. */
	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
	if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
	}

	/* DisplayPort sinks track power state via DPCD; mirror it there. */
	if (nv_encoder->dcb->type == OUTPUT_DP) {
		struct nouveau_i2c_chan *auxch;

		auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
		if (!auxch)
			return;

		if (mode == DRM_MODE_DPMS_ON) {
			u8 status = DP_SET_POWER_D0;
			nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
			nouveau_dp_link_train(encoder);
		} else {
			u8 status = DP_SET_POWER_D3;
			nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
		}
	}
}
134 | |||
/* Unimplemented stub for drm_encoder_helper_funcs.save; logs loudly so
 * any unexpected call is visible in the kernel log. */
static void
nv50_sor_save(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
140 | |||
/* Unimplemented stub for drm_encoder_helper_funcs.restore; logs loudly
 * so any unexpected call is visible in the kernel log. */
static void
nv50_sor_restore(struct drm_encoder *encoder)
{
	NV_ERROR(encoder->dev, "!!\n");
}
146 | |||
147 | static bool | ||
148 | nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
149 | struct drm_display_mode *adjusted_mode) | ||
150 | { | ||
151 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
152 | struct nouveau_connector *connector; | ||
153 | |||
154 | NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); | ||
155 | |||
156 | connector = nouveau_encoder_connector_get(nv_encoder); | ||
157 | if (!connector) { | ||
158 | NV_ERROR(encoder->dev, "Encoder has no connector\n"); | ||
159 | return false; | ||
160 | } | ||
161 | |||
162 | if (connector->scaling_mode != DRM_MODE_SCALE_NONE && | ||
163 | connector->native_mode) { | ||
164 | int id = adjusted_mode->base.id; | ||
165 | *adjusted_mode = *connector->native_mode; | ||
166 | adjusted_mode->base.id = id; | ||
167 | } | ||
168 | |||
169 | return true; | ||
170 | } | ||
171 | |||
/* No per-SOR work is needed before a mode set; present only to satisfy
 * the drm_encoder_helper_funcs contract. */
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
}
176 | |||
/* No per-SOR work is needed after a mode set; present only to satisfy
 * the drm_encoder_helper_funcs contract. */
static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}
181 | |||
/*
 * Program the SOR for a new mode via the EVO master channel.
 *
 * Powers the output up, builds the MODE_CTRL word from the output type
 * (the low bits select the TMDS/DP link configuration; values appear to
 * come from reverse engineering — for TMDS, 165MHz is the single-link
 * pixel-clock limit), ORs in the target CRTC and sync-polarity flags,
 * and submits it.  On success the encoder's crtc pointer is cached for
 * later disconnect.
 */
static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);

	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);

	switch (nv_encoder->dcb->type) {
	case OUTPUT_TMDS:
		if (nv_encoder->dcb->sorconf.link & 1) {
			/* link A: single-link below 165MHz, dual-link above */
			if (adjusted_mode->clock < 165000)
				mode_ctl = 0x0100;
			else
				mode_ctl = 0x0500;
		} else
			mode_ctl = 0x0200;
		break;
	case OUTPUT_DP:
		/* mc_unknown was probed at create time from hw state */
		mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
		if (nv_encoder->dcb->sorconf.link & 1)
			mode_ctl |= 0x00000800;
		else
			mode_ctl |= 0x00000900;
		break;
	default:
		break;
	}

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while connecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, mode_ctl);

	nv_encoder->crtc = encoder->crtc;
}
240 | |||
241 | static struct drm_crtc * | ||
242 | nv50_sor_crtc_get(struct drm_encoder *encoder) | ||
243 | { | ||
244 | return nouveau_encoder(encoder)->crtc; | ||
245 | } | ||
246 | |||
/* Helper vtable wired into each SOR encoder; .detect is unused for SORs. */
static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
	.dpms = nv50_sor_dpms,
	.save = nv50_sor_save,
	.restore = nv50_sor_restore,
	.mode_fixup = nv50_sor_mode_fixup,
	.prepare = nv50_sor_prepare,
	.commit = nv50_sor_commit,
	.mode_set = nv50_sor_mode_set,
	.get_crtc = nv50_sor_crtc_get,
	.detect = NULL,
	.disable = nv50_sor_disconnect
};
259 | |||
260 | static void | ||
261 | nv50_sor_destroy(struct drm_encoder *encoder) | ||
262 | { | ||
263 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
264 | |||
265 | if (!encoder) | ||
266 | return; | ||
267 | |||
268 | NV_DEBUG_KMS(encoder->dev, "\n"); | ||
269 | |||
270 | drm_encoder_cleanup(encoder); | ||
271 | |||
272 | kfree(nv_encoder); | ||
273 | } | ||
274 | |||
/* Core encoder vtable; only destruction needs driver-specific handling. */
static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
	.destroy = nv50_sor_destroy,
};
278 | |||
/*
 * Create a SOR encoder for the given DCB entry and attach it to
 * @connector.
 *
 * TMDS and DP entries are exposed as DRM TMDS encoders, LVDS as LVDS;
 * any other DCB type is rejected with -EINVAL.  For DP outputs, some
 * link-configuration state is snapshotted from display registers left
 * by the VBIOS (register offsets/bitfields from reverse engineering —
 * hence the "unknown" field names) for reuse at mode-set time.
 *
 * Returns 0 on success or a negative errno.
 */
int
nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
{
	struct nouveau_encoder *nv_encoder = NULL;
	struct drm_device *dev = connector->dev;
	struct drm_encoder *encoder;
	int type;

	NV_DEBUG_KMS(dev, "\n");

	switch (entry->type) {
	case OUTPUT_TMDS:
	case OUTPUT_DP:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case OUTPUT_LVDS:
		type = DRM_MODE_ENCODER_LVDS;
		break;
	default:
		return -EINVAL;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	encoder = to_drm_encoder(nv_encoder);

	nv_encoder->dcb = entry;
	/* DCB stores the OR as a bitmask; convert to a 0-based index. */
	nv_encoder->or = ffs(entry->or) - 1;
	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;

	drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
	drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	if (nv_encoder->dcb->type == OUTPUT_DP) {
		int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
		uint32_t tmp;

		/* Probe VBIOS-programmed state; fall back to the second
		 * register block if the first reads zero. */
		tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
		if (!tmp)
			tmp = nv_rd32(dev, 0x610798 + (or * 8));

		switch ((tmp & 0x00000f00) >> 8) {
		case 8:
		case 9:
			nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
			tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
			nv_encoder->dp.unk0 = tmp & 0x000001fc;
			tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
			nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
			break;
		default:
			break;
		}

		/* Default when nothing usable was found in the registers. */
		if (!nv_encoder->dp.mc_unknown)
			nv_encoder->dp.mc_unknown = 5;
	}

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c new file mode 100644 index 00000000000..40b84f22d81 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
/*
 * Write one page-directory entry into @pgd at index @pde.
 *
 * pgt[0] is the small-page (4KiB) page table, pgt[1] the large-page
 * (64KiB) one; whichever exists supplies the PDE's target address and
 * validity bit (bit 0).  With neither present, a poison value with the
 * present bit clear is written.  Bits 0x20/0x40/0x60 appear to encode
 * the page-table size class from its coverage — exact hw meaning per
 * reverse-engineered docs, confirm against envytools.
 */
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		struct nouveau_gpuobj *pgt[2])
{
	u64 phys = 0xdeadcafe00000000ULL;	/* poison for "no table" */
	u32 coverage = 0;

	if (pgt[0]) {
		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
		coverage = (pgt[0]->size >> 3) << 12;
	} else
	if (pgt[1]) {
		phys = 0x00000001 | pgt[1]->vinst; /* present */
		coverage = (pgt[1]->size >> 3) << 16;
	}

	if (phys & 1) {
		if (coverage <= 32 * 1024 * 1024)
			phys |= 0x60;
		else if (coverage <= 64 * 1024 * 1024)
			phys |= 0x40;
		else if (coverage < 128 * 1024 * 1024)
			phys |= 0x20;
	}

	/* PDEs are 8 bytes, little-halves first. */
	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}
58 | |||
/*
 * Build a 64-bit PTE value from a physical address.
 *
 * Bit 0 = present, bits 40+ carry the memory type, bits 4..5 the target
 * aperture (0 = VRAM, 2 = as passed by callers for system pages, 3 =
 * stolen system memory).  On IGPs without real VRAM, VRAM-targeted
 * addresses are rebased onto the stolen-memory window.  Bit 6 is set
 * for system-accessible mappings, bit 3 marks read-only (no
 * write-only/WO access requested).
 */
static inline u64
nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;

	phys |= 1; /* present */
	phys |= (u64)memtype << 40;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	if (target == 0 && dev_priv->vram_sys_base) {
		phys += dev_priv->vram_sys_base;
		target = 3;
	}

	phys |= target << 4;

	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= (1 << 6);

	if (!(vma->access & NV_MEM_ACCESS_WO))
		phys |= (1 << 3);

	return phys;
}
83 | |||
/*
 * Fill @cnt PTEs starting at index @pte for a contiguous VRAM mapping.
 *
 * PTEs are written in power-of-two runs: the largest aligned block
 * (8..1024 entries) that fits the remaining count is chosen, its size
 * class encoded into bits 7..9 of the low PTE word, and the same
 * PTE value replicated across the run.  When the memtype indicates
 * compression (bits 0x180), the compression tag address is folded into
 * the high word.  Note @pte and @cnt are converted to byte units
 * (entries * 8) up front, so "block" below counts bytes too.
 */
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u32 comp = (mem->memtype & 0x180) >> 7;	/* compression mode, 0 = off */
	u32 block;
	int i;

	phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;
	cnt <<= 3;

	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);

		/* Largest aligned power-of-two run that still fits. */
		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 3);
			if (cnt >= block && !(pte & (block - 1)))
				break;
		}
		offset_l |= (i << 7);	/* encode run size class in the PTE */

		/* vma->node->type is the page shift; advance phys by the
		 * bytes this run maps. */
		phys += block << (vma->node->type - 3);
		cnt -= block;
		if (comp) {
			u32 tag = mem->tag->start + ((delta >> 16) * comp);
			offset_h |= (tag << 17);
			delta += block << (vma->node->type - 3);
		}

		/* Replicate the PTE across every 8-byte entry in the run. */
		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
}
123 | |||
124 | void | ||
125 | nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | ||
126 | struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) | ||
127 | { | ||
128 | pte <<= 3; | ||
129 | while (cnt--) { | ||
130 | u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2); | ||
131 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); | ||
132 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); | ||
133 | pte += 8; | ||
134 | } | ||
135 | } | ||
136 | |||
137 | void | ||
138 | nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) | ||
139 | { | ||
140 | pte <<= 3; | ||
141 | while (cnt--) { | ||
142 | nv_wo32(pgt, pte + 0, 0x00000000); | ||
143 | nv_wo32(pgt, pte + 4, 0x00000000); | ||
144 | pte += 8; | ||
145 | } | ||
146 | } | ||
147 | |||
/*
 * Flush TLBs after page-table updates on @vm.
 *
 * Instance memory is flushed first so the engines see the new PTEs.
 * The BAR VMs only need the BAR engine (6) flushed; for channel VMs,
 * PFIFO plus every engine holding a reference on the VM is flushed.
 */
void
nv50_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	pinstmem->flush(vm->dev);

	/* BAR */
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
		nv50_vm_flush_engine(vm->dev, 6);
		return;
	}

	pfifo->tlb_flush(vm->dev);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		/* Only engines with live contexts on this VM need a flush. */
		if (atomic_read(&vm->engref[i]))
			dev_priv->eng[i]->tlb_flush(vm->dev, i);
	}
}
170 | |||
/*
 * Trigger a TLB flush for a single engine via register 0x100c80 and
 * busy-wait for the hardware to clear the go bit.  Serialised with
 * vm_lock since the register is shared by all flush requests.
 */
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c new file mode 100644 index 00000000000..af32daecd1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vram.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | |||
/* Per-tile-flags-type table, indexed by the 7-bit type from tile_flags.
 * 0 marks an invalid type; non-zero values are fed to nouveau_mm_get()
 * as the allocation type (the distinction between 1 and 2 presumably
 * reflects different memory kinds — confirm against hw docs). */
static int types[0x80] = {
	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
39 | |||
40 | bool | ||
41 | nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags) | ||
42 | { | ||
43 | int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8; | ||
44 | |||
45 | if (likely(type < ARRAY_SIZE(types) && types[type])) | ||
46 | return true; | ||
47 | return false; | ||
48 | } | ||
49 | |||
/*
 * Release a VRAM allocation made by nv50_vram_new().
 *
 * Clears the caller's pointer before freeing, returns each region to
 * the VRAM mm allocator under its mutex, releases any compression-tag
 * block, then frees the nouveau_mem itself.  Safe to call with *pmem
 * == NULL.
 */
void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *this;
	struct nouveau_mem *mem;

	/* Take ownership and NULL the caller's handle up front. */
	mem = *pmem;
	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&mm->mutex);
	while (!list_empty(&mem->regions)) {
		this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);

		list_del(&this->rl_entry);
		nouveau_mm_put(mm, this);
	}

	if (mem->tag) {
		drm_mm_put_block(mem->tag);
		mem->tag = NULL;
	}
	mutex_unlock(&mm->mutex);

	kfree(mem);
}
79 | |||
/*
 * Allocate @size bytes of VRAM.
 *
 * @align     - required alignment, bytes
 * @size_nc   - minimum chunk size for a non-contiguous allocation, bytes
 *              (0 means the allocation must be contiguous)
 * @memtype   - low 7 bits select the type (must be valid in types[]),
 *              bits 8..9 request compression
 * @pmem      - receives the allocation on success
 *
 * Sizes are handled internally in 4KiB pages.  A compression-tag block
 * is only attempted for 64KiB-aligned requests; if tags can't be had,
 * the allocation silently degrades to uncompressed.  The allocation may
 * be split across multiple mm regions; mem->offset reports the first
 * region's address.
 *
 * Returns 0 on success, -EINVAL for a bad type, -ENOMEM on allocation
 * failure.
 */
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
	      u32 memtype, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int comp = (memtype & 0x300) >> 8;
	int type = (memtype & 0x07f);
	int ret;

	if (!types[type])
		return -EINVAL;
	size >>= 12;
	align >>= 12;
	size_nc >>= 12;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mutex_lock(&mm->mutex);
	if (comp) {
		/* Tags only for 64KiB-aligned allocations (align in pages). */
		if (align == 16) {
			struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
			int n = (size >> 4) * comp;

			mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
			if (mem->tag)
				mem->tag = drm_mm_get_block(mem->tag, n, 0);
		}

		/* No tags available: fall back to uncompressed. */
		if (unlikely(!mem->tag))
			comp = 0;
	}

	INIT_LIST_HEAD(&mem->regions);
	mem->dev = dev_priv->dev;
	mem->memtype = (comp << 7) | type;
	mem->size = size;

	/* Grab regions until the full size is covered; size_nc permits
	 * smaller chunks when contiguity isn't required. */
	do {
		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
		if (ret) {
			mutex_unlock(&mm->mutex);
			nv50_vram_del(dev, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&mm->mutex);

	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
140 | |||
/*
 * Derive the VRAM row-block size (allocation granularity) from the
 * memory-controller configuration registers.
 *
 * Decodes partition count, column bits, row bits and bank count from
 * 0x100200/0x100204/0x001540, cross-checks the implied total against
 * the reported VRAM size (warning on mismatch), and triples the row
 * size when 0x100250 bit 0 indicates a 3-way configuration.
 */
static u32
nv50_vram_rblock(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i, parts, colbits, rowbitsa, rowbitsb, banks;
	u64 rowsize, predicted;
	u32 r0, r4, rt, ru, rblock_size;

	r0 = nv_rd32(dev, 0x100200);
	r4 = nv_rd32(dev, 0x100204);
	rt = nv_rd32(dev, 0x100250);
	ru = nv_rd32(dev, 0x001540);
	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);

	/* Count enabled memory partitions (mask at bits 16..23). */
	for (i = 0, parts = 0; i < 8; i++) {
		if (ru & (0x00010000 << i))
			parts++;
	}

	colbits = (r4 & 0x0000f000) >> 12;
	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	banks = ((r4 & 0x01000000) ? 8 : 4);

	rowsize = parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;
	/* Bit 2 of 0x100200 appears to indicate a second rank/bank set. */
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	if (predicted != dev_priv->vram_size) {
		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
			(u32)(dev_priv->vram_size >> 20));
		NV_WARN(dev, "we calculated %dMiB VRAM\n",
			(u32)(predicted >> 20));
	}

	rblock_size = rowsize;
	if (rt & 1)
		rblock_size *= 3;

	NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
	return rblock_size;
}
184 | |||
/*
 * Size VRAM and initialise the VRAM mm allocator.
 *
 * The 40-bit VRAM size is read from 0x10020c (its low byte doubles as
 * bits 32..39).  The first 256KiB (VGA memory) and last 1MiB (VBIOS
 * scratch etc.) are kept out of the allocator.  IGPs (0xaa/0xac/0xaf)
 * have no real VRAM: the stolen-memory base is recorded and a minimal
 * one-page row block used instead of probing the memory controller.
 */
int
nv50_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	u32 rblock, length;

	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
	dev_priv->vram_size &= 0xffffffff00ULL;

	/* IGPs, no funky reordering happens here, they don't have VRAM */
	if (dev_priv->chipset == 0xaa ||
	    dev_priv->chipset == 0xac ||
	    dev_priv->chipset == 0xaf) {
		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
		rblock = 4096 >> 12;
	} else {
		rblock = nv50_vram_rblock(dev) >> 12;
	}

	length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;

	return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
}
212 | |||
213 | void | ||
214 | nv50_vram_fini(struct drm_device *dev) | ||
215 | { | ||
216 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
217 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
218 | |||
219 | nouveau_mm_fini(&vram->mm); | ||
220 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c new file mode 100644 index 00000000000..edece9c616e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_util.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | #include "nouveau_ramht.h" | ||
30 | |||
/* PCRYPT engine state; currently nothing beyond the common engine base. */
struct nv84_crypt_engine {
	struct nouveau_exec_engine base;
};
34 | |||
/*
 * Create a PCRYPT context for @chan.
 *
 * Allocates a 256-byte zeroed context object and plugs its virtual
 * address range into the channel's RAMIN slots at 0xa0..0xb4, then
 * flushes instance memory and bumps the VM engine refcount so TLB
 * flushes reach PCRYPT while this context lives.
 */
static int
nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, 0xa0, 0x00190000);
	nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);	/* context limit */
	nv_wo32(ramin, 0xa8, ctx->vinst);			/* context base */
	nv_wo32(ramin, 0xac, 0);
	nv_wo32(ramin, 0xb0, 0);
	nv_wo32(ramin, 0xb4, 0);
	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
	return 0;
}
63 | |||
/*
 * Destroy the PCRYPT context of @chan.
 *
 * If the hardware still holds this channel's instance in its current/
 * next context registers (0x102188/0x10218c), the valid bit is cleared
 * while fetches are disabled via 0x10200c; skipping this causes issues
 * the next time PCRYPT is used.  Then drops the context object and the
 * VM engine reference.
 */
static void
nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

	/* Instance address as the hardware stores it: page number + valid. */
	inst  = (chan->ramin->vinst >> 12);
	inst |= 0x80000000;

	/* mark context as invalid if still on the hardware, not
	 * doing this causes issues the next time PCRYPT is used,
	 * unsurprisingly :)
	 */
	nv_wr32(dev, 0x10200c, 0x00000000);
	if (nv_rd32(dev, 0x102188) == inst)
		nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
	if (nv_rd32(dev, 0x10218c) == inst)
		nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
	nv_wr32(dev, 0x10200c, 0x00000010);

	nouveau_gpuobj_ref(NULL, &ctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
}
90 | |||
/*
 * Instantiate a PCRYPT object of @class and bind it to @handle in the
 * channel's RAMHT.  The object itself is a minimal 16-byte gpuobj
 * holding just the class id; the local reference is dropped once the
 * RAMHT holds its own.
 */
static int
nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 5;	/* PCRYPT engine id as seen by RAMHT */
	obj->class  = class;

	nv_wo32(obj, 0x00, class);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
113 | |||
/* TLB flush hook: PCRYPT is VM engine 0x0a on these chips. */
static void
nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0x0a);
}
119 | |||
/*
 * PCRYPT interrupt handler.
 *
 * Snapshots status/method/data/instance, logs them (rate-limited),
 * acknowledges the interrupt, re-enables fetching via 0x10200c, and
 * lets the common nv50 FB code report any pending VM trap.
 */
static void
nv84_crypt_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x102130);
	u32 mthd = nv_rd32(dev, 0x102190);
	u32 data = nv_rd32(dev, 0x102194);
	u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; /* strip valid bit */
	int show = nouveau_ratelimit();

	if (show) {
		NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			     stat, mthd, data, inst);
	}

	nv_wr32(dev, 0x102130, stat);	/* ack */
	nv_wr32(dev, 0x10200c, 0x10);

	nv50_fb_vm_trap(dev, show);
}
139 | |||
/* Engine shutdown: mask all PCRYPT interrupts; nothing needs saving. */
static int
nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_wr32(dev, 0x102140, 0x00000000);
	return 0;
}
146 | |||
/*
 * Engine init: pulse PCRYPT's reset bit in PMC_ENABLE, clear pending
 * interrupts, unmask (nearly) all of them, and enable fetching.
 */
static int
nv84_crypt_init(struct drm_device *dev, int engine)
{
	/* Reset the unit: bit 14 of PMC_ENABLE off then on. */
	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);

	nv_wr32(dev, 0x102130, 0xffffffff);	/* ack everything */
	nv_wr32(dev, 0x102140, 0xffffffbf);	/* intr enable, bit 6 masked */

	nv_wr32(dev, 0x10200c, 0x00000010);
	return 0;
}
159 | |||
160 | static void | ||
161 | nv84_crypt_destroy(struct drm_device *dev, int engine) | ||
162 | { | ||
163 | struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine); | ||
164 | |||
165 | NVOBJ_ENGINE_DEL(dev, CRYPT); | ||
166 | |||
167 | nouveau_irq_unregister(dev, 14); | ||
168 | kfree(pcrypt); | ||
169 | } | ||
170 | |||
171 | int | ||
172 | nv84_crypt_create(struct drm_device *dev) | ||
173 | { | ||
174 | struct nv84_crypt_engine *pcrypt; | ||
175 | |||
176 | pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL); | ||
177 | if (!pcrypt) | ||
178 | return -ENOMEM; | ||
179 | |||
180 | pcrypt->base.destroy = nv84_crypt_destroy; | ||
181 | pcrypt->base.init = nv84_crypt_init; | ||
182 | pcrypt->base.fini = nv84_crypt_fini; | ||
183 | pcrypt->base.context_new = nv84_crypt_context_new; | ||
184 | pcrypt->base.context_del = nv84_crypt_context_del; | ||
185 | pcrypt->base.object_new = nv84_crypt_object_new; | ||
186 | pcrypt->base.tlb_flush = nv84_crypt_tlb_flush; | ||
187 | |||
188 | nouveau_irq_register(dev, 14, nv84_crypt_isr); | ||
189 | |||
190 | NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base); | ||
191 | NVOBJ_CLASS (dev, 0x74c1, CRYPT); | ||
192 | return 0; | ||
193 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c new file mode 100644 index 00000000000..8f356d58e40 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nva3_copy.c | |||
@@ -0,0 +1,226 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include <linux/firmware.h> | ||
26 | #include "drmP.h" | ||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_util.h" | ||
29 | #include "nouveau_vm.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | #include "nva3_copy.fuc.h" | ||
32 | |||
/* PCOPY (async copy) engine instance for nva3+ boards; wraps the
 * generic exec-engine base with no additional state. */
struct nva3_copy_engine {
	struct nouveau_exec_engine base;
};
36 | |||
/* Allocate a 256-byte PCOPY context object for the channel and describe
 * it to the hardware via the channel's instance memory.
 *
 * Returns 0 on success or the negative error from the gpuobj allocation.
 */
static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	/* context descriptor at ramin 0xc0-0xd4; NOTE(review): layout looks
	 * like flags, limit, base, then three zeroed words -- the same range
	 * context_del scrubs later */
	nv_wo32(ramin, 0xc0, 0x00190000);
	nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, 0xc8, ctx->vinst);
	nv_wo32(ramin, 0xcc, 0x00000000);
	nv_wo32(ramin, 0xd0, 0x00000000);
	nv_wo32(ramin, 0xd4, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	/* account an engine reference against the channel's VM */
	atomic_inc(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
	return 0;
}
65 | |||
/* Create a software object for the given class/handle.  The fuc engine
 * itself needs no object, but the ramht code requires an entry, so the
 * context gpuobj is reused as the ramht object.
 */
static int
nva3_copy_object_new(struct nouveau_channel *chan, int engine,
		     u32 handle, u16 class)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];

	/* fuc engine doesn't need an object, our ramht code does.. */
	ctx->engine = 3;
	ctx->class = class;
	return nouveau_ramht_insert(chan, handle, ctx);
}
77 | |||
/* Destroy a channel's PCOPY context: detach it from the hardware's
 * current/next channel registers, scrub the ramin descriptor written by
 * context_new, and drop the context object.
 */
static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

	/* instance address in 4KiB units tagged with the bit the hw uses
	 * in 0x104050/0x104054 (NOTE(review): presumably a "valid" flag) */
	inst = (chan->ramin->vinst >> 12);
	inst |= 0x40000000;

	/* disable fifo access */
	nv_wr32(dev, 0x104048, 0x00000000);
	/* mark channel as unloaded if it's currently active */
	if (nv_rd32(dev, 0x104050) == inst)
		nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
	/* mark next channel as invalid if it's about to be loaded */
	if (nv_rd32(dev, 0x104054) == inst)
		nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
	/* restore fifo access */
	nv_wr32(dev, 0x104048, 0x00000003);

	/* clear the 0xc0-0xd4 context descriptor set up by context_new */
	for (inst = 0xc0; inst <= 0xd4; inst += 4)
		nv_wo32(chan->ramin, inst, 0x00000000);

	/* drops our reference and NULLs ctx, so the store below clears
	 * chan->engctx[engine] */
	nouveau_gpuobj_ref(NULL, &ctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
}
107 | |||
/* Flush VM TLBs for the PCOPY engine (VM engine id 0x0d). */
static void
nva3_copy_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0x0d);
}
113 | |||
/* Reset PCOPY, upload the data/code images assembled from
 * nva3_copy.fuc, and start the falcon executing.
 */
static int
nva3_copy_init(struct drm_device *dev, int engine)
{
	int i;

	/* pulse bit 13 of the master engine-enable register to reset PCOPY */
	nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
	nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
	nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */

	/* upload ucode data segment; the data port auto-increments */
	nv_wr32(dev, 0x1041c0, 0x01000000);
	for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
		nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);

	/* upload code segment in 64-word pages selected via 0x104188 */
	nv_wr32(dev, 0x104180, 0x01000000);
	for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, 0x104188, i >> 6);
		nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
	}

	/* start it running */
	nv_wr32(dev, 0x10410c, 0x00000000);
	nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
	nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
	return 0;
}
141 | |||
/* Quiesce PCOPY for suspend/teardown: cut fifo access, have the fuc
 * unload any active context, then mask its interrupts.
 * No state needs saving beyond the context unload, so `suspend` is
 * unused.
 */
static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, 0x104048, 0x00000003, 0x00000000);

	/* trigger fuc context unload */
	nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
	nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
	nv_wr32(dev, 0x104000, 0x00000008);
	nv_wait(dev, 0x104008, 0x00000008, 0x00000000);

	nv_wr32(dev, 0x104014, 0xffffffff);
	return 0;
}
156 | |||
/* Decode table for the DISPATCH_ERROR code the fuc reports; the values
 * correspond to the error paths in nva3_copy.fuc's dispatch routine. */
static struct nouveau_enum nva3_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};
163 | |||
/* PCOPY interrupt handler: decode and log DISPATCH_ERROR exceptions
 * raised by the fuc; ack anything else as unhandled.
 */
static void
nva3_copy_isr(struct drm_device *dev)
{
	/* NOTE(review): stat = pending & low half of 0x10401c & ~high half;
	 * 0x10401c presumably holds enable/mask bits -- confirm */
	u32 dispatch = nv_rd32(dev, 0x10401c);
	u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
	u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
	u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
	u32 addr = nv_rd32(dev, 0x104040) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;
	u32 subc = (addr & 0x3800) >> 11;
	u32 data = nv_rd32(dev, 0x104044);
	int chid = nv50_graph_isr_chid(dev, inst);

	if (stat & 0x00000040) {
		/* bit 6: dispatch error signalled by the fuc itself */
		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, mthd, data);
		nv_wr32(dev, 0x104004, 0x00000040); /* ack */
		stat &= ~0x00000040;
	}

	if (stat) {
		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
		nv_wr32(dev, 0x104004, stat);
	}
	nv50_fb_vm_trap(dev, 1);
}
192 | |||
193 | static void | ||
194 | nva3_copy_destroy(struct drm_device *dev, int engine) | ||
195 | { | ||
196 | struct nva3_copy_engine *pcopy = nv_engine(dev, engine); | ||
197 | |||
198 | nouveau_irq_unregister(dev, 22); | ||
199 | |||
200 | NVOBJ_ENGINE_DEL(dev, COPY0); | ||
201 | kfree(pcopy); | ||
202 | } | ||
203 | |||
204 | int | ||
205 | nva3_copy_create(struct drm_device *dev) | ||
206 | { | ||
207 | struct nva3_copy_engine *pcopy; | ||
208 | |||
209 | pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL); | ||
210 | if (!pcopy) | ||
211 | return -ENOMEM; | ||
212 | |||
213 | pcopy->base.destroy = nva3_copy_destroy; | ||
214 | pcopy->base.init = nva3_copy_init; | ||
215 | pcopy->base.fini = nva3_copy_fini; | ||
216 | pcopy->base.context_new = nva3_copy_context_new; | ||
217 | pcopy->base.context_del = nva3_copy_context_del; | ||
218 | pcopy->base.object_new = nva3_copy_object_new; | ||
219 | pcopy->base.tlb_flush = nva3_copy_tlb_flush; | ||
220 | |||
221 | nouveau_irq_register(dev, 22, nva3_copy_isr); | ||
222 | |||
223 | NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base); | ||
224 | NVOBJ_CLASS(dev, 0x85b5, COPY0); | ||
225 | return 0; | ||
226 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc new file mode 100644 index 00000000000..eaf35f8321e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc | |||
@@ -0,0 +1,870 @@ | |||
1 | /* fuc microcode for copy engine on nva3- chipsets | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
/* To build for nva3:nvc0
 * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
 *
 * To build for nvc0-
 * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
 */

// m4 selects the output data section name for the target chipset
ifdef(`NVA3',
.section nva3_pcopy_data,
.section nvc0_pcopy_data
)

// Per-channel state: one 32-bit cell per item.  The data segment up to
// the .align below is the context image that swctx saves/restores
// ("256-byte context, at start of data segment").
ctx_object:                   .b32 0
ifdef(`NVA3',
ctx_dma:
ctx_dma_query:                .b32 0
ctx_dma_src:                  .b32 0
ctx_dma_dst:                  .b32 0
,)
.equ ctx_dma_count 3
ctx_query_address_high:       .b32 0
ctx_query_address_low:        .b32 0
ctx_query_counter:            .b32 0
ctx_src_address_high:         .b32 0
ctx_src_address_low:          .b32 0
ctx_src_pitch:                .b32 0
ctx_src_tile_mode:            .b32 0
ctx_src_xsize:                .b32 0
ctx_src_ysize:                .b32 0
ctx_src_zsize:                .b32 0
ctx_src_zoff:                 .b32 0
ctx_src_xoff:                 .b32 0
ctx_src_yoff:                 .b32 0
ctx_src_cpp:                  .b32 0
ctx_dst_address_high:         .b32 0
ctx_dst_address_low:          .b32 0
ctx_dst_pitch:                .b32 0
ctx_dst_tile_mode:            .b32 0
ctx_dst_xsize:                .b32 0
ctx_dst_ysize:                .b32 0
ctx_dst_zsize:                .b32 0
ctx_dst_zoff:                 .b32 0
ctx_dst_xoff:                 .b32 0
ctx_dst_yoff:                 .b32 0
ctx_dst_cpp:                  .b32 0
ctx_format:                   .b32 0
ctx_swz_const0:               .b32 0
ctx_swz_const1:               .b32 0
ctx_xcnt:                     .b32 0
ctx_ycnt:                     .b32 0
.align 256
77 | |||
// Method dispatch table.  Each range opens with a 16-bit pair
// { first_method >> 2, count }, followed by `count` 8-byte entries:
//    .b32 <ctx_* state cell, or 0x00010000 + handler address>
//    .b32 <~mask of bits the method data may set>
// (the 8-byte stride and the 0x0001xxxx "call handler" flag match the
// reader in dispatch: below).  Terminated by a zero-count range.
dispatch_table:
// mthd 0x0000, NAME
.b16 0x000 1
.b32 ctx_object                     ~0xffffffff
// mthd 0x0100, NOP
.b16 0x040 1
.b32 0x00010000 + cmd_nop           ~0xffffffff
// mthd 0x0140, PM_TRIGGER
.b16 0x050 1
.b32 0x00010000 + cmd_pm_trigger    ~0xffffffff
ifdef(`NVA3', `
// mthd 0x0180-0x018c, DMA_
.b16 0x060 ctx_dma_count
dispatch_dma:
.b32 0x00010000 + cmd_dma           ~0xffffffff
.b32 0x00010000 + cmd_dma           ~0xffffffff
.b32 0x00010000 + cmd_dma           ~0xffffffff
',)
// mthd 0x0200-0x0218, SRC_TILE
.b16 0x80 7
.b32 ctx_src_tile_mode              ~0x00000fff
.b32 ctx_src_xsize                  ~0x0007ffff
.b32 ctx_src_ysize                  ~0x00001fff
.b32 ctx_src_zsize                  ~0x000007ff
.b32 ctx_src_zoff                   ~0x00000fff
.b32 ctx_src_xoff                   ~0x0007ffff
.b32 ctx_src_yoff                   ~0x00001fff
// mthd 0x0220-0x0238, DST_TILE
.b16 0x88 7
.b32 ctx_dst_tile_mode              ~0x00000fff
.b32 ctx_dst_xsize                  ~0x0007ffff
.b32 ctx_dst_ysize                  ~0x00001fff
.b32 ctx_dst_zsize                  ~0x000007ff
.b32 ctx_dst_zoff                   ~0x00000fff
.b32 ctx_dst_xoff                   ~0x0007ffff
.b32 ctx_dst_yoff                   ~0x00001fff
// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
.b16 0xc0 2
.b32 0x00010000 + cmd_exec          ~0xffffffff
.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
.b32 ctx_src_address_high           ~0x000000ff
.b32 ctx_src_address_low            ~0xfffffff0
.b32 ctx_dst_address_high           ~0x000000ff
.b32 ctx_dst_address_low            ~0xfffffff0
.b32 ctx_src_pitch                  ~0x0007ffff
.b32 ctx_dst_pitch                  ~0x0007ffff
.b32 ctx_xcnt                       ~0x0000ffff
.b32 ctx_ycnt                       ~0x00001fff
.b32 ctx_format                     ~0x0333ffff
.b32 ctx_swz_const0                 ~0xffffffff
.b32 ctx_swz_const1                 ~0xffffffff
.b32 ctx_query_address_high         ~0x000000ff
.b32 ctx_query_address_low          ~0xffffffff
.b32 ctx_query_counter              ~0xffffffff
// end-of-table sentinel (count == 0)
.b16 0x800 0
135 | |||
ifdef(`NVA3',
.section nva3_pcopy_code,
.section nvc0_pcopy_code
)

// Entry point: install the interrupt vector, unmask the fifo and
// ctxswitch interrupt sources, enable host fifo access, then idle
// forever; all real work happens from the interrupt handler (ih).
main:
   clear b32 $r0
   mov $sp $r0

   // setup i0 handler and route fifo and ctxswitch to it
   mov $r1 ih
   mov $iv0 $r1
   mov $r1 0x400
   movw $r2 0xfff3
   sethi $r2 0
   // BUGFIX: route register lives in the 0x400 block held in $r1; the
   // original wrote I[$r2 + 0x300], using the route *data* (0xfff3) as
   // the I/O address base
   iowr I[$r1 + 0x300] $r2

   // enable interrupts
   or $r2 0xc
   iowr I[$r1] $r2
   bset $flags ie0

   // enable fifo access and context switching
   mov $r1 0x1200
   mov $r2 3
   iowr I[$r1] $r2

   // sleep forever, waking for interrupts
   bset $flags $p0
spin:
   sleep $p0
   bra spin
// i0 interrupt handler: read the pending-interrupt bits, service a
// channel-switch request (bit 3) and/or an incoming fifo command
// (bit 2), then ack exactly the bits we looked at and return.
ih:
   iord $r1 I[$r0 + 0x200]

   and $r2 $r1 0x00000008
   bra e ih_no_chsw
      call chsw
   ih_no_chsw:
   and $r2 $r1 0x00000004
   bra e ih_no_cmd
      call dispatch

   ih_no_cmd:
   and $r1 $r1 0x0000000c
   iowr I[$r0 + 0x100] $r1
   iret
185 | |||
// Save or restore the 256-byte per-channel context image (start of the
// data segment) using falcon transfers (xdst = save, xdld = load).
//
// $p1 direction (0 = unload, 1 = load)
// $r3 channel
swctx:
   mov $r4 0x7700
   mov $xtargets $r4
ifdef(`NVA3', `
   // target 7 hardcoded to ctx dma object
   mov $xdbase $r0
', ` // NVC0
   // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
   mov $r4 0x2100
   iord $r4 I[$r4 + 0]
   and $r4 1
   shl b32 $r4 4
   add b32 $r4 0x30

   // channel is in vram
   mov $r15 0x61c
   shl b32 $r15 6
   mov $r5 0x114
   iowrs I[$r15] $r5

   // read 16-byte PCOPYn info, containing context pointer, from channel
   shl b32 $r5 $r3 4
   add b32 $r5 2
   mov $xdbase $r5
   mov $r5 $sp
   // get a chunk of stack space, aligned to 256 byte boundary
   sub b32 $r5 0x100
   mov $r6 0xff
   not b32 $r6
   and $r5 $r6
   sethi $r5 0x00020000
   xdld $r4 $r5
   xdwait
   sethi $r5 0

   // set context pointer, from within channel VM
   mov $r14 0
   iowrs I[$r15] $r14
   ld b32 $r4 D[$r5 + 0]
   shr b32 $r4 8
   ld b32 $r6 D[$r5 + 4]
   shl b32 $r6 24
   or $r4 $r6
   mov $xdbase $r4
')
   // 256-byte context, at start of data segment
   mov b32 $r4 $r0
   sethi $r4 0x60000

   // swap!
   bra $p1 swctx_load
      xdst $r0 $r4
      bra swctx_done
   swctx_load:
      xdld $r0 $r4
   swctx_done:
   xdwait
   ret
246 | |||
// Channel-switch request: save the currently-active channel's context
// if one is loaded, then load the pending channel's context if any,
// acking each completed phase via the +0x200 register.
chsw:
   // read current channel
   mov $r2 0x1400
   iord $r3 I[$r2]

   // if it's active, unload it and return
   xbit $r15 $r3 0x1e
   bra e chsw_no_unload
      bclr $flags $p1
      call swctx
      bclr $r3 0x1e
      iowr I[$r2] $r3
      mov $r4 1
      iowr I[$r2 + 0x200] $r4
      ret

   // read next channel
   chsw_no_unload:
   iord $r3 I[$r2 + 0x100]

   // is there a channel waiting to be loaded?
   xbit $r13 $r3 0x1e
   bra e chsw_finish_load
      bset $flags $p1
      call swctx
ifdef(`NVA3',
   // load dma objects back into TARGET regs
   mov $r5 ctx_dma
   mov $r6 ctx_dma_count
   chsw_load_ctx_dma:
   ld b32 $r7 D[$r5 + $r6 * 4]
   add b32 $r8 $r6 0x180
   shl b32 $r8 8
   iowr I[$r8] $r7
   sub b32 $r6 1
   bra nc chsw_load_ctx_dma
,)

   chsw_finish_load:
   mov $r3 2
   iowr I[$r2 + 0x200] $r3
   ret
289 | |||
// Decode an incoming fifo method against dispatch_table and either
// store its data into the matching ctx_* cell or call its handler;
// on any failure, report exception data to the host and wait for ack.
dispatch:
   // read incoming fifo command
   mov $r3 0x1900
   iord $r2 I[$r3 + 0x100]
   iord $r3 I[$r3 + 0x000]
   and $r4 $r2 0x7ff
   // $r2 will be used to store exception data
   shl b32 $r2 0x10

   // lookup method in the dispatch table, ILLEGAL_MTHD if not found
   mov $r5 dispatch_table
   clear b32 $r6
   clear b32 $r7
   dispatch_loop:
   ld b16 $r6 D[$r5 + 0]
   ld b16 $r7 D[$r5 + 2]
   add b32 $r5 4
   cmpu b32 $r4 $r6
   bra c dispatch_illegal_mthd
   add b32 $r7 $r6
   cmpu b32 $r4 $r7
   bra c dispatch_valid_mthd
   sub b32 $r7 $r6
   shl b32 $r7 3
   add b32 $r5 $r7
   bra dispatch_loop

   // ensure no bits set in reserved fields, INVALID_BITFIELD
   dispatch_valid_mthd:
   sub b32 $r4 $r6
   shl b32 $r4 3
   add b32 $r4 $r5
   ld b32 $r5 D[$r4 + 4]
   and $r5 $r3
   cmpu b32 $r5 0
   bra ne dispatch_invalid_bitfield

   // depending on dispatch flags: execute method, or save data as state
   ld b16 $r5 D[$r4 + 0]
   ld b16 $r6 D[$r4 + 2]
   cmpu b32 $r6 0
   bra ne dispatch_cmd
      st b32 D[$r5] $r3
      bra dispatch_done
   dispatch_cmd:
   bclr $flags $p1
   call $r5
   bra $p1 dispatch_error
      bra dispatch_done

   // the two error paths deliberately fall through: ILLEGAL_MTHD yields
   // code 1, INVALID_BITFIELD yields 2|1 = 3 (matches the driver's enum)
   dispatch_invalid_bitfield:
   or $r2 2
   dispatch_illegal_mthd:
   or $r2 1

   // store exception data in SCRATCH0/SCRATCH1, signal hostirq
   dispatch_error:
   mov $r4 0x1000
   iowr I[$r4 + 0x000] $r2
   iowr I[$r4 + 0x100] $r3
   mov $r2 0x40
   iowr I[$r0] $r2
   hostirq_wait:
   iord $r2 I[$r0 + 0x200]
   and $r2 0x40
   cmpu b32 $r2 0
   bra ne hostirq_wait

   dispatch_done:
   mov $r2 0x1d00
   mov $r3 1
   iowr I[$r2] $r3
   ret
363 | |||
// No-operation
//
// Inputs:
//    $r1: irqh state
//    $r2: hostirq state
//    $r3: data
//    $r4: dispatch table entry
// Outputs:
//    $r1: irqh state
//    $p1: set on error
//    $r2: hostirq state
//    $r3: data
cmd_nop:
   ret
378 | |||
// PM_TRIGGER: writes 0x20000000 to register 0x2200
// (NOTE(review): presumably a performance-monitor trigger pulse)
//
// Inputs:
//    $r1: irqh state
//    $r2: hostirq state
//    $r3: data
//    $r4: dispatch table entry
// Outputs:
//    $r1: irqh state
//    $p1: set on error
//    $r2: hostirq state
//    $r3: data
cmd_pm_trigger:
   mov $r2 0x2200
   clear b32 $r3
   sethi $r3 0x20000
   iowr I[$r2] $r3
   ret
397 | |||
ifdef(`NVA3',
// SET_DMA_* method handler
//
// Saves the dma object handle to its ctx_dma_* slot and to the hw
// TARGET register.  The slot index comes from the dispatch-table entry
// address in $r4.
//
// Inputs:
//    $r1: irqh state
//    $r2: hostirq state
//    $r3: data
//    $r4: dispatch table entry
// Outputs:
//    $r1: irqh state
//    $p1: set on error
//    $r2: hostirq state
//    $r3: data
cmd_dma:
   sub b32 $r4 dispatch_dma
   shr b32 $r4 1
   bset $r3 0x1e
   st b32 D[$r4 + ctx_dma] $r3
   add b32 $r4 0x600
   shl b32 $r4 6
   iowr I[$r4] $r3
   ret
,)
421 | |||
// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
//
// Builds a 16-byte swizzle buffer on the stack from ctx_format, writes
// SRC/DST XCNT and the cpp/swizzle config registers, then releases the
// stack space.  $p2 records whether the swizzle referenced the source.
//
cmd_exec_set_format:
   // zero out a chunk of the stack to store the swizzle into
   add $sp -0x10
   st b32 D[$sp + 0x00] $r0
   st b32 D[$sp + 0x04] $r0
   st b32 D[$sp + 0x08] $r0
   st b32 D[$sp + 0x0c] $r0

   // extract cpp, src_ncomp and dst_ncomp from FORMAT
   ld b32 $r4 D[$r0 + ctx_format]
   extr $r5 $r4 16:17
   add b32 $r5 1
   extr $r6 $r4 20:21
   add b32 $r6 1
   extr $r7 $r4 24:25
   add b32 $r7 1

   // convert FORMAT swizzle mask to hw swizzle mask
   bclr $flags $p2
   clear b32 $r8
   clear b32 $r9
   ncomp_loop:
   and $r10 $r4 0xf
   shr b32 $r4 4
   clear b32 $r11
   bpc_loop:
   cmpu b8 $r10 4
   bra nc cmp_c0
      mulu $r12 $r10 $r5
      add b32 $r12 $r11
      bset $flags $p2
      bra bpc_next
   cmp_c0:
   bra ne cmp_c1
      mov $r12 0x10
      add b32 $r12 $r11
      bra bpc_next
   cmp_c1:
   cmpu b8 $r10 6
   bra nc cmp_zero
      mov $r12 0x14
      add b32 $r12 $r11
      bra bpc_next
   cmp_zero:
      mov $r12 0x80
   bpc_next:
   st b8 D[$sp + $r8] $r12
   add b32 $r8 1
   add b32 $r11 1
   cmpu b32 $r11 $r5
   bra c bpc_loop
   add b32 $r9 1
   cmpu b32 $r9 $r7
   bra c ncomp_loop

   // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
   mulu $r6 $r5
   st b32 D[$r0 + ctx_src_cpp] $r6
   ld b32 $r8 D[$r0 + ctx_xcnt]
   mulu $r6 $r8
   bra $p2 dst_xcnt
      clear b32 $r6

   dst_xcnt:
   mulu $r7 $r5
   st b32 D[$r0 + ctx_dst_cpp] $r7
   mulu $r7 $r8

   // write XCNTs, packed (cpp - 1) values, swizzle buffer and constants
   mov $r5 0x810
   shl b32 $r5 6
   iowr I[$r5 + 0x000] $r6
   iowr I[$r5 + 0x100] $r7
   add b32 $r5 0x800
   ld b32 $r6 D[$r0 + ctx_dst_cpp]
   sub b32 $r6 1
   shl b32 $r6 8
   ld b32 $r7 D[$r0 + ctx_src_cpp]
   sub b32 $r7 1
   or $r6 $r7
   iowr I[$r5 + 0x000] $r6
   add b32 $r5 0x100
   ld b32 $r6 D[$sp + 0x00]
   iowr I[$r5 + 0x000] $r6
   ld b32 $r6 D[$sp + 0x04]
   iowr I[$r5 + 0x100] $r6
   ld b32 $r6 D[$sp + 0x08]
   iowr I[$r5 + 0x200] $r6
   ld b32 $r6 D[$sp + 0x0c]
   iowr I[$r5 + 0x300] $r6
   add b32 $r5 0x400
   ld b32 $r6 D[$r0 + ctx_swz_const0]
   iowr I[$r5 + 0x000] $r6
   ld b32 $r6 D[$r0 + ctx_swz_const1]
   iowr I[$r5 + 0x100] $r6
   add $sp 0x10
   ret
520 | |||
// Setup to handle a tiled surface
//
// Calculates a number of parameters the hardware requires in order
// to correctly handle tiling.
//
// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
//   nTx = round_up(w * cpp, 1 << Tp) >> Tp
//   nTy = round_up(h, 1 << Th) >> Th
//   Txo = (x * cpp) & ((1 << Tp) - 1)
//    Tx = (x * cpp) >> Tp
//   Tyo = y & ((1 << Th) - 1)
//    Ty = y >> Th
//   Tzo = z & ((1 << Td) - 1)
//    Tz = z >> Td
//
// off  = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
// off += ((Tz * nTy * nTx)) + (Ty * nTx) + Tx) << Td << Th << Tp;
//
// Inputs:
//    $r4: hw command (0x104800)
//    $r5: ctx offset adjustment for src/dst selection
//    $p2: set if dst surface
//
// ($r5 is 0 for the source surface and the dst-src cell distance for
// the destination, so the ctx_src_* loads below address either set)
//
cmd_exec_set_surface_tiled:
   // translate TILE_MODE into Tp, Th, Td shift values
   ld b32 $r7 D[$r5 + ctx_src_tile_mode]
   extr $r9 $r7 8:11
   extr $r8 $r7 4:7
ifdef(`NVA3',
   add b32 $r8 2
,
   add b32 $r8 3
)
   extr $r7 $r7 0:3
   cmp b32 $r7 0xe
   bra ne xtile64
      mov $r7 4
      bra xtileok
   xtile64:
   xbit $r7 $flags $p2
   add b32 $r7 17
   bset $r4 $r7
   mov $r7 6
   xtileok:

   // Op = (x * cpp) & ((1 << Tp) - 1)
   // Tx = (x * cpp) >> Tp
   ld b32 $r10 D[$r5 + ctx_src_xoff]
   ld b32 $r11 D[$r5 + ctx_src_cpp]
   mulu $r10 $r11
   mov $r11 1
   shl b32 $r11 $r7
   sub b32 $r11 1
   and $r12 $r10 $r11
   shr b32 $r10 $r7

   // Tyo = y & ((1 << Th) - 1)
   // Ty  = y >> Th
   ld b32 $r13 D[$r5 + ctx_src_yoff]
   mov $r14 1
   shl b32 $r14 $r8
   sub b32 $r14 1
   and $r11 $r13 $r14
   shr b32 $r13 $r8

   // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
   add b32 $r14 1
   shl b32 $r15 $r14 12
   sub b32 $r14 $r11
   or $r15 $r14
   xbit $r6 $flags $p2
   add b32 $r6 0x208
   shl b32 $r6 8
   iowr I[$r6 + 0x000] $r15

   // Op += Tyo << Tp
   shl b32 $r11 $r7
   add b32 $r12 $r11

   // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
   ld b32 $r15 D[$r5 + ctx_src_xsize]
   ld b32 $r11 D[$r5 + ctx_src_cpp]
   mulu $r15 $r11
   mov $r11 1
   shl b32 $r11 $r7
   sub b32 $r11 1
   add b32 $r15 $r11
   shr b32 $r15 $r7
   push $r15

   // nTy = (h + ((1 << Th) - 1)) >> Th
   ld b32 $r15 D[$r5 + ctx_src_ysize]
   mov $r11 1
   shl b32 $r11 $r8
   sub b32 $r11 1
   add b32 $r15 $r11
   shr b32 $r15 $r8
   push $r15

   // Tys = Tp + Th
   // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
   add b32 $r7 $r8
   sub b32 $r8 2
   mov $r11 1
   shl b32 $r11 $r8
   shl b32 $r11 $r9

   // Tzo = z & ((1 << Td) - 1)
   // Tz  = z >> Td
   // Op += Tzo << Tys
   // Ts  = Tys + Td
   ld b32 $r8 D[$r5 + ctx_src_zoff]
   mov $r14 1
   shl b32 $r14 $r9
   sub b32 $r14 1
   and $r15 $r8 $r14
   shl b32 $r15 $r7
   add b32 $r12 $r15
   add b32 $r7 $r9
   shr b32 $r8 $r9

   // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
   pop $r15
   pop $r9
   mulu $r13 $r9
   add b32 $r10 $r13
   mulu $r8 $r9
   mulu $r8 $r15
   add b32 $r10 $r8
   shl b32 $r10 $r7

   // PITCH = (nTx - 1) << Ts
   sub b32 $r9 1
   shl b32 $r9 $r7
   iowr I[$r6 + 0x200] $r9

   // SRC_ADDRESS_LOW   = (Ot + Op) & 0xffffffff
   // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
   ld b32 $r7 D[$r5 + ctx_src_address_low]
   ld b32 $r8 D[$r5 + ctx_src_address_high]
   add b32 $r10 $r12
   add b32 $r7 $r10
   adc b32 $r8 0
   shl b32 $r8 16
   or $r8 $r11
   sub b32 $r6 0x600
   iowr I[$r6 + 0x000] $r7
   add b32 $r6 0x400
   iowr I[$r6 + 0x000] $r8
   ret
671 | |||
// Setup to handle a linear surface
//
// Nothing to see here.. Sets ADDRESS and PITCH, pretty non-exciting
// ($r5/$p2 select src vs dst exactly as in the tiled path above)
//
cmd_exec_set_surface_linear:
   xbit $r6 $flags $p2
   add b32 $r6 0x202
   shl b32 $r6 8
   ld b32 $r7 D[$r5 + ctx_src_address_low]
   iowr I[$r6 + 0x000] $r7
   add b32 $r6 0x400
   ld b32 $r7 D[$r5 + ctx_src_address_high]
   shl b32 $r7 16
   iowr I[$r6 + 0x000] $r7
   add b32 $r6 0x400
   ld b32 $r7 D[$r5 + ctx_src_pitch]
   iowr I[$r6 + 0x000] $r7
   ret
690 | |||
// wait for regs to be available for use
// (spins while bit 0 of the 0x20000-block status register is set;
// preserves $r0/$r1 across the wait)
cmd_exec_wait:
   push $r0
   push $r1
   mov $r0 0x800
   shl b32 $r0 6
   loop:
   iord $r1 I[$r0]
   and $r1 1
   bra ne loop
   pop $r1
   pop $r0
   ret
704 | |||
// Queue the QUERY writeback for a completed copy: unless QUERY_SHORT
// (bit 13 of the method data) is set, first emit the full
// { -, 0, TIME_LO, TIME_HI } record; then always emit the COUNTER word.
cmd_exec_query:
   // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
   xbit $r4 $r3 13
   bra ne query_counter
      call cmd_exec_wait
      mov $r4 0x80c
      shl b32 $r4 6
      ld b32 $r5 D[$r0 + ctx_query_address_low]
      add b32 $r5 4
      iowr I[$r4 + 0x000] $r5
      iowr I[$r4 + 0x100] $r0
      mov $r5 0xc
      iowr I[$r4 + 0x200] $r5
      add b32 $r4 0x400
      ld b32 $r5 D[$r0 + ctx_query_address_high]
      shl b32 $r5 16
      iowr I[$r4 + 0x000] $r5
      add b32 $r4 0x500
      mov $r5 0x00000b00
      sethi $r5 0x00010000
      iowr I[$r4 + 0x000] $r5
      mov $r5 0x00004040
      shl b32 $r5 1
      sethi $r5 0x80800000
      iowr I[$r4 + 0x100] $r5
      mov $r5 0x00001110
      sethi $r5 0x13120000
      iowr I[$r4 + 0x200] $r5
      mov $r5 0x00001514
      sethi $r5 0x17160000
      iowr I[$r4 + 0x300] $r5
      mov $r5 0x00002601
      sethi $r5 0x00010000
      mov $r4 0x800
      shl b32 $r4 6
      iowr I[$r4 + 0x000] $r5

   // write COUNTER
   query_counter:
   call cmd_exec_wait
   mov $r4 0x80c
   shl b32 $r4 6
   ld b32 $r5 D[$r0 + ctx_query_address_low]
   iowr I[$r4 + 0x000] $r5
   iowr I[$r4 + 0x100] $r0
   mov $r5 0x4
   iowr I[$r4 + 0x200] $r5
   add b32 $r4 0x400
   ld b32 $r5 D[$r0 + ctx_query_address_high]
   shl b32 $r5 16
   iowr I[$r4 + 0x000] $r5
   add b32 $r4 0x500
   mov $r5 0x00000300
   iowr I[$r4 + 0x000] $r5
   mov $r5 0x00001110
   sethi $r5 0x13120000
   iowr I[$r4 + 0x100] $r5
   ld b32 $r5 D[$r0 + ctx_query_counter]
   add b32 $r4 0x500
   iowr I[$r4 + 0x000] $r5
   mov $r5 0x00002601
   sethi $r5 0x00010000
   mov $r4 0x800
   shl b32 $r4 6
   iowr I[$r4 + 0x000] $r5
   ret
771 | |||
// Execute a copy operation
//
// Inputs:
//    $r1: irqh state
//    $r2: hostirq state
//    $r3: data
//       000002000 QUERY_SHORT
//       000001000 QUERY
//       000000100 DST_LINEAR
//       000000010 SRC_LINEAR
//       000000001 FORMAT
//    $r4: dispatch table entry
// Outputs:
//    $r1: irqh state
//    $p1: set on error
//    $r2: hostirq state
//    $r3: data
cmd_exec:
   call cmd_exec_wait

   // if format requested, call function to calculate it, otherwise
   // fill in cpp/xcnt for both surfaces as if (cpp == 1)
   xbit $r15 $r3 0
   bra e cmd_exec_no_format
      call cmd_exec_set_format
      mov $r4 0x200
      bra cmd_exec_init_src_surface
   cmd_exec_no_format:
      mov $r6 0x810
      shl b32 $r6 6
      mov $r7 1
      st b32 D[$r0 + ctx_src_cpp] $r7
      st b32 D[$r0 + ctx_dst_cpp] $r7
      ld b32 $r7 D[$r0 + ctx_xcnt]
      iowr I[$r6 + 0x000] $r7
      iowr I[$r6 + 0x100] $r7
      clear b32 $r4

   // source surface: $r5 = 0 so the helpers read ctx_src_* directly
   cmd_exec_init_src_surface:
   bclr $flags $p2
   clear b32 $r5
   xbit $r15 $r3 4
   bra e src_tiled
      call cmd_exec_set_surface_linear
      bra cmd_exec_init_dst_surface
   src_tiled:
      call cmd_exec_set_surface_tiled
      bset $r4 7

   // dest surface: $r5 biases the helpers' ctx_src_* loads so they
   // read the ctx_dst_* cells instead
   cmd_exec_init_dst_surface:
   bset $flags $p2
   mov $r5 ctx_dst_address_high - ctx_src_address_high
   xbit $r15 $r3 8
   bra e dst_tiled
      call cmd_exec_set_surface_linear
      bra cmd_exec_kick
   dst_tiled:
      call cmd_exec_set_surface_tiled
      bset $r4 8

   cmd_exec_kick:
   mov $r5 0x800
   shl b32 $r5 6
   ld b32 $r6 D[$r0 + ctx_ycnt]
   iowr I[$r5 + 0x100] $r6
   mov $r6 0x0041
   // SRC_TARGET = 1, DST_TARGET = 2
   sethi $r6 0x44000000
   or $r4 $r6
   iowr I[$r5] $r4

   // if requested, queue up a QUERY write after the copy has completed
   xbit $r15 $r3 12
   bra e cmd_exec_done
      call cmd_exec_query

   cmd_exec_done:
   ret
850 | |||
// Flush write cache: writes 0x10000000 to register 0x2200
//
// Inputs:
//    $r1: irqh state
//    $r2: hostirq state
//    $r3: data
//    $r4: dispatch table entry
// Outputs:
//    $r1: irqh state
//    $p1: set on error
//    $r2: hostirq state
//    $r3: data
cmd_wrcache_flush:
   mov $r2 0x2200
   clear b32 $r3
   sethi $r3 0x10000
   iowr I[$r2] $r3
   ret

.align 0x100
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h new file mode 100644 index 00000000000..2731de22ebe --- /dev/null +++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h | |||
@@ -0,0 +1,534 @@ | |||
1 | uint32_t nva3_pcopy_data[] = { | ||
2 | 0x00000000, | ||
3 | 0x00000000, | ||
4 | 0x00000000, | ||
5 | 0x00000000, | ||
6 | 0x00000000, | ||
7 | 0x00000000, | ||
8 | 0x00000000, | ||
9 | 0x00000000, | ||
10 | 0x00000000, | ||
11 | 0x00000000, | ||
12 | 0x00000000, | ||
13 | 0x00000000, | ||
14 | 0x00000000, | ||
15 | 0x00000000, | ||
16 | 0x00000000, | ||
17 | 0x00000000, | ||
18 | 0x00000000, | ||
19 | 0x00000000, | ||
20 | 0x00000000, | ||
21 | 0x00000000, | ||
22 | 0x00000000, | ||
23 | 0x00000000, | ||
24 | 0x00000000, | ||
25 | 0x00000000, | ||
26 | 0x00000000, | ||
27 | 0x00000000, | ||
28 | 0x00000000, | ||
29 | 0x00000000, | ||
30 | 0x00000000, | ||
31 | 0x00000000, | ||
32 | 0x00000000, | ||
33 | 0x00000000, | ||
34 | 0x00000000, | ||
35 | 0x00000000, | ||
36 | 0x00000000, | ||
37 | 0x00000000, | ||
38 | 0x00000000, | ||
39 | 0x00000000, | ||
40 | 0x00000000, | ||
41 | 0x00000000, | ||
42 | 0x00000000, | ||
43 | 0x00000000, | ||
44 | 0x00000000, | ||
45 | 0x00000000, | ||
46 | 0x00000000, | ||
47 | 0x00000000, | ||
48 | 0x00000000, | ||
49 | 0x00000000, | ||
50 | 0x00000000, | ||
51 | 0x00000000, | ||
52 | 0x00000000, | ||
53 | 0x00000000, | ||
54 | 0x00000000, | ||
55 | 0x00000000, | ||
56 | 0x00000000, | ||
57 | 0x00000000, | ||
58 | 0x00000000, | ||
59 | 0x00000000, | ||
60 | 0x00000000, | ||
61 | 0x00000000, | ||
62 | 0x00000000, | ||
63 | 0x00000000, | ||
64 | 0x00000000, | ||
65 | 0x00000000, | ||
66 | 0x00010000, | ||
67 | 0x00000000, | ||
68 | 0x00000000, | ||
69 | 0x00010040, | ||
70 | 0x00010160, | ||
71 | 0x00000000, | ||
72 | 0x00010050, | ||
73 | 0x00010162, | ||
74 | 0x00000000, | ||
75 | 0x00030060, | ||
76 | 0x00010170, | ||
77 | 0x00000000, | ||
78 | 0x00010170, | ||
79 | 0x00000000, | ||
80 | 0x00010170, | ||
81 | 0x00000000, | ||
82 | 0x00070080, | ||
83 | 0x00000028, | ||
84 | 0xfffff000, | ||
85 | 0x0000002c, | ||
86 | 0xfff80000, | ||
87 | 0x00000030, | ||
88 | 0xffffe000, | ||
89 | 0x00000034, | ||
90 | 0xfffff800, | ||
91 | 0x00000038, | ||
92 | 0xfffff000, | ||
93 | 0x0000003c, | ||
94 | 0xfff80000, | ||
95 | 0x00000040, | ||
96 | 0xffffe000, | ||
97 | 0x00070088, | ||
98 | 0x00000054, | ||
99 | 0xfffff000, | ||
100 | 0x00000058, | ||
101 | 0xfff80000, | ||
102 | 0x0000005c, | ||
103 | 0xffffe000, | ||
104 | 0x00000060, | ||
105 | 0xfffff800, | ||
106 | 0x00000064, | ||
107 | 0xfffff000, | ||
108 | 0x00000068, | ||
109 | 0xfff80000, | ||
110 | 0x0000006c, | ||
111 | 0xffffe000, | ||
112 | 0x000200c0, | ||
113 | 0x00010492, | ||
114 | 0x00000000, | ||
115 | 0x0001051b, | ||
116 | 0x00000000, | ||
117 | 0x000e00c3, | ||
118 | 0x0000001c, | ||
119 | 0xffffff00, | ||
120 | 0x00000020, | ||
121 | 0x0000000f, | ||
122 | 0x00000048, | ||
123 | 0xffffff00, | ||
124 | 0x0000004c, | ||
125 | 0x0000000f, | ||
126 | 0x00000024, | ||
127 | 0xfff80000, | ||
128 | 0x00000050, | ||
129 | 0xfff80000, | ||
130 | 0x00000080, | ||
131 | 0xffff0000, | ||
132 | 0x00000084, | ||
133 | 0xffffe000, | ||
134 | 0x00000074, | ||
135 | 0xfccc0000, | ||
136 | 0x00000078, | ||
137 | 0x00000000, | ||
138 | 0x0000007c, | ||
139 | 0x00000000, | ||
140 | 0x00000010, | ||
141 | 0xffffff00, | ||
142 | 0x00000014, | ||
143 | 0x00000000, | ||
144 | 0x00000018, | ||
145 | 0x00000000, | ||
146 | 0x00000800, | ||
147 | }; | ||
148 | |||
149 | uint32_t nva3_pcopy_code[] = { | ||
150 | 0x04fe04bd, | ||
151 | 0x3517f000, | ||
152 | 0xf10010fe, | ||
153 | 0xf1040017, | ||
154 | 0xf0fff327, | ||
155 | 0x22d00023, | ||
156 | 0x0c25f0c0, | ||
157 | 0xf40012d0, | ||
158 | 0x17f11031, | ||
159 | 0x27f01200, | ||
160 | 0x0012d003, | ||
161 | 0xf40031f4, | ||
162 | 0x0ef40028, | ||
163 | 0x8001cffd, | ||
164 | 0xf40812c4, | ||
165 | 0x21f4060b, | ||
166 | 0x0412c472, | ||
167 | 0xf4060bf4, | ||
168 | 0x11c4c321, | ||
169 | 0x4001d00c, | ||
170 | 0x47f101f8, | ||
171 | 0x4bfe7700, | ||
172 | 0x0007fe00, | ||
173 | 0xf00204b9, | ||
174 | 0x01f40643, | ||
175 | 0x0604fa09, | ||
176 | 0xfa060ef4, | ||
177 | 0x03f80504, | ||
178 | 0x27f100f8, | ||
179 | 0x23cf1400, | ||
180 | 0x1e3fc800, | ||
181 | 0xf4170bf4, | ||
182 | 0x21f40132, | ||
183 | 0x1e3af052, | ||
184 | 0xf00023d0, | ||
185 | 0x24d00147, | ||
186 | 0xcf00f880, | ||
187 | 0x3dc84023, | ||
188 | 0x220bf41e, | ||
189 | 0xf40131f4, | ||
190 | 0x57f05221, | ||
191 | 0x0367f004, | ||
192 | 0xa07856bc, | ||
193 | 0xb6018068, | ||
194 | 0x87d00884, | ||
195 | 0x0162b600, | ||
196 | 0xf0f018f4, | ||
197 | 0x23d00237, | ||
198 | 0xf100f880, | ||
199 | 0xcf190037, | ||
200 | 0x33cf4032, | ||
201 | 0xff24e400, | ||
202 | 0x1024b607, | ||
203 | 0x010057f1, | ||
204 | 0x74bd64bd, | ||
205 | 0x58005658, | ||
206 | 0x50b60157, | ||
207 | 0x0446b804, | ||
208 | 0xbb4d08f4, | ||
209 | 0x47b80076, | ||
210 | 0x0f08f404, | ||
211 | 0xb60276bb, | ||
212 | 0x57bb0374, | ||
213 | 0xdf0ef400, | ||
214 | 0xb60246bb, | ||
215 | 0x45bb0344, | ||
216 | 0x01459800, | ||
217 | 0xb00453fd, | ||
218 | 0x1bf40054, | ||
219 | 0x00455820, | ||
220 | 0xb0014658, | ||
221 | 0x1bf40064, | ||
222 | 0x00538009, | ||
223 | 0xf4300ef4, | ||
224 | 0x55f90132, | ||
225 | 0xf40c01f4, | ||
226 | 0x25f0250e, | ||
227 | 0x0125f002, | ||
228 | 0x100047f1, | ||
229 | 0xd00042d0, | ||
230 | 0x27f04043, | ||
231 | 0x0002d040, | ||
232 | 0xf08002cf, | ||
233 | 0x24b04024, | ||
234 | 0xf71bf400, | ||
235 | 0x1d0027f1, | ||
236 | 0xd00137f0, | ||
237 | 0x00f80023, | ||
238 | 0x27f100f8, | ||
239 | 0x34bd2200, | ||
240 | 0xd00233f0, | ||
241 | 0x00f80023, | ||
242 | 0x012842b7, | ||
243 | 0xf00145b6, | ||
244 | 0x43801e39, | ||
245 | 0x0040b701, | ||
246 | 0x0644b606, | ||
247 | 0xf80043d0, | ||
248 | 0xf030f400, | ||
249 | 0xb00001b0, | ||
250 | 0x01b00101, | ||
251 | 0x0301b002, | ||
252 | 0xc71d0498, | ||
253 | 0x50b63045, | ||
254 | 0x3446c701, | ||
255 | 0xc70160b6, | ||
256 | 0x70b63847, | ||
257 | 0x0232f401, | ||
258 | 0x94bd84bd, | ||
259 | 0xb60f4ac4, | ||
260 | 0xb4bd0445, | ||
261 | 0xf404a430, | ||
262 | 0xa5ff0f18, | ||
263 | 0x00cbbbc0, | ||
264 | 0xf40231f4, | ||
265 | 0x1bf4220e, | ||
266 | 0x10c7f00c, | ||
267 | 0xf400cbbb, | ||
268 | 0xa430160e, | ||
269 | 0x0c18f406, | ||
270 | 0xbb14c7f0, | ||
271 | 0x0ef400cb, | ||
272 | 0x80c7f107, | ||
273 | 0x01c83800, | ||
274 | 0xb60180b6, | ||
275 | 0xb5b801b0, | ||
276 | 0xc308f404, | ||
277 | 0xb80190b6, | ||
278 | 0x08f40497, | ||
279 | 0x0065fdb2, | ||
280 | 0x98110680, | ||
281 | 0x68fd2008, | ||
282 | 0x0502f400, | ||
283 | 0x75fd64bd, | ||
284 | 0x1c078000, | ||
285 | 0xf10078fd, | ||
286 | 0xb6081057, | ||
287 | 0x56d00654, | ||
288 | 0x4057d000, | ||
289 | 0x080050b7, | ||
290 | 0xb61c0698, | ||
291 | 0x64b60162, | ||
292 | 0x11079808, | ||
293 | 0xfd0172b6, | ||
294 | 0x56d00567, | ||
295 | 0x0050b700, | ||
296 | 0x0060b401, | ||
297 | 0xb40056d0, | ||
298 | 0x56d00160, | ||
299 | 0x0260b440, | ||
300 | 0xb48056d0, | ||
301 | 0x56d00360, | ||
302 | 0x0050b7c0, | ||
303 | 0x1e069804, | ||
304 | 0x980056d0, | ||
305 | 0x56d01f06, | ||
306 | 0x1030f440, | ||
307 | 0x579800f8, | ||
308 | 0x6879c70a, | ||
309 | 0xb66478c7, | ||
310 | 0x77c70280, | ||
311 | 0x0e76b060, | ||
312 | 0xf0091bf4, | ||
313 | 0x0ef40477, | ||
314 | 0x027cf00f, | ||
315 | 0xfd1170b6, | ||
316 | 0x77f00947, | ||
317 | 0x0f5a9806, | ||
318 | 0xfd115b98, | ||
319 | 0xb7f000ab, | ||
320 | 0x04b7bb01, | ||
321 | 0xff01b2b6, | ||
322 | 0xa7bbc4ab, | ||
323 | 0x105d9805, | ||
324 | 0xbb01e7f0, | ||
325 | 0xe2b604e8, | ||
326 | 0xb4deff01, | ||
327 | 0xb605d8bb, | ||
328 | 0xef9401e0, | ||
329 | 0x02ebbb0c, | ||
330 | 0xf005fefd, | ||
331 | 0x60b7026c, | ||
332 | 0x64b60208, | ||
333 | 0x006fd008, | ||
334 | 0xbb04b7bb, | ||
335 | 0x5f9800cb, | ||
336 | 0x115b980b, | ||
337 | 0xf000fbfd, | ||
338 | 0xb7bb01b7, | ||
339 | 0x01b2b604, | ||
340 | 0xbb00fbbb, | ||
341 | 0xf0f905f7, | ||
342 | 0xf00c5f98, | ||
343 | 0xb8bb01b7, | ||
344 | 0x01b2b604, | ||
345 | 0xbb00fbbb, | ||
346 | 0xf0f905f8, | ||
347 | 0xb60078bb, | ||
348 | 0xb7f00282, | ||
349 | 0x04b8bb01, | ||
350 | 0x9804b9bb, | ||
351 | 0xe7f00e58, | ||
352 | 0x04e9bb01, | ||
353 | 0xff01e2b6, | ||
354 | 0xf7bbf48e, | ||
355 | 0x00cfbb04, | ||
356 | 0xbb0079bb, | ||
357 | 0xf0fc0589, | ||
358 | 0xd9fd90fc, | ||
359 | 0x00adbb00, | ||
360 | 0xfd0089fd, | ||
361 | 0xa8bb008f, | ||
362 | 0x04a7bb00, | ||
363 | 0xbb0192b6, | ||
364 | 0x69d00497, | ||
365 | 0x08579880, | ||
366 | 0xbb075898, | ||
367 | 0x7abb00ac, | ||
368 | 0x0081b600, | ||
369 | 0xfd1084b6, | ||
370 | 0x62b7058b, | ||
371 | 0x67d00600, | ||
372 | 0x0060b700, | ||
373 | 0x0068d004, | ||
374 | 0x6cf000f8, | ||
375 | 0x0260b702, | ||
376 | 0x0864b602, | ||
377 | 0xd0085798, | ||
378 | 0x60b70067, | ||
379 | 0x57980400, | ||
380 | 0x1074b607, | ||
381 | 0xb70067d0, | ||
382 | 0x98040060, | ||
383 | 0x67d00957, | ||
384 | 0xf900f800, | ||
385 | 0xf110f900, | ||
386 | 0xb6080007, | ||
387 | 0x01cf0604, | ||
388 | 0x0114f000, | ||
389 | 0xfcfa1bf4, | ||
390 | 0xf800fc10, | ||
391 | 0x0d34c800, | ||
392 | 0xf5701bf4, | ||
393 | 0xf103ab21, | ||
394 | 0xb6080c47, | ||
395 | 0x05980644, | ||
396 | 0x0450b605, | ||
397 | 0xd00045d0, | ||
398 | 0x57f04040, | ||
399 | 0x8045d00c, | ||
400 | 0x040040b7, | ||
401 | 0xb6040598, | ||
402 | 0x45d01054, | ||
403 | 0x0040b700, | ||
404 | 0x0057f105, | ||
405 | 0x0153f00b, | ||
406 | 0xf10045d0, | ||
407 | 0xb6404057, | ||
408 | 0x53f10154, | ||
409 | 0x45d08080, | ||
410 | 0x1057f140, | ||
411 | 0x1253f111, | ||
412 | 0x8045d013, | ||
413 | 0x151457f1, | ||
414 | 0x171653f1, | ||
415 | 0xf1c045d0, | ||
416 | 0xf0260157, | ||
417 | 0x47f10153, | ||
418 | 0x44b60800, | ||
419 | 0x0045d006, | ||
420 | 0x03ab21f5, | ||
421 | 0x080c47f1, | ||
422 | 0x980644b6, | ||
423 | 0x45d00505, | ||
424 | 0x4040d000, | ||
425 | 0xd00457f0, | ||
426 | 0x40b78045, | ||
427 | 0x05980400, | ||
428 | 0x1054b604, | ||
429 | 0xb70045d0, | ||
430 | 0xf1050040, | ||
431 | 0xd0030057, | ||
432 | 0x57f10045, | ||
433 | 0x53f11110, | ||
434 | 0x45d01312, | ||
435 | 0x06059840, | ||
436 | 0x050040b7, | ||
437 | 0xf10045d0, | ||
438 | 0xf0260157, | ||
439 | 0x47f10153, | ||
440 | 0x44b60800, | ||
441 | 0x0045d006, | ||
442 | 0x21f500f8, | ||
443 | 0x3fc803ab, | ||
444 | 0x0e0bf400, | ||
445 | 0x018921f5, | ||
446 | 0x020047f1, | ||
447 | 0xf11e0ef4, | ||
448 | 0xb6081067, | ||
449 | 0x77f00664, | ||
450 | 0x11078001, | ||
451 | 0x981c0780, | ||
452 | 0x67d02007, | ||
453 | 0x4067d000, | ||
454 | 0x32f444bd, | ||
455 | 0xc854bd02, | ||
456 | 0x0bf4043f, | ||
457 | 0x8221f50a, | ||
458 | 0x0a0ef403, | ||
459 | 0x027621f5, | ||
460 | 0xf40749f0, | ||
461 | 0x57f00231, | ||
462 | 0x083fc82c, | ||
463 | 0xf50a0bf4, | ||
464 | 0xf4038221, | ||
465 | 0x21f50a0e, | ||
466 | 0x49f00276, | ||
467 | 0x0057f108, | ||
468 | 0x0654b608, | ||
469 | 0xd0210698, | ||
470 | 0x67f04056, | ||
471 | 0x0063f141, | ||
472 | 0x0546fd44, | ||
473 | 0xc80054d0, | ||
474 | 0x0bf40c3f, | ||
475 | 0xc521f507, | ||
476 | 0xf100f803, | ||
477 | 0xbd220027, | ||
478 | 0x0133f034, | ||
479 | 0xf80023d0, | ||
480 | 0x00000000, | ||
481 | 0x00000000, | ||
482 | 0x00000000, | ||
483 | 0x00000000, | ||
484 | 0x00000000, | ||
485 | 0x00000000, | ||
486 | 0x00000000, | ||
487 | 0x00000000, | ||
488 | 0x00000000, | ||
489 | 0x00000000, | ||
490 | 0x00000000, | ||
491 | 0x00000000, | ||
492 | 0x00000000, | ||
493 | 0x00000000, | ||
494 | 0x00000000, | ||
495 | 0x00000000, | ||
496 | 0x00000000, | ||
497 | 0x00000000, | ||
498 | 0x00000000, | ||
499 | 0x00000000, | ||
500 | 0x00000000, | ||
501 | 0x00000000, | ||
502 | 0x00000000, | ||
503 | 0x00000000, | ||
504 | 0x00000000, | ||
505 | 0x00000000, | ||
506 | 0x00000000, | ||
507 | 0x00000000, | ||
508 | 0x00000000, | ||
509 | 0x00000000, | ||
510 | 0x00000000, | ||
511 | 0x00000000, | ||
512 | 0x00000000, | ||
513 | 0x00000000, | ||
514 | 0x00000000, | ||
515 | 0x00000000, | ||
516 | 0x00000000, | ||
517 | 0x00000000, | ||
518 | 0x00000000, | ||
519 | 0x00000000, | ||
520 | 0x00000000, | ||
521 | 0x00000000, | ||
522 | 0x00000000, | ||
523 | 0x00000000, | ||
524 | 0x00000000, | ||
525 | 0x00000000, | ||
526 | 0x00000000, | ||
527 | 0x00000000, | ||
528 | 0x00000000, | ||
529 | 0x00000000, | ||
530 | 0x00000000, | ||
531 | 0x00000000, | ||
532 | 0x00000000, | ||
533 | 0x00000000, | ||
534 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c new file mode 100644 index 00000000000..dddf006f6d8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_copy.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include <linux/firmware.h> | ||
26 | #include "drmP.h" | ||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_util.h" | ||
29 | #include "nouveau_vm.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | #include "nvc0_copy.fuc.h" | ||
32 | |||
33 | struct nvc0_copy_engine { | ||
34 | struct nouveau_exec_engine base; | ||
35 | u32 irq; | ||
36 | u32 pmc; | ||
37 | u32 fuc; | ||
38 | u32 ctx; | ||
39 | }; | ||
40 | |||
/* Allocate a per-channel PCOPY context object and hook it into the
 * channel's instance memory so the falcon can find it on load.
 *
 * Returns 0 on success, or the negative error from nouveau_gpuobj_new().
 */
static int
nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	/* 256-byte, 256-aligned context object, zeroed and mapped into the
	 * channel's VM (user-visible mapping included). */
	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
	if (ret)
		return ret;

	/* write the context object's VM address (lo/hi) into this engine's
	 * slot in the channel's instmem, then flush so the HW sees it */
	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
64 | |||
/* Object creation hook: PCOPY needs no per-object state, so this is a
 * no-op that always reports success. */
static int
nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
		     u32 handle, u16 class)
{
	return 0;
}
71 | |||
/* Tear down a channel's PCOPY context: detach it from the falcon if it
 * is (or is about to be) the active context, clear the instmem pointer,
 * and release the context object. */
static void
nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

	/* instance address in the same format the falcon's CUR/NEXT
	 * context registers use: vinst>>12 with the "valid" bit set */
	inst = (chan->ramin->vinst >> 12);
	inst |= 0x40000000;

	/* disable fifo access */
	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
	/* mark channel as unloaded if it's currently active */
	if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
		nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
	/* mark next channel as invalid if it's about to be loaded */
	if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
		nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
	/* restore fifo access */
	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);

	/* clear the context pointer in instmem, then drop our reference
	 * (nouveau_gpuobj_ref(NULL, &ctx) frees it and NULLs ctx) */
	nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
	nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
	nouveau_gpuobj_ref(NULL, &ctx);

	/* ctx is NULL at this point, so the engine slot is cleared */
	chan->engctx[engine] = ctx;
}
100 | |||
/* Reset the PCOPY unit, upload its microcode (data + code segments),
 * then start the falcon. Always returns 0. */
static int
nvc0_copy_init(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
	int i;

	/* pulse the engine's bit in PMC_ENABLE (off, then on) to reset it,
	 * and ack any pending interrupts */
	nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
	nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);

	/* upload the data segment; sizeof/4 converts bytes to 32-bit words */
	nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
		nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);

	/* upload the code segment; the upload page (0x188) must be set at
	 * every 64-word (256-byte) boundary */
	nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
		nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
	}

	/* tell the firmware which copy engine it is (0 or 1), then kick it */
	nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
	nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
	nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
	nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
	return 0;
}
128 | |||
/* Quiesce the PCOPY unit for teardown/suspend: block fifo access, ask
 * the firmware to unload its current context, and ack interrupts.
 * Always returns 0. */
static int
nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);

	/* disable fifo access while we shut the engine down */
	nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);

	/* trigger fuc context unload */
	/* NOTE(review): nv_wait() results are ignored here, so a firmware
	 * that never goes idle is not reported — presumably best-effort by
	 * design; confirm before changing. */
	nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
	nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
	nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
	nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);

	/* ack any interrupts raised during the unload */
	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
	return 0;
}
145 | |||
/* Human-readable names for the DISPATCH_ERROR status codes decoded in
 * nvc0_copy_isr(); terminated by an empty entry for nouveau_enum_print(). */
static struct nouveau_enum nvc0_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};
152 | |||
/* PCOPY interrupt handler: decode the pending-and-enabled interrupt
 * bits, report dispatch errors with channel/method context, and ack
 * everything so the interrupt doesn't re-fire. */
static void
nvc0_copy_isr(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
	/* disp: low 16 bits = enables; stat = pending & enabled & ~routed */
	u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
	u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
	/* instance address of the active channel (page number << 12) */
	u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	/* 0x040: low half = error status, high half = subchannel/method */
	u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
	u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;
	u32 subc = (addr & 0x3800) >> 11;
	u32 data = nv_rd32(dev, pcopy->fuc + 0x044);

	if (stat & 0x00000040) {
		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, mthd, data);
		/* ack the dispatch-error bit and drop it from stat */
		nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
		stat &= ~0x00000040;
	}

	if (stat) {
		/* log and ack anything we don't explicitly handle */
		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
		nv_wr32(dev, pcopy->fuc + 0x004, stat);
	}
}
181 | |||
/* IRQ trampoline for PCOPY0 (registered in nvc0_copy_create()). */
static void
nvc0_copy_isr_0(struct drm_device *dev)
{
	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
}
187 | |||
/* IRQ trampoline for PCOPY1 (registered in nvc0_copy_create()). */
static void
nvc0_copy_isr_1(struct drm_device *dev)
{
	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
}
193 | |||
/* Engine destructor: unhook the IRQ handler, deregister the engine from
 * the core, and free the state allocated in nvc0_copy_create(). */
static void
nvc0_copy_destroy(struct drm_device *dev, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, pcopy->irq);

	/* NVOBJ_ENGINE_DEL pastes its argument as a token, so the two
	 * engines need separate call sites */
	if (engine == NVOBJ_ENGINE_COPY0)
		NVOBJ_ENGINE_DEL(dev, COPY0);
	else
		NVOBJ_ENGINE_DEL(dev, COPY1);
	kfree(pcopy);
}
207 | |||
208 | int | ||
209 | nvc0_copy_create(struct drm_device *dev, int engine) | ||
210 | { | ||
211 | struct nvc0_copy_engine *pcopy; | ||
212 | |||
213 | pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL); | ||
214 | if (!pcopy) | ||
215 | return -ENOMEM; | ||
216 | |||
217 | pcopy->base.destroy = nvc0_copy_destroy; | ||
218 | pcopy->base.init = nvc0_copy_init; | ||
219 | pcopy->base.fini = nvc0_copy_fini; | ||
220 | pcopy->base.context_new = nvc0_copy_context_new; | ||
221 | pcopy->base.context_del = nvc0_copy_context_del; | ||
222 | pcopy->base.object_new = nvc0_copy_object_new; | ||
223 | |||
224 | if (engine == 0) { | ||
225 | pcopy->irq = 5; | ||
226 | pcopy->pmc = 0x00000040; | ||
227 | pcopy->fuc = 0x104000; | ||
228 | pcopy->ctx = 0x0230; | ||
229 | nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0); | ||
230 | NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base); | ||
231 | NVOBJ_CLASS(dev, 0x90b5, COPY0); | ||
232 | } else { | ||
233 | pcopy->irq = 6; | ||
234 | pcopy->pmc = 0x00000080; | ||
235 | pcopy->fuc = 0x105000; | ||
236 | pcopy->ctx = 0x0240; | ||
237 | nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1); | ||
238 | NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base); | ||
239 | NVOBJ_CLASS(dev, 0x90b8, COPY1); | ||
240 | } | ||
241 | |||
242 | return 0; | ||
243 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h new file mode 100644 index 00000000000..419903880e9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h | |||
@@ -0,0 +1,527 @@ | |||
1 | uint32_t nvc0_pcopy_data[] = { | ||
2 | 0x00000000, | ||
3 | 0x00000000, | ||
4 | 0x00000000, | ||
5 | 0x00000000, | ||
6 | 0x00000000, | ||
7 | 0x00000000, | ||
8 | 0x00000000, | ||
9 | 0x00000000, | ||
10 | 0x00000000, | ||
11 | 0x00000000, | ||
12 | 0x00000000, | ||
13 | 0x00000000, | ||
14 | 0x00000000, | ||
15 | 0x00000000, | ||
16 | 0x00000000, | ||
17 | 0x00000000, | ||
18 | 0x00000000, | ||
19 | 0x00000000, | ||
20 | 0x00000000, | ||
21 | 0x00000000, | ||
22 | 0x00000000, | ||
23 | 0x00000000, | ||
24 | 0x00000000, | ||
25 | 0x00000000, | ||
26 | 0x00000000, | ||
27 | 0x00000000, | ||
28 | 0x00000000, | ||
29 | 0x00000000, | ||
30 | 0x00000000, | ||
31 | 0x00000000, | ||
32 | 0x00000000, | ||
33 | 0x00000000, | ||
34 | 0x00000000, | ||
35 | 0x00000000, | ||
36 | 0x00000000, | ||
37 | 0x00000000, | ||
38 | 0x00000000, | ||
39 | 0x00000000, | ||
40 | 0x00000000, | ||
41 | 0x00000000, | ||
42 | 0x00000000, | ||
43 | 0x00000000, | ||
44 | 0x00000000, | ||
45 | 0x00000000, | ||
46 | 0x00000000, | ||
47 | 0x00000000, | ||
48 | 0x00000000, | ||
49 | 0x00000000, | ||
50 | 0x00000000, | ||
51 | 0x00000000, | ||
52 | 0x00000000, | ||
53 | 0x00000000, | ||
54 | 0x00000000, | ||
55 | 0x00000000, | ||
56 | 0x00000000, | ||
57 | 0x00000000, | ||
58 | 0x00000000, | ||
59 | 0x00000000, | ||
60 | 0x00000000, | ||
61 | 0x00000000, | ||
62 | 0x00000000, | ||
63 | 0x00000000, | ||
64 | 0x00000000, | ||
65 | 0x00000000, | ||
66 | 0x00010000, | ||
67 | 0x00000000, | ||
68 | 0x00000000, | ||
69 | 0x00010040, | ||
70 | 0x0001019f, | ||
71 | 0x00000000, | ||
72 | 0x00010050, | ||
73 | 0x000101a1, | ||
74 | 0x00000000, | ||
75 | 0x00070080, | ||
76 | 0x0000001c, | ||
77 | 0xfffff000, | ||
78 | 0x00000020, | ||
79 | 0xfff80000, | ||
80 | 0x00000024, | ||
81 | 0xffffe000, | ||
82 | 0x00000028, | ||
83 | 0xfffff800, | ||
84 | 0x0000002c, | ||
85 | 0xfffff000, | ||
86 | 0x00000030, | ||
87 | 0xfff80000, | ||
88 | 0x00000034, | ||
89 | 0xffffe000, | ||
90 | 0x00070088, | ||
91 | 0x00000048, | ||
92 | 0xfffff000, | ||
93 | 0x0000004c, | ||
94 | 0xfff80000, | ||
95 | 0x00000050, | ||
96 | 0xffffe000, | ||
97 | 0x00000054, | ||
98 | 0xfffff800, | ||
99 | 0x00000058, | ||
100 | 0xfffff000, | ||
101 | 0x0000005c, | ||
102 | 0xfff80000, | ||
103 | 0x00000060, | ||
104 | 0xffffe000, | ||
105 | 0x000200c0, | ||
106 | 0x000104b8, | ||
107 | 0x00000000, | ||
108 | 0x00010541, | ||
109 | 0x00000000, | ||
110 | 0x000e00c3, | ||
111 | 0x00000010, | ||
112 | 0xffffff00, | ||
113 | 0x00000014, | ||
114 | 0x0000000f, | ||
115 | 0x0000003c, | ||
116 | 0xffffff00, | ||
117 | 0x00000040, | ||
118 | 0x0000000f, | ||
119 | 0x00000018, | ||
120 | 0xfff80000, | ||
121 | 0x00000044, | ||
122 | 0xfff80000, | ||
123 | 0x00000074, | ||
124 | 0xffff0000, | ||
125 | 0x00000078, | ||
126 | 0xffffe000, | ||
127 | 0x00000068, | ||
128 | 0xfccc0000, | ||
129 | 0x0000006c, | ||
130 | 0x00000000, | ||
131 | 0x00000070, | ||
132 | 0x00000000, | ||
133 | 0x00000004, | ||
134 | 0xffffff00, | ||
135 | 0x00000008, | ||
136 | 0x00000000, | ||
137 | 0x0000000c, | ||
138 | 0x00000000, | ||
139 | 0x00000800, | ||
140 | }; | ||
141 | |||
142 | uint32_t nvc0_pcopy_code[] = { | ||
143 | 0x04fe04bd, | ||
144 | 0x3517f000, | ||
145 | 0xf10010fe, | ||
146 | 0xf1040017, | ||
147 | 0xf0fff327, | ||
148 | 0x22d00023, | ||
149 | 0x0c25f0c0, | ||
150 | 0xf40012d0, | ||
151 | 0x17f11031, | ||
152 | 0x27f01200, | ||
153 | 0x0012d003, | ||
154 | 0xf40031f4, | ||
155 | 0x0ef40028, | ||
156 | 0x8001cffd, | ||
157 | 0xf40812c4, | ||
158 | 0x21f4060b, | ||
159 | 0x0412c4ca, | ||
160 | 0xf5070bf4, | ||
161 | 0xc4010221, | ||
162 | 0x01d00c11, | ||
163 | 0xf101f840, | ||
164 | 0xfe770047, | ||
165 | 0x47f1004b, | ||
166 | 0x44cf2100, | ||
167 | 0x0144f000, | ||
168 | 0xb60444b6, | ||
169 | 0xf7f13040, | ||
170 | 0xf4b6061c, | ||
171 | 0x1457f106, | ||
172 | 0x00f5d101, | ||
173 | 0xb6043594, | ||
174 | 0x57fe0250, | ||
175 | 0x0145fe00, | ||
176 | 0x010052b7, | ||
177 | 0x00ff67f1, | ||
178 | 0x56fd60bd, | ||
179 | 0x0253f004, | ||
180 | 0xf80545fa, | ||
181 | 0x0053f003, | ||
182 | 0xd100e7f0, | ||
183 | 0x549800fe, | ||
184 | 0x0845b600, | ||
185 | 0xb6015698, | ||
186 | 0x46fd1864, | ||
187 | 0x0047fe05, | ||
188 | 0xf00204b9, | ||
189 | 0x01f40643, | ||
190 | 0x0604fa09, | ||
191 | 0xfa060ef4, | ||
192 | 0x03f80504, | ||
193 | 0x27f100f8, | ||
194 | 0x23cf1400, | ||
195 | 0x1e3fc800, | ||
196 | 0xf4170bf4, | ||
197 | 0x21f40132, | ||
198 | 0x1e3af053, | ||
199 | 0xf00023d0, | ||
200 | 0x24d00147, | ||
201 | 0xcf00f880, | ||
202 | 0x3dc84023, | ||
203 | 0x090bf41e, | ||
204 | 0xf40131f4, | ||
205 | 0x37f05321, | ||
206 | 0x8023d002, | ||
207 | 0x37f100f8, | ||
208 | 0x32cf1900, | ||
209 | 0x0033cf40, | ||
210 | 0x07ff24e4, | ||
211 | 0xf11024b6, | ||
212 | 0xbd010057, | ||
213 | 0x5874bd64, | ||
214 | 0x57580056, | ||
215 | 0x0450b601, | ||
216 | 0xf40446b8, | ||
217 | 0x76bb4d08, | ||
218 | 0x0447b800, | ||
219 | 0xbb0f08f4, | ||
220 | 0x74b60276, | ||
221 | 0x0057bb03, | ||
222 | 0xbbdf0ef4, | ||
223 | 0x44b60246, | ||
224 | 0x0045bb03, | ||
225 | 0xfd014598, | ||
226 | 0x54b00453, | ||
227 | 0x201bf400, | ||
228 | 0x58004558, | ||
229 | 0x64b00146, | ||
230 | 0x091bf400, | ||
231 | 0xf4005380, | ||
232 | 0x32f4300e, | ||
233 | 0xf455f901, | ||
234 | 0x0ef40c01, | ||
235 | 0x0225f025, | ||
236 | 0xf10125f0, | ||
237 | 0xd0100047, | ||
238 | 0x43d00042, | ||
239 | 0x4027f040, | ||
240 | 0xcf0002d0, | ||
241 | 0x24f08002, | ||
242 | 0x0024b040, | ||
243 | 0xf1f71bf4, | ||
244 | 0xf01d0027, | ||
245 | 0x23d00137, | ||
246 | 0xf800f800, | ||
247 | 0x0027f100, | ||
248 | 0xf034bd22, | ||
249 | 0x23d00233, | ||
250 | 0xf400f800, | ||
251 | 0x01b0f030, | ||
252 | 0x0101b000, | ||
253 | 0xb00201b0, | ||
254 | 0x04980301, | ||
255 | 0x3045c71a, | ||
256 | 0xc70150b6, | ||
257 | 0x60b63446, | ||
258 | 0x3847c701, | ||
259 | 0xf40170b6, | ||
260 | 0x84bd0232, | ||
261 | 0x4ac494bd, | ||
262 | 0x0445b60f, | ||
263 | 0xa430b4bd, | ||
264 | 0x0f18f404, | ||
265 | 0xbbc0a5ff, | ||
266 | 0x31f400cb, | ||
267 | 0x220ef402, | ||
268 | 0xf00c1bf4, | ||
269 | 0xcbbb10c7, | ||
270 | 0x160ef400, | ||
271 | 0xf406a430, | ||
272 | 0xc7f00c18, | ||
273 | 0x00cbbb14, | ||
274 | 0xf1070ef4, | ||
275 | 0x380080c7, | ||
276 | 0x80b601c8, | ||
277 | 0x01b0b601, | ||
278 | 0xf404b5b8, | ||
279 | 0x90b6c308, | ||
280 | 0x0497b801, | ||
281 | 0xfdb208f4, | ||
282 | 0x06800065, | ||
283 | 0x1d08980e, | ||
284 | 0xf40068fd, | ||
285 | 0x64bd0502, | ||
286 | 0x800075fd, | ||
287 | 0x78fd1907, | ||
288 | 0x1057f100, | ||
289 | 0x0654b608, | ||
290 | 0xd00056d0, | ||
291 | 0x50b74057, | ||
292 | 0x06980800, | ||
293 | 0x0162b619, | ||
294 | 0x980864b6, | ||
295 | 0x72b60e07, | ||
296 | 0x0567fd01, | ||
297 | 0xb70056d0, | ||
298 | 0xb4010050, | ||
299 | 0x56d00060, | ||
300 | 0x0160b400, | ||
301 | 0xb44056d0, | ||
302 | 0x56d00260, | ||
303 | 0x0360b480, | ||
304 | 0xb7c056d0, | ||
305 | 0x98040050, | ||
306 | 0x56d01b06, | ||
307 | 0x1c069800, | ||
308 | 0xf44056d0, | ||
309 | 0x00f81030, | ||
310 | 0xc7075798, | ||
311 | 0x78c76879, | ||
312 | 0x0380b664, | ||
313 | 0xb06077c7, | ||
314 | 0x1bf40e76, | ||
315 | 0x0477f009, | ||
316 | 0xf00f0ef4, | ||
317 | 0x70b6027c, | ||
318 | 0x0947fd11, | ||
319 | 0x980677f0, | ||
320 | 0x5b980c5a, | ||
321 | 0x00abfd0e, | ||
322 | 0xbb01b7f0, | ||
323 | 0xb2b604b7, | ||
324 | 0xc4abff01, | ||
325 | 0x9805a7bb, | ||
326 | 0xe7f00d5d, | ||
327 | 0x04e8bb01, | ||
328 | 0xff01e2b6, | ||
329 | 0xd8bbb4de, | ||
330 | 0x01e0b605, | ||
331 | 0xbb0cef94, | ||
332 | 0xfefd02eb, | ||
333 | 0x026cf005, | ||
334 | 0x020860b7, | ||
335 | 0xd00864b6, | ||
336 | 0xb7bb006f, | ||
337 | 0x00cbbb04, | ||
338 | 0x98085f98, | ||
339 | 0xfbfd0e5b, | ||
340 | 0x01b7f000, | ||
341 | 0xb604b7bb, | ||
342 | 0xfbbb01b2, | ||
343 | 0x05f7bb00, | ||
344 | 0x5f98f0f9, | ||
345 | 0x01b7f009, | ||
346 | 0xb604b8bb, | ||
347 | 0xfbbb01b2, | ||
348 | 0x05f8bb00, | ||
349 | 0x78bbf0f9, | ||
350 | 0x0282b600, | ||
351 | 0xbb01b7f0, | ||
352 | 0xb9bb04b8, | ||
353 | 0x0b589804, | ||
354 | 0xbb01e7f0, | ||
355 | 0xe2b604e9, | ||
356 | 0xf48eff01, | ||
357 | 0xbb04f7bb, | ||
358 | 0x79bb00cf, | ||
359 | 0x0589bb00, | ||
360 | 0x90fcf0fc, | ||
361 | 0xbb00d9fd, | ||
362 | 0x89fd00ad, | ||
363 | 0x008ffd00, | ||
364 | 0xbb00a8bb, | ||
365 | 0x92b604a7, | ||
366 | 0x0497bb01, | ||
367 | 0x988069d0, | ||
368 | 0x58980557, | ||
369 | 0x00acbb04, | ||
370 | 0xb6007abb, | ||
371 | 0x84b60081, | ||
372 | 0x058bfd10, | ||
373 | 0x060062b7, | ||
374 | 0xb70067d0, | ||
375 | 0xd0040060, | ||
376 | 0x00f80068, | ||
377 | 0xb7026cf0, | ||
378 | 0xb6020260, | ||
379 | 0x57980864, | ||
380 | 0x0067d005, | ||
381 | 0x040060b7, | ||
382 | 0xb6045798, | ||
383 | 0x67d01074, | ||
384 | 0x0060b700, | ||
385 | 0x06579804, | ||
386 | 0xf80067d0, | ||
387 | 0xf900f900, | ||
388 | 0x0007f110, | ||
389 | 0x0604b608, | ||
390 | 0xf00001cf, | ||
391 | 0x1bf40114, | ||
392 | 0xfc10fcfa, | ||
393 | 0xc800f800, | ||
394 | 0x1bf40d34, | ||
395 | 0xd121f570, | ||
396 | 0x0c47f103, | ||
397 | 0x0644b608, | ||
398 | 0xb6020598, | ||
399 | 0x45d00450, | ||
400 | 0x4040d000, | ||
401 | 0xd00c57f0, | ||
402 | 0x40b78045, | ||
403 | 0x05980400, | ||
404 | 0x1054b601, | ||
405 | 0xb70045d0, | ||
406 | 0xf1050040, | ||
407 | 0xf00b0057, | ||
408 | 0x45d00153, | ||
409 | 0x4057f100, | ||
410 | 0x0154b640, | ||
411 | 0x808053f1, | ||
412 | 0xf14045d0, | ||
413 | 0xf1111057, | ||
414 | 0xd0131253, | ||
415 | 0x57f18045, | ||
416 | 0x53f11514, | ||
417 | 0x45d01716, | ||
418 | 0x0157f1c0, | ||
419 | 0x0153f026, | ||
420 | 0x080047f1, | ||
421 | 0xd00644b6, | ||
422 | 0x21f50045, | ||
423 | 0x47f103d1, | ||
424 | 0x44b6080c, | ||
425 | 0x02059806, | ||
426 | 0xd00045d0, | ||
427 | 0x57f04040, | ||
428 | 0x8045d004, | ||
429 | 0x040040b7, | ||
430 | 0xb6010598, | ||
431 | 0x45d01054, | ||
432 | 0x0040b700, | ||
433 | 0x0057f105, | ||
434 | 0x0045d003, | ||
435 | 0x111057f1, | ||
436 | 0x131253f1, | ||
437 | 0x984045d0, | ||
438 | 0x40b70305, | ||
439 | 0x45d00500, | ||
440 | 0x0157f100, | ||
441 | 0x0153f026, | ||
442 | 0x080047f1, | ||
443 | 0xd00644b6, | ||
444 | 0x00f80045, | ||
445 | 0x03d121f5, | ||
446 | 0xf4003fc8, | ||
447 | 0x21f50e0b, | ||
448 | 0x47f101af, | ||
449 | 0x0ef40200, | ||
450 | 0x1067f11e, | ||
451 | 0x0664b608, | ||
452 | 0x800177f0, | ||
453 | 0x07800e07, | ||
454 | 0x1d079819, | ||
455 | 0xd00067d0, | ||
456 | 0x44bd4067, | ||
457 | 0xbd0232f4, | ||
458 | 0x043fc854, | ||
459 | 0xf50a0bf4, | ||
460 | 0xf403a821, | ||
461 | 0x21f50a0e, | ||
462 | 0x49f0029c, | ||
463 | 0x0231f407, | ||
464 | 0xc82c57f0, | ||
465 | 0x0bf4083f, | ||
466 | 0xa821f50a, | ||
467 | 0x0a0ef403, | ||
468 | 0x029c21f5, | ||
469 | 0xf10849f0, | ||
470 | 0xb6080057, | ||
471 | 0x06980654, | ||
472 | 0x4056d01e, | ||
473 | 0xf14167f0, | ||
474 | 0xfd440063, | ||
475 | 0x54d00546, | ||
476 | 0x0c3fc800, | ||
477 | 0xf5070bf4, | ||
478 | 0xf803eb21, | ||
479 | 0x0027f100, | ||
480 | 0xf034bd22, | ||
481 | 0x23d00133, | ||
482 | 0x0000f800, | ||
483 | 0x00000000, | ||
484 | 0x00000000, | ||
485 | 0x00000000, | ||
486 | 0x00000000, | ||
487 | 0x00000000, | ||
488 | 0x00000000, | ||
489 | 0x00000000, | ||
490 | 0x00000000, | ||
491 | 0x00000000, | ||
492 | 0x00000000, | ||
493 | 0x00000000, | ||
494 | 0x00000000, | ||
495 | 0x00000000, | ||
496 | 0x00000000, | ||
497 | 0x00000000, | ||
498 | 0x00000000, | ||
499 | 0x00000000, | ||
500 | 0x00000000, | ||
501 | 0x00000000, | ||
502 | 0x00000000, | ||
503 | 0x00000000, | ||
504 | 0x00000000, | ||
505 | 0x00000000, | ||
506 | 0x00000000, | ||
507 | 0x00000000, | ||
508 | 0x00000000, | ||
509 | 0x00000000, | ||
510 | 0x00000000, | ||
511 | 0x00000000, | ||
512 | 0x00000000, | ||
513 | 0x00000000, | ||
514 | 0x00000000, | ||
515 | 0x00000000, | ||
516 | 0x00000000, | ||
517 | 0x00000000, | ||
518 | 0x00000000, | ||
519 | 0x00000000, | ||
520 | 0x00000000, | ||
521 | 0x00000000, | ||
522 | 0x00000000, | ||
523 | 0x00000000, | ||
524 | 0x00000000, | ||
525 | 0x00000000, | ||
526 | 0x00000000, | ||
527 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c new file mode 100644 index 00000000000..08e6b118f02 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_fb.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "drm.h" | ||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_drm.h" | ||
29 | |||
30 | struct nvc0_fb_priv { | ||
31 | struct page *r100c10_page; | ||
32 | dma_addr_t r100c10; | ||
33 | }; | ||
34 | |||
/* Tear down NVC0 PFB private state: unmap and free the 0x100c10
 * scratch page (if it was ever allocated) and release the priv
 * structure.  Safe to call from a partially-failed nvc0_fb_create().
 */
static void
nvc0_fb_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nvc0_fb_priv *priv = pfb->priv;

	if (priv->r100c10_page) {
		pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->r100c10_page);
	}

	kfree(priv);
	pfb->priv = NULL;	/* allow a later nvc0_fb_init() to re-create */
}
51 | |||
/* Allocate PFB private state: one zeroed page, DMA-mapped
 * bidirectionally, whose bus address is later written to register
 * 0x100c10 by nvc0_fb_init().
 *
 * Returns 0 on success or a negative errno; on any failure the
 * partially-built state is torn down via nvc0_fb_destroy().
 */
static int
nvc0_fb_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nvc0_fb_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pfb->priv = priv;	/* published early so destroy() can find it */

	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!priv->r100c10_page) {
		nvc0_fb_destroy(dev);
		return -ENOMEM;
	}

	priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
		nvc0_fb_destroy(dev);
		return -EFAULT;
	}

	return 0;
}
79 | |||
/* PFB engine init hook: create the private state on first call
 * (it survives suspend/resume), then point register 0x100c10 at the
 * scratch page's bus address (shifted right 8 bits, i.e. the register
 * holds the address in 256-byte units).
 *
 * Returns 0 on success or the errno from nvc0_fb_create().
 */
int
nvc0_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fb_priv *priv;
	int ret;

	if (!dev_priv->engine.fb.priv) {
		ret = nvc0_fb_create(dev);
		if (ret)
			return ret;
	}
	priv = dev_priv->engine.fb.priv;

	nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
	return 0;
}
97 | |||
/* PFB engine takedown hook: just free the private state. */
void
nvc0_fb_takedown(struct drm_device *dev)
{
	nvc0_fb_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c new file mode 100644 index 00000000000..6f9f341c3e8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c | |||
@@ -0,0 +1,508 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_mm.h" | ||
29 | |||
30 | static void nvc0_fifo_isr(struct drm_device *); | ||
31 | |||
struct nvc0_fifo_priv {
	struct nouveau_gpuobj *playlist[2];	/* double-buffered runlist */
	int cur_playlist;			/* index of buffer to fill next */
	struct nouveau_vma user_vma;		/* BAR1 window over per-channel user regs */
	int spoon_nr;				/* number of enabled PSUBFIFO units */
};
38 | |||
struct nvc0_fifo_chan {
	struct nouveau_gpuobj *user;	/* per-channel control-reg page, mapped via BAR1 */
	struct nouveau_gpuobj *ramfc;	/* fake gpuobj over the channel's RAMFC in ramin */
};
43 | |||
/* Rebuild the PFIFO playlist from the set of currently-enabled
 * channels and hand it to the hardware.  Two playlist buffers are
 * alternated so the one the hardware may still be reading is never
 * rewritten in place.
 */
static void
nvc0_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv = pfifo->priv;
	struct nouveau_gpuobj *cur;
	int i, p;

	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	/* one 8-byte entry per channel whose enable bit (0x3004+i*8 bit 0)
	 * is set */
	for (i = 0, p = 0; i < 128; i++) {
		if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
			continue;
		nv_wo32(cur, p + 0, i);
		nv_wo32(cur, p + 4, 0x00000004);
		p += 8;
	}
	pinstmem->flush(dev);

	nv_wr32(dev, 0x002270, cur->vinst >> 12);
	nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));	/* entry count */
	if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
		NV_ERROR(dev, "PFIFO - playlist update failed\n");
}
71 | |||
/* Engine hook: intentionally a no-op on NVC0. */
void
nvc0_fifo_disable(struct drm_device *dev)
{
}
76 | |||
/* Engine hook: intentionally a no-op on NVC0. */
void
nvc0_fifo_enable(struct drm_device *dev)
{
}
81 | |||
/* Engine hook: no-op on NVC0; always reports the previous state as
 * "disabled" (false).
 */
bool
nvc0_fifo_reassign(struct drm_device *dev, bool enable)
{
	return false;
}
87 | |||
/* Engine hook: no-op on NVC0; always reports the previous state as
 * "disabled" (false).
 */
bool
nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
{
	return false;
}
93 | |||
/* Report the "current" channel id.  Always 127 here — presumably the
 * highest/kernel-reserved channel; NVC0 has no single active-channel
 * register the way earlier chips do (TODO confirm).
 */
int
nvc0_fifo_channel_id(struct drm_device *dev)
{
	return 127;
}
99 | |||
/* Create the PFIFO state for a channel: allocate its user control-reg
 * page (mapped through the shared BAR1 user_vma so userspace can poll
 * it), build its RAMFC inside the channel's ramin, then enable the
 * channel and regenerate the playlist.
 *
 * Returns 0 on success or a negative errno; on failure all partial
 * state is released through pfifo->destroy_context().
 */
int
nvc0_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv = pfifo->priv;
	struct nvc0_fifo_chan *fifoch;
	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
	int ret;

	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
	if (!chan->fifo_priv)
		return -ENOMEM;
	fifoch = chan->fifo_priv;

	/* allocate vram for control regs, map into polling area */
	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
	if (ret)
		goto error;

	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
			  *(struct nouveau_mem **)fifoch->user->node);

	/* CPU mapping of the channel's 4KiB slot in the BAR1 window */
	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
				priv->user_vma.offset + (chan->id * 0x1000),
				PAGE_SIZE);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	/* ramfc */
	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
				      chan->ramin->vinst, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
	if (ret)
		goto error;

	/* program the RAMFC: user-page address, indirect-buffer base and
	 * order, plus fixed configuration values */
	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
				     upper_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
	pinstmem->flush(dev);

	/* point the channel slot at its instance block and enable it */
	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
						(chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
	nvc0_fifo_playlist_update(dev);
	return 0;

error:
	pfifo->destroy_context(chan);
	return ret;
}
170 | |||
/* Destroy a channel's PFIFO state: disable the channel, kick it off
 * the hardware (write its id to 0x2634 and wait for the readback),
 * drop it from the playlist, then unmap and free all per-channel
 * objects.  Tolerates partially-created channels (NULL fifo_priv).
 */
void
nvc0_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nvc0_fifo_chan *fifoch;

	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
	nv_wr32(dev, 0x002634, chan->id);
	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));

	nvc0_fifo_playlist_update(dev);

	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);

	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	fifoch = chan->fifo_priv;
	chan->fifo_priv = NULL;
	if (!fifoch)
		return;

	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
	nouveau_gpuobj_ref(NULL, &fifoch->user);
	kfree(fifoch);
}
200 | |||
/* Engine hook: nothing to do on NVC0 — channel state is picked up via
 * the playlist rather than an explicit load.  Always succeeds.
 */
int
nvc0_fifo_load_context(struct nouveau_channel *chan)
{
	return 0;
}
206 | |||
207 | int | ||
208 | nvc0_fifo_unload_context(struct drm_device *dev) | ||
209 | { | ||
210 | int i; | ||
211 | |||
212 | for (i = 0; i < 128; i++) { | ||
213 | if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1)) | ||
214 | continue; | ||
215 | |||
216 | nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000); | ||
217 | nv_wr32(dev, 0x002634, i); | ||
218 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { | ||
219 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", | ||
220 | i, nv_rd32(dev, 0x002634)); | ||
221 | return -EBUSY; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | return 0; | ||
226 | } | ||
227 | |||
/* Free all PFIFO private state: the BAR1 user window and both playlist
 * buffers.  Safe to call with partially-created state (used as the
 * error path of nvc0_fifo_create()) or when no state exists.
 */
static void
nvc0_fifo_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;

	priv = pfifo->priv;
	if (!priv)
		return;

	nouveau_vm_put(&priv->user_vma);
	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
	kfree(priv);
}
244 | |||
/* PFIFO engine takedown: mask all PFIFO interrupts (0x2140), then free
 * the private state.
 */
void
nvc0_fifo_takedown(struct drm_device *dev)
{
	nv_wr32(dev, 0x002140, 0x00000000);
	nvc0_fifo_destroy(dev);
}
251 | |||
/* Allocate PFIFO private state: two 4KiB playlist buffers, a BAR1
 * virtual range large enough for one 4KiB user page per channel, the
 * PFIFO interrupt handler (irq line 8) and the software object class.
 *
 * Returns 0 on success or a negative errno; failures are cleaned up
 * through nvc0_fifo_destroy().
 */
static int
nvc0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pfifo->priv = priv;	/* published early for the error path */

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
				 &priv->playlist[0]);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
				 &priv->playlist[1]);
	if (ret)
		goto error;

	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
	if (ret)
		goto error;

	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	return 0;

error:
	nvc0_fifo_destroy(dev);
	return ret;
}
288 | |||
/* PFIFO engine init: create private state on first call, reset the
 * unit, configure the subfifos and interrupt masks, then re-enable
 * every channel that already has fifo state (resume path) and rebuild
 * the playlist.
 *
 * Returns 0 on success or the errno from nvc0_fifo_create().
 */
int
nvc0_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct nvc0_fifo_priv *priv;
	int ret, i;

	if (!pfifo->priv) {
		ret = nvc0_fifo_create(dev);
		if (ret)
			return ret;
	}
	priv = pfifo->priv;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);
	nv_wr32(dev, 0x002204, 0xffffffff);

	/* hardware clears unavailable subfifo bits; count what remains */
	priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

	/* assign engines to subfifos */
	if (priv->spoon_nr >= 3) {
		nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
		nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
		nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
		nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
		nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
		nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PSUBFIFO[n] */
	for (i = 0; i < 3; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
	/* tell the hardware where the BAR1 user-page window lives */
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);

	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xbfffffff);

	/* restore PFIFO context table */
	for (i = 0; i < 128; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->fifo_priv)
			continue;

		nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
						 (chan->ramin->vinst >> 12));
		nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
	}
	nvc0_fifo_playlist_update(dev);

	return 0;
}
352 | |||
/* Decode table: VM fault source unit, indexed by the fault-unit bit
 * position reported in 0x00259c (printed by nvc0_fifo_isr_vm_fault).
 */
struct nouveau_enum nvc0_fifo_fault_unit[] = {
	{ 0x00, "PGRAPH" },
	{ 0x03, "PEEPHOLE" },
	{ 0x04, "BAR1" },
	{ 0x05, "BAR3" },
	{ 0x07, "PFIFO" },
	{ 0x10, "PBSP" },
	{ 0x11, "PPPP" },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PVP" },
	{ 0x15, "PCOPY0" },
	{ 0x16, "PCOPY1" },
	{ 0x17, "PDAEMON" },
	{}
};
368 | |||
/* Decode table: VM fault reason, from the low 4 bits of the fault
 * status register (0x280c + unit*0x10).
 */
struct nouveau_enum nvc0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};
381 | |||
/* Decode table: HUB client id for VM faults (used when fault status
 * bit 6 is set — see nvc0_fifo_isr_vm_fault()).
 */
struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PPPP" },
	{ 0x0d, "PBSP" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};
399 | |||
/* Decode table: GPC client id for VM faults (used when fault status
 * bit 6 is clear — see nvc0_fifo_isr_vm_fault()).
 */
struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};
407 | |||
/* Decode table: PSUBFIFO interrupt status bits (register
 * 0x040108 + unit*0x2000), printed by nvc0_fifo_isr_subfifo_intr().
 */
struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
414 | |||
/* Log a VM fault for the given fault unit: read the per-unit fault
 * info registers (0x2800 + unit*0x10), then print the faulting
 * address, reason, source unit and client decoded through the tables
 * above.
 */
static void
nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
	u32 client = (stat & 0x00001f00) >> 8;

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
	/* bit 6 selects HUB- vs GPC-client decoding */
	if (stat & 0x00000040) {
		printk("/");
		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
	} else {
		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
	}
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
438 | |||
439 | static void | ||
440 | nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) | ||
441 | { | ||
442 | u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000)); | ||
443 | u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000)); | ||
444 | u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000)); | ||
445 | u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; | ||
446 | u32 subc = (addr & 0x00070000); | ||
447 | u32 mthd = (addr & 0x00003ffc); | ||
448 | |||
449 | NV_INFO(dev, "PSUBFIFO %d:", unit); | ||
450 | nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat); | ||
451 | NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", | ||
452 | unit, chid, subc, mthd, data); | ||
453 | |||
454 | nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); | ||
455 | nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); | ||
456 | } | ||
457 | |||
/* Top-level PFIFO interrupt handler: dispatch each pending status bit
 * in 0x002100 — VM faults (bit 28, one sub-handler per set bit in
 * 0x259c) and subfifo interrupts (bit 29, per set bit in 0x25a0) —
 * and mask off (0x2140) anything left unhandled so it cannot storm.
 */
static void
nvc0_fifo_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x002100);

	if (stat & 0x00000100) {
		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
		nv_wr32(dev, 0x002100, 0x00000100);	/* ack */
		stat &= ~0x00000100;
	}

	if (stat & 0x10000000) {
		u32 units = nv_rd32(dev, 0x00259c);
		u32 u = units;

		/* one VM-fault handler call per set unit bit */
		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_vm_fault(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x00259c, units);	/* ack all handled units */
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 units = nv_rd32(dev, 0x0025a0);
		u32 u = units;

		/* one subfifo handler call per set unit bit */
		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_subfifo_intr(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x0025a0, units);	/* ack all handled units */
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
		stat &= ~0x40000000;
	}

	if (stat) {
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, 0x002100, stat);
		nv_wr32(dev, 0x002140, 0);	/* mask everything: avoid irq storm */
	}
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c new file mode 100644 index 00000000000..5b2f6f42046 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c | |||
@@ -0,0 +1,908 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include <linux/firmware.h> | ||
26 | |||
27 | #include "drmP.h" | ||
28 | |||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_mm.h" | ||
31 | |||
32 | #include "nvc0_graph.h" | ||
33 | #include "nvc0_grhub.fuc.h" | ||
34 | #include "nvc0_grgpc.fuc.h" | ||
35 | |||
/* Dump the done/stat registers of one PGRAPH CTXCTL unit (HUB or GPC)
 * at the given register base, for debugging failed ucode handshakes.
 */
static void
nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
	NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
		nv_rd32(dev, base + 0x400));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
		nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
		nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}
48 | |||
49 | static void | ||
50 | nvc0_graph_ctxctl_debug(struct drm_device *dev) | ||
51 | { | ||
52 | u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff; | ||
53 | u32 gpc; | ||
54 | |||
55 | nvc0_graph_ctxctl_debug_unit(dev, 0x409000); | ||
56 | for (gpc = 0; gpc < gpcnr; gpc++) | ||
57 | nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000)); | ||
58 | } | ||
59 | |||
/* Ask the ctxctl ucode to bind PGRAPH to the channel's instance block
 * (command 3 via 0x409504).  Only warns on timeout; always returns 0.
 */
static int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, 0x409840, 0x00000030);
	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
	nv_wr32(dev, 0x409504, 0x00000003);
	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

	return 0;
}
73 | |||
/* Ask the ctxctl ucode to save the current PGRAPH context into the
 * instance block at vram address 'chan' (command 9 via 0x409504).
 * Returns 0 on success, -EBUSY if the ucode never signals completion.
 */
static int
nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
	nv_wr32(dev, 0x409840, 0x00000003);
	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
	nv_wr32(dev, 0x409504, 0x00000009);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
		return -EBUSY;
	}

	return 0;
}
87 | |||
/* Build the "golden" context image used as the template for every
 * subsequently-created channel: bind the channel, run the context
 * generator, save the context back, and snapshot the result into
 * priv->grctx_vals.  Two paths exist depending on nouveau_ctxfw:
 * the internal fuc ucode (HUB_SET_CHAN/HUB_CTX_SAVE handshakes) or
 * NVIDIA's external ctxsw firmware.
 *
 * Returns 0 on success or a negative errno; the snapshot buffer is
 * freed on failure.  Called once, from nvc0_graph_context_new().
 */
static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (!nouveau_ctxfw) {
		/* internal ucode: HUB_SET_CHAN handshake */
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000001);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
		nvc0_graph_load_context(chan);

		/* clear the ctxsw-firmware bookkeeping words */
		nv_wo32(grch->grctx, 0x1c, 1);
		nv_wo32(grch->grctx, 0x20, 0);
		nv_wo32(grch->grctx, 0x28, 0);
		nv_wo32(grch->grctx, 0x2c, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	ret = nvc0_grctx_generate(chan);
	if (ret)
		goto err;

	if (!nouveau_ctxfw) {
		/* internal ucode: HUB_CTX_SAVE handshake */
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000002);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
		ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
		if (ret)
			goto err;
	}

	/* snapshot the saved context image word by word */
	for (i = 0; i < priv->grctx_size; i += 4)
		ctx[i / 4] = nv_ro32(grch->grctx, i);

	priv->grctx_vals = ctx;
	return 0;

err:
	kfree(ctx);
	return ret;
}
152 | |||
/* Allocate the per-channel PGRAPH buffers and build the mmio list —
 * an array of (register, value) pairs the context-switch ucode
 * replays on every channel switch to point PGRAPH at those buffers.
 *
 * Returns 0 on success or a negative errno; partially-created objects
 * are left for the caller's error path (context_del) to release.
 */
static int
nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int i = 0, gpc, tp, ret;
	u32 magic;

	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	if (ret)
		return ret;

	/* each pair below is: register address, then value to write */
	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);

	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
	nv_wo32(grch->mmio, i++ * 4, 0x00000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	/* per-TP entries with a magic value stepping by 0x324 per TP */
	magic = 0x02180000;
	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
	nv_wo32(grch->mmio, i++ * 4, magic);
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
			nv_wo32(grch->mmio, i++ * 4, reg);
			nv_wo32(grch->mmio, i++ * 4, magic);
		}
	}

	grch->mmio_nr = i / 2;	/* number of (reg, value) pairs written */
	return 0;
}
223 | |||
/* PGRAPH context_new engine hook: allocate a channel's graphics
 * context, link it into the channel's instance block (0x210/0x214),
 * initialise it from the golden image (building that image first if
 * this is the very first channel), and patch in the mmio-list
 * pointers for whichever context-switch path is in use.
 *
 * Returns 0 on success or a negative errno; failures are unwound via
 * the engine's context_del hook.
 */
static int
nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nvc0_graph_priv *priv = nv_engine(dev, engine);
	struct nvc0_graph_chan *grch;
	struct nouveau_gpuobj *grctx;
	int ret, i;

	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
	if (!grch)
		return -ENOMEM;
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
				 &grch->grctx);
	if (ret)
		goto error;
	grctx = grch->grctx;

	ret = nvc0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	/* link the grctx into the channel's instance block */
	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
	pinstmem->flush(dev);

	/* first channel ever: generate the golden context image */
	if (!priv->grctx_vals) {
		ret = nvc0_graph_construct_context(chan);
		if (ret)
			goto error;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);

	/* mmio-list location: header layout differs between the internal
	 * ucode and NVIDIA's external ctxsw firmware */
	if (!nouveau_ctxfw) {
		nv_wo32(grctx, 0x00, grch->mmio_nr);
		nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
	} else {
		nv_wo32(grctx, 0xf4, 0);
		nv_wo32(grctx, 0xf8, 0);
		nv_wo32(grctx, 0x10, grch->mmio_nr);
		nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x1c, 1);
		nv_wo32(grctx, 0x20, 0);
		nv_wo32(grctx, 0x28, 0);
		nv_wo32(grctx, 0x2c, 0);
	}
	pinstmem->flush(dev);
	return 0;

error:
	priv->base.context_del(chan, engine);
	return ret;
}
285 | |||
/* Tear down a channel's PGRAPH context: drop every gpuobj reference
 * taken by context_new().  Each ref pointer may be NULL when setup
 * failed part-way; nouveau_gpuobj_ref(NULL, ...) handles that.
 * NOTE(review): grch itself (kzalloc in context_new) is not kfree'd
 * here — looks like a leak unless freed elsewhere; verify.
 */
static void
nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_graph_chan *grch = chan->engctx[engine];

	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
	nouveau_gpuobj_ref(NULL, &grch->unk408004);
	nouveau_gpuobj_ref(NULL, &grch->grctx);
	chan->engctx[engine] = NULL;
}
298 | |||
/* No per-object state is needed on Fermi; object creation is a no-op
 * that always succeeds. */
static int
nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	return 0;
}
305 | |||
/* Engine shutdown/suspend hook: nothing to do for Fermi PGRAPH. */
static int
nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	return 0;
}
311 | |||
/* Software method handler: a page-flip completion method from the
 * channel; signal the pending flip and consume the method. */
static int
nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
			  u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}
319 | |||
/* Program the broadcast 0x4188xx object/bind registers: clear the
 * 0x880/0x888/0x8a4 slots and point 0x8b4/0x8b8 at the two buffers
 * allocated in nvc0_graph_create() (register semantics are not
 * publicly documented; addresses are written shifted right by 8). */
static void
nvc0_graph_init_obj418880(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}
333 | |||
/* Static global PGRAPH register setup.  The values are magic numbers
 * (presumably captured from mmio traces of the proprietary driver);
 * their individual meanings are not documented. */
static void
nvc0_graph_init_regs(struct drm_device *dev)
{
	nv_wr32(dev, 0x400080, 0x003083c2);
	nv_wr32(dev, 0x400088, 0x00006fe7);
	nv_wr32(dev, 0x40008c, 0x00000000);
	nv_wr32(dev, 0x400090, 0x00000030);
	nv_wr32(dev, 0x40013c, 0x013901f7);
	nv_wr32(dev, 0x400140, 0x00000100);
	nv_wr32(dev, 0x400144, 0x00000000);
	nv_wr32(dev, 0x400148, 0x00000110);
	nv_wr32(dev, 0x400138, 0x00000000);
	nv_wr32(dev, 0x400130, 0x00000000);
	nv_wr32(dev, 0x400134, 0x00000000);
	nv_wr32(dev, 0x400124, 0x00000002);
}
350 | |||
/* First-stage GPC setup: build the global TP index table (4 bits per
 * TP, assigned round-robin across GPCs) and program per-GPC TP/ROP
 * counts plus the magicgpc918 scaling factor. */
static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
	u32 data[TP_MAX / 8];
	u8  tpnr[GPC_MAX];
	int i, gpc, tpc;

	/*
	 *      TP      ROP UNKVAL(magic_not_rop_nr)
	 * 450: 4/0/0/0 2        3
	 * 460: 3/4/0/0 4        1
	 * 465: 3/4/4/0 4        7
	 * 470: 3/3/4/4 5        5
	 * 480: 3/4/4/4 6        6
	 */

	memset(data, 0x00, sizeof(data));
	memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
	for (i = 0, gpc = -1; i < priv->tp_total; i++) {
		/* round-robin to the next GPC that still has TPs left */
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpnr[gpc]);
		/* tpc = index of the TP being assigned within its GPC */
		tpc = priv->tp_nr[gpc] - tpnr[gpc]--;

		/* pack one 4-bit entry per TP, 8 entries per register */
		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
	nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
	nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
	nv_wr32(dev, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						  priv->tp_nr[gpc]);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}
395 | |||
/* Per-unit setup: the repeated 0xc0000000 writes appear to enable
 * error/trap reporting in each unit (DISPATCH, M2MF, MACRO, ...) —
 * register semantics not publicly documented. */
static void
nvc0_graph_init_units(struct drm_device *dev)
{
	nv_wr32(dev, 0x409c24, 0x000f0000);
	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
	nv_wr32(dev, 0x408030, 0xc0000000);
	nv_wr32(dev, 0x40601c, 0xc0000000);
	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
	nv_wr32(dev, 0x406018, 0xc0000000);
	nv_wr32(dev, 0x405840, 0xc0000000);
	nv_wr32(dev, 0x405844, 0x00ffffff);
	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}
411 | |||
/* Second-stage GPC setup: per-GPC and per-TP trap/interrupt enables
 * (0xc0000000 / 0xffffffff masks), applied to every present unit. */
static void
nvc0_graph_init_gpc_1(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int gpc, tp;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
		}
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
436 | |||
/* ROP setup: enable trap/interrupt reporting on every present ROP. */
static void
nvc0_graph_init_rop(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int rop;

	for (rop = 0; rop < priv->rop_nr; rop++) {
		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
	}
}
450 | |||
/* Upload external firmware into a falcon at @fuc_base: first the data
 * segment through the 0x1c0/0x1c4 upload window, then the code segment
 * through 0x180/0x184, re-targeting the 256-byte virtual page index
 * (0x188) every 64 words. */
static void
nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
		    struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
{
	int i;

	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);

	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		/* new 64-word (256-byte) page: set its virtual index */
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
		nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
	}
}
468 | |||
/* Bring up the context-control (CTXCTL) falcons.  Two paths:
 * - internal ucode (default): upload the built-in HUB and GPC
 *   microcode and let the HUB initialise the GPCs itself;
 * - external ctxfw (nouveau_ctxfw): upload the four firmware images
 *   loaded in nvc0_graph_create() and drive the init requests by hand.
 * Both paths end with priv->grctx_size holding the context image size
 * reported by the ucode.  Returns 0 or -EBUSY on ucode timeout. */
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	u32 r000260;
	int i;

	if (!nouveau_ctxfw) {
		/* load HUB microcode */
		/* 0x000260 bit 0 is masked off around the upload and
		 * restored afterwards (presumably disables something
		 * that would interfere — semantics unverified) */
		r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
		nv_wr32(dev, 0x4091c0, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
			nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);

		nv_wr32(dev, 0x409180, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
			if ((i & 0x3f) == 0)
				nv_wr32(dev, 0x409188, i >> 6);
			nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
		}

		/* load GPC microcode */
		nv_wr32(dev, 0x41a1c0, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
			nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);

		nv_wr32(dev, 0x41a180, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
			if ((i & 0x3f) == 0)
				nv_wr32(dev, 0x41a188, i >> 6);
			nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
		}
		nv_wr32(dev, 0x000260, r000260);

		/* start HUB ucode running, it'll init the GPCs */
		nv_wr32(dev, 0x409800, dev_priv->chipset);
		nv_wr32(dev, 0x40910c, 0x00000000);
		nv_wr32(dev, 0x409100, 0x00000002);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
			nvc0_graph_ctxctl_debug(dev);
			return -EBUSY;
		}

		/* ucode reports the grctx size once init completes */
		priv->grctx_size = nv_rd32(dev, 0x409804);
		return 0;
	}

	/* load fuc microcode */
	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
	nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
	nv_wr32(dev, 0x000260, r000260);

	/* start both of them running */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x41a10c, 0x00000000);
	nv_wr32(dev, 0x40910c, 0x00000000);
	nv_wr32(dev, 0x41a100, 0x00000002);
	nv_wr32(dev, 0x409100, 0x00000002);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
		NV_INFO(dev, "0x409800 wait failed\n");

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x7fffffff);
	nv_wr32(dev, 0x409504, 0x00000021);

	/* request 0x10: ucode answers in 0x409800 with the grctx size
	 * (nonzero indicates completion) */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000010);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->grctx_size = nv_rd32(dev, 0x409800);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000016);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000025);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	return 0;
}
564 | |||
/* Engine init hook: reset PGRAPH, run the ordered register bring-up
 * stages, clear pending interrupt/trap state, then start the context
 * control falcons.  Returns 0 on success or the ctxctl error code. */
static int
nvc0_graph_init(struct drm_device *dev, int engine)
{
	/* pulse the PGRAPH-related bits in the master enable register
	 * to reset the engine */
	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

	nvc0_graph_init_obj418880(dev);
	nvc0_graph_init_regs(dev);
	/*nvc0_graph_init_unitplemented_magics(dev);*/
	nvc0_graph_init_gpc_0(dev);
	/*nvc0_graph_init_unitplemented_c242(dev);*/

	/* enable fifo access, then ack any stale INTR/trap-enable state */
	nv_wr32(dev, 0x400500, 0x00010001);
	nv_wr32(dev, 0x400100, 0xffffffff);
	nv_wr32(dev, 0x40013c, 0xffffffff);

	nvc0_graph_init_units(dev);
	nvc0_graph_init_gpc_1(dev);
	nvc0_graph_init_rop(dev);

	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400118, 0xffffffff);
	nv_wr32(dev, 0x400130, 0xffffffff);
	nv_wr32(dev, 0x40011c, 0xffffffff);
	nv_wr32(dev, 0x400134, 0xffffffff);
	nv_wr32(dev, 0x400054, 0x34ce3464);

	/* return the ctxctl result directly; the former
	 * "ret = ...; if (ret) return ret; return 0;" tail said the
	 * same thing in four lines */
	return nvc0_graph_init_ctxctl(dev);
}
601 | |||
/* Translate a channel instance address (from the PGRAPH trap info)
 * into a channel id by scanning the channel table under its lock.
 * Returns the matching channel index, or the channel count
 * (dev_priv->engine.fifo.channels) when no channel matches. */
int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}
622 | |||
/* Service a CTXCTL falcon interrupt: decode the known status bits
 * (ucode error, watchdog), dump debug state, and ack the status. */
static void
nvc0_graph_ctxctl_isr(struct drm_device *dev)
{
	u32 ustat = nv_rd32(dev, 0x409c18);

	if (ustat & 0x00000001)
		NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
	if (ustat & 0x00080000)
		NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
	if (ustat & ~0x00080001)
		NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);

	nvc0_graph_ctxctl_debug(dev);
	/* write-to-clear ack of the handled bits */
	nv_wr32(dev, 0x409c20, ustat);
}
638 | |||
/* Top-level PGRAPH interrupt handler.  Gathers the trapped method
 * context (channel, subchannel, class, method, data), then services
 * and acks each status bit in 0x400100 individually, logging anything
 * unrecognised.  Finishes by re-enabling fifo access. */
static void
nvc0_graph_isr(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x400100);
	u32 addr = nv_rd32(dev, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(dev, 0x400708);
	u32 code = nv_rd32(dev, 0x400110);
	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

	/* ILLEGAL_MTHD: give software methods a chance first; only log
	 * if no handler claimed it */
	if (stat & 0x00000010) {
		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
		nv_wr32(dev, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		u32 trap = nv_rd32(dev, 0x400108);
		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
		nv_wr32(dev, 0x400108, trap);
		nv_wr32(dev, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		nvc0_graph_ctxctl_isr(dev);
		nv_wr32(dev, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
		nv_wr32(dev, 0x400100, stat);
	}

	/* re-enable fifo access */
	nv_wr32(dev, 0x400500, 0x00010001);
}
702 | |||
/* Interrupt handler for the PRUNK140 units: for each unit flagged in
 * 0x17c, read (and thereby, via nv_mask with a zero mask, ack) the
 * two status words and log them at debug level. */
static void
nvc0_runk140_isr(struct drm_device *dev)
{
	u32 units = nv_rd32(dev, 0x00017c) & 0x1f;

	while (units) {
		u32 unit = ffs(units) - 1;
		u32 reg = 0x140000 + unit * 0x2000;
		u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
		u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);

		NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
		units &= ~(1 << unit);
	}
}
718 | |||
/* Load a context firmware image into @fuc, trying the chipset-specific
 * name ("nouveau/nvXX_<fwname>") first and falling back to the generic
 * one ("nouveau/<fwname>").  The firmware contents are copied so the
 * struct firmware can be released immediately.  Returns 0, a
 * request_firmware error, or -ENOMEM if the copy fails. */
static int
nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
		     struct nvc0_graph_fuc *fuc)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	const struct firmware *fw;
	char f[32];
	int ret;

	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
	ret = request_firmware(&fw, f, &dev->pdev->dev);
	if (ret) {
		snprintf(f, sizeof(f), "nouveau/%s", fwname);
		ret = request_firmware(&fw, f, &dev->pdev->dev);
		if (ret) {
			NV_ERROR(dev, "failed to load %s\n", fwname);
			return ret;
		}
	}

	fuc->size = fw->size;
	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
	release_firmware(fw);
	return (fuc->data != NULL) ? 0 : -ENOMEM;
}
744 | |||
745 | static void | ||
746 | nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc) | ||
747 | { | ||
748 | if (fuc->data) { | ||
749 | kfree(fuc->data); | ||
750 | fuc->data = NULL; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | static void | ||
755 | nvc0_graph_destroy(struct drm_device *dev, int engine) | ||
756 | { | ||
757 | struct nvc0_graph_priv *priv = nv_engine(dev, engine); | ||
758 | |||
759 | if (nouveau_ctxfw) { | ||
760 | nvc0_graph_destroy_fw(&priv->fuc409c); | ||
761 | nvc0_graph_destroy_fw(&priv->fuc409d); | ||
762 | nvc0_graph_destroy_fw(&priv->fuc41ac); | ||
763 | nvc0_graph_destroy_fw(&priv->fuc41ad); | ||
764 | } | ||
765 | |||
766 | nouveau_irq_unregister(dev, 12); | ||
767 | nouveau_irq_unregister(dev, 25); | ||
768 | |||
769 | nouveau_gpuobj_ref(NULL, &priv->unk4188b8); | ||
770 | nouveau_gpuobj_ref(NULL, &priv->unk4188b4); | ||
771 | |||
772 | if (priv->grctx_vals) | ||
773 | kfree(priv->grctx_vals); | ||
774 | |||
775 | NVOBJ_ENGINE_DEL(dev, GR); | ||
776 | kfree(priv); | ||
777 | } | ||
778 | |||
/* Create and register the Fermi PGRAPH engine: probe the GPC/TP/ROP
 * topology, allocate the shared 0x4188b4/b8 buffers, optionally load
 * external context firmware, and register the object classes the
 * hardware supports.  Returns 0 on success (including the unsupported-
 * chipset case, where the engine is simply not registered) or a
 * negative errno. */
int
nvc0_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv;
	int ret, gpc, i;
	u32 fermi;

	fermi = nvc0_graph_class(dev);
	if (!fermi) {
		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.destroy = nvc0_graph_destroy;
	priv->base.init = nvc0_graph_init;
	priv->base.fini = nvc0_graph_fini;
	priv->base.context_new = nvc0_graph_context_new;
	priv->base.context_del = nvc0_graph_context_del;
	priv->base.object_new = nvc0_graph_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
	nouveau_irq_register(dev, 12, nvc0_graph_isr);
	nouveau_irq_register(dev, 25, nvc0_runk140_isr);

	if (nouveau_ctxfw) {
		NV_INFO(dev, "PGRAPH: using external firmware\n");
		if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
		    nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
		    nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
		    nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
			/* NOTE(review): ret = 0 means firmware failure is
			 * reported as success after teardown (driver loads
			 * without acceleration) — presumably intentional,
			 * but worth confirming */
			ret = 0;
			goto error;
		}
	}

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		goto error;

	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	/* probe topology: GPC count, ROP count, TPs per GPC */
	priv->gpc_nr  =  nv_rd32(dev, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
		priv->tp_total += priv->tp_nr[gpc];
	}

	/*XXX: these need figuring out... */
	switch (dev_priv->chipset) {
	case 0xc0:
		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
		} else
		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
		} else
		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc1: /* 2/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc8: /* 4/4/3/4, 5 */
		priv->magic_not_rop_nr = 0x06;
		break;
	case 0xce: /* 4/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x03;
		break;
	}

	if (!priv->magic_not_rop_nr) {
		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
			 priv->tp_nr[3], priv->rop_nr);
		/* use 0xc3's values... */
		priv->magic_not_rop_nr = 0x03;
	}

	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
	NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
	if (fermi >= 0x9197)
		NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
	if (fermi >= 0x9297)
		NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
	return 0;

error:
	nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
	return ret;
}
892 | |||
/* External context firmware images used when nouveau_ctxfw is set;
 * nvc0_graph_create_fw() tries the chipset-specific names first and
 * falls back to the generic ones. */
MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
MODULE_FIRMWARE("nouveau/fuc409c");
MODULE_FIRMWARE("nouveau/fuc409d");
MODULE_FIRMWARE("nouveau/fuc41ac");
MODULE_FIRMWARE("nouveau/fuc41ad");
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc new file mode 100644 index 00000000000..2a4b6dc8f9d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc | |||
@@ -0,0 +1,400 @@ | |||
1 | /* fuc microcode util functions for nvc0 PGRAPH | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
// mmctx_data: emit one mmio list entry; count minus one goes in
// bits 31:26, the register offset in the low bits
define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
// queue_init: reserve storage for a queue header (GET/PUT words)
// plus eight 2-word entries
define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
28 | |||
ifdef(`include_code', `
// Error codes
define(`E_BAD_COMMAND', 0x01)
define(`E_CMD_OVERFLOW', 0x02)

// Util macros to help with debugging ucode hangs etc
// Each T_* token is a bit index in CC_SCRATCH[7], held while the
// named operation is in progress (see trace_set / trace_clr)
define(`T_WAIT', 0)
define(`T_MMCTX', 1)
define(`T_STRWAIT', 2)
define(`T_STRINIT', 3)
define(`T_AUTO', 4)
define(`T_CHAN', 5)
define(`T_LOAD', 6)
define(`T_SAVE', 7)
define(`T_LCHAN', 8)
define(`T_LCTXH', 9)
45 | |||
// trace_set/trace_clr: set or clear the status bit $1 in the
// CC_SCRATCH[7] register so the host can see what the ucode is doing
define(`trace_set', `
	mov $r8 0x83c
	shl b32 $r8 6
	clear b32 $r9
	bset $r9 $1
	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
')

define(`trace_clr', `
	mov $r8 0x85c
	shl b32 $r8 6
	clear b32 $r9
	bset $r9 $1
	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
')
61 | |||
// queue_put - add request to queue
//
// In : $r13 queue pointer
//	$r14 command
//	$r15 data
//
// Clobbers $r8, $r9.  Entries live after the two header words; the
// queue holds 8 entries and the pointers wrap mod 16, with bit 3
// acting as a phase bit to tell full from empty.
queue_put:
	// make sure we have space..
	ld b32 $r8 D[$r13 + 0x0]	// GET
	ld b32 $r9 D[$r13 + 0x4]	// PUT
	xor $r8 8
	cmpu b32 $r8 $r9
	bra ne queue_put_next
		// full: GET and PUT differ only in the phase bit
		mov $r15 E_CMD_OVERFLOW
		call error
		ret

	// store cmd/data on queue
	queue_put_next:
	and $r8 $r9 7
	shl b32 $r8 3
	add b32 $r8 $r13
	add b32 $r8 8
	st b32 D[$r8 + 0x0] $r14
	st b32 D[$r8 + 0x4] $r15

	// update PUT
	add b32 $r9 1
	and $r9 0xf
	st b32 D[$r13 + 0x4] $r9
	ret
93 | |||
// queue_get - fetch request from queue
//
// In : $r13 queue pointer
//
// Out:	$p1 clear on success (data available)
//	$r14 command
//	$r15 data
//
// Clobbers $r8, $r9.  Empty when GET equals PUT exactly.
queue_get:
	bset $flags $p1
	ld b32 $r8 D[$r13 + 0x0]	// GET
	ld b32 $r9 D[$r13 + 0x4]	// PUT
	cmpu b32 $r8 $r9
	bra e queue_get_done
		// fetch first cmd/data pair
		and $r9 $r8 7
		shl b32 $r9 3
		add b32 $r9 $r13
		add b32 $r9 8
		ld b32 $r14 D[$r9 + 0x0]
		ld b32 $r15 D[$r9 + 0x4]

		// update GET
		add b32 $r8 1
		and $r8 0xf
		st b32 D[$r13 + 0x0] $r8
		bclr $flags $p1
queue_get_done:
	ret
123 | |||
// nv_rd32 - read 32-bit value from nv register
//
// In : $r14 register
// Out: $r15 value
//
// Issues the read through the MMIO_CTRL request interface, busy-waits
// for the pending bit to clear, then waits for DONE_MMIO_RD before
// fetching the value.  Clobbers $r10, $r11, $r12.
nv_rd32:
	mov $r11 0x728
	shl b32 $r11 6
	mov b32 $r12 $r14
	bset $r12 31			// MMIO_CTRL_PENDING
	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
	nv_rd32_wait:
		iord $r12 I[$r11 + 0x000]
		xbit $r12 $r12 31
		bra ne nv_rd32_wait
	mov $r10 6			// DONE_MMIO_RD
	call wait_doneo
	iord $r15 I[$r11 + 0x100]	// MMIO_RDVAL
	ret
143 | |||
// nv_wr32 - write 32-bit value to nv register
//
// In : $r14 register
//	$r15 value
//
// Stages the value in MMIO_WRVAL, then issues a write request through
// MMIO_CTRL and busy-waits for completion.  Clobbers $r11, $r12.
nv_wr32:
	mov $r11 0x728
	shl b32 $r11 6
	iowr I[$r11 + 0x200] $r15	// MMIO_WRVAL
	mov b32 $r12 $r14
	bset $r12 31			// MMIO_CTRL_PENDING
	bset $r12 30			// MMIO_CTRL_WRITE
	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
	nv_wr32_wait:
		iord $r12 I[$r11 + 0x000]
		xbit $r12 $r12 31
		bra ne nv_wr32_wait
	ret
162 | |||
// (re)set watchdog timer
//
// In : $r15 timeout
//
// Bit 31 enables the timer.  Clobbers $r8 and modifies $r15.
watchdog_reset:
	mov $r8 0x430
	shl b32 $r8 6
	bset $r15 31
	iowr I[$r8 + 0x000] $r15
	ret
173 | |||
// clear watchdog timer (write zero disables it; clobbers $r8)
watchdog_clear:
	mov $r8 0x430
	shl b32 $r8 6
	iowr I[$r8 + 0x000] $r0
	ret
180 | |||
// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
//
// In : $r10 bit to wait on
//
// Macro args: $1 = routine label, $2 = branch condition on the bit
// (ne = loop while set, e = loop while clear).  The bit under wait is
// mirrored to CC_SCRATCH[6] for host-side debugging.  Clobbers $r8.
define(`wait_done', `
$1:
	trace_set(T_WAIT);
	mov $r8 0x818
	shl b32 $r8 6
	iowr I[$r8 + 0x000] $r10	// CC_SCRATCH[6] = wait bit
	wait_done_$1:
		mov $r8 0x400
		shl b32 $r8 6
		iord $r8 I[$r8 + 0x000]	// DONE
		xbit $r8 $r8 $r10
		bra $2 wait_done_$1
	trace_clr(T_WAIT)
	ret
')
wait_done(wait_donez, ne)
wait_done(wait_doneo, e)
202 | |||
// mmctx_size - determine size of a mmio list transfer
//
// In : $r14 mmio list head
//	$r15 mmio list tail
// Out: $r15 transfer size (in bytes)
//
// Each entry stores count minus one in bits 31:26 (see mmctx_data),
// so size accumulates (count) * 4 per entry.  Clobbers $r8, $r9 and
// advances $r14 to the tail.
mmctx_size:
	clear b32 $r9
	nv_mmctx_size_loop:
		ld b32 $r8 D[$r14]
		shr b32 $r8 26
		add b32 $r8 1
		shl b32 $r8 2
		add b32 $r9 $r8
		add b32 $r14 4
		cmpu b32 $r14 $r15
		bra ne nv_mmctx_size_loop
	mov b32 $r15 $r9
	ret
222 | |||
// mmctx_xfer - execute a list of mmio transfers
//
// In : $r10 flags
//		bit 0: direction (0 = save, 1 = load)
//		bit 1: set if first transfer
//		bit 2: set if last transfer
//	$r11 base
//	$r12 mmio list head
//	$r13 mmio list tail
//	$r14 multi_stride
//	$r15 multi_mask
//
// Feeds each list entry to the MMCTX request queue, throttling on the
// free-entry count in MMCTX_CTRL; a zero base or stride skips the
// corresponding enable bit.  On the last transfer the engine is
// stopped, otherwise the code drains the queue and waits DONE_MMCTX.
mmctx_xfer:
	trace_set(T_MMCTX)
	mov $r8 0x710
	shl b32 $r8 6
	clear b32 $r9
	or $r11 $r11
	bra e mmctx_base_disabled
		iowr I[$r8 + 0x000] $r11	// MMCTX_BASE
		bset $r9 0			// BASE_EN
	mmctx_base_disabled:
	or $r14 $r14
	bra e mmctx_multi_disabled
		iowr I[$r8 + 0x200] $r14 	// MMCTX_MULTI_STRIDE
		iowr I[$r8 + 0x300] $r15 	// MMCTX_MULTI_MASK
		bset $r9 1			// MULTI_EN
	mmctx_multi_disabled:
	add b32 $r8 0x100

	xbit $r11 $r10 0
	shl b32 $r11 16			// DIR
	bset $r11 12			// QLIMIT = 0x10
	xbit $r14 $r10 1
	shl b32 $r14 17
	or $r11 $r14			// START_TRIGGER
	iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL

	// loop over the mmio list, and send requests to the hw
	mmctx_exec_loop:
		// wait for space in mmctx queue
		mmctx_wait_free:
			iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
			and $r14 0x1f
			bra e mmctx_wait_free

		// queue up an entry
		ld b32 $r14 D[$r12]
		or $r14 $r9
		iowr I[$r8 + 0x300] $r14
		add b32 $r12 4
		cmpu b32 $r12 $r13
		bra ne mmctx_exec_loop

	xbit $r11 $r10 2
	bra ne mmctx_stop
		// wait for queue to empty
		mmctx_fini_wait:
			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
			and $r11 0x1f
			cmpu b32 $r11 0x10
			bra ne mmctx_fini_wait
		mov $r10 2				// DONE_MMCTX
		call wait_donez
		bra mmctx_done
	mmctx_stop:
		xbit $r11 $r10 0
		shl b32 $r11 16			// DIR
		bset $r11 12			// QLIMIT = 0x10
		bset $r11 18			// STOP_TRIGGER
		iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
		mmctx_stop_wait:
			// wait for STOP_TRIGGER to clear
			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
			xbit $r11 $r11 18
			bra ne mmctx_stop_wait
	mmctx_done:
	trace_clr(T_MMCTX)
	ret
302 | |||
// Wait for DONE_STRAND
//
// NOTE(review): the DONE bit used here (2) is the same value that
// mmctx_xfer labels DONE_MMCTX -- one of the two names may be stale;
// confirm against hw docs.
strand_wait:
	push $r10		// wait_donez takes the bit in $r10; preserve caller's
	mov $r10 2
	call wait_donez
	pop $r10
	ret
311 | |||
// unknown - call before issuing strand commands
//
// Writes command 0xc to the io register addressed by 0x4afc with the
// 0x20000 sethi bits set, then waits for the strand engine to go idle.
// Exact command semantics unknown.
strand_pre:
	mov $r8 0x4afc
	sethi $r8 0x20000
	mov $r9 0xc
	iowr I[$r8] $r9
	call strand_wait
	ret
321 | |||
// unknown - call after issuing strand commands
//
// Mirror of strand_pre: writes command 0xd to the same register and
// waits for completion.  Exact command semantics unknown.
strand_post:
	mov $r8 0x4afc
	sethi $r8 0x20000
	mov $r9 0xd
	iowr I[$r8] $r9
	call strand_wait
	ret
331 | |||
// Selects strand set?!
//
// In: $r14 id
//
strand_set:
	mov $r10 0x4ffc
	sethi $r10 0x20000	// $r10 -> io reg 0x93c (per comments below)
	sub b32 $r11 $r10 0x500	// $r11 -> io reg 0x928
	mov $r12 0xf
	iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
	mov $r12 0xb
	iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
	call strand_wait
	iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
	mov $r12 0xa
	iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
	call strand_wait
	ret
350 | |||
// Initialise strand context data
//
// In : $r15 context base
// Out: $r15 context size (in bytes)
//
// Strandset(?) 3 hardcoded currently
//
strand_ctx_init:
	trace_set(T_STRINIT)
	call strand_pre
	mov $r14 3
	call strand_set
	mov $r10 0x46fc
	sethi $r10 0x20000
	add b32 $r11 $r10 0x400	// $r11 -> STRAND_CMD register
	iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
	mov $r12 1
	iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
	call strand_wait
	sub b32 $r12 $r0 1	// $r12 = 0 - 1 = 0xffffffff
	iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
	mov $r12 2
	iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
	call strand_wait
	call strand_post

	// read the size of each strand, poke the context offset of
	// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
	// about it later then.
	mov $r8 0x880
	shl b32 $r8 6
	iord $r9 I[$r8 + 0x000] // STRANDS
	add b32 $r8 0x2200
	shr b32 $r14 $r15 8	// swbase is in 256-byte units (shl 8 below undoes this)
	ctx_init_strand_loop:
	iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
	iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
	iord $r10 I[$r8 + 0x200] // STRAND_SIZE
	shr b32 $r10 6		// convert size to swbase units (>>6, +1);
	add b32 $r10 1		// exact hw units unverified -- confirm
	add b32 $r14 $r10	// next strand starts after this one
	add b32 $r8 4
	sub b32 $r9 1		// one strand done
	bra ne ctx_init_strand_loop

	shl b32 $r14 8		// back to bytes
	sub b32 $r15 $r14 $r15	// size = end - base
	trace_clr(T_STRINIT)
	ret
400 | ') | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h new file mode 100644 index 00000000000..55689e99728 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #ifndef __NVC0_GRAPH_H__ | ||
26 | #define __NVC0_GRAPH_H__ | ||
27 | |||
28 | #define GPC_MAX 4 | ||
29 | #define TP_MAX 32 | ||
30 | |||
31 | #define ROP_BCAST(r) (0x408800 + (r)) | ||
32 | #define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r)) | ||
33 | #define GPC_BCAST(r) (0x418000 + (r)) | ||
34 | #define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r)) | ||
35 | #define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) | ||
36 | |||
/* A single falcon (fuc) microcode image held in host memory. */
struct nvc0_graph_fuc {
	u32 *data;	/* firmware payload words */
	u32 size;	/* size of @data -- presumably bytes; confirm against loader */
};
41 | |||
/* Device-wide PGRAPH engine state for NVC0 (Fermi) chipsets. */
struct nvc0_graph_priv {
	struct nouveau_exec_engine base;

	/* ctxctl falcon firmware images; names appear to encode the mmio
	 * base of the target falcon (0x409xxx = HUB, 0x41axxx = GPC) --
	 * NOTE(review): inferred from the field names; confirm. */
	struct nvc0_graph_fuc fuc409c;
	struct nvc0_graph_fuc fuc409d;
	struct nvc0_graph_fuc fuc41ac;
	struct nvc0_graph_fuc fuc41ad;

	u8 gpc_nr;		/* number of GPCs (indexes tp_nr[], so <= GPC_MAX) */
	u8 rop_nr;		/* number of ROP units */
	u8 tp_nr[GPC_MAX];	/* TPs per GPC */
	u8 tp_total;		/* presumably sum of tp_nr[] -- confirm at init */

	u32 grctx_size;		/* size of a channel's graphics context */
	u32 *grctx_vals;	/* presumably a golden context image -- confirm */
	struct nouveau_gpuobj *unk4188b4;	/* purpose unknown */
	struct nouveau_gpuobj *unk4188b8;	/* purpose unknown */

	u8 magic_not_rop_nr;	/* NOTE(review): meaning unclear from this file */
};
62 | |||
/* Per-channel PGRAPH state. */
struct nvc0_graph_chan {
	struct nouveau_gpuobj *grctx;	/* this channel's graphics context image */
	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
	struct nouveau_gpuobj *mmio;	/* mmio list buffer */
	int mmio_nr;			/* presumably entry count for @mmio -- confirm */
};
71 | |||
72 | int nvc0_grctx_generate(struct nouveau_channel *); | ||
73 | |||
74 | /* nvc0_graph.c uses this also to determine supported chipsets */ | ||
75 | static inline u32 | ||
76 | nvc0_graph_class(struct drm_device *dev) | ||
77 | { | ||
78 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
79 | |||
80 | switch (dev_priv->chipset) { | ||
81 | case 0xc0: | ||
82 | case 0xc3: | ||
83 | case 0xc4: | ||
84 | case 0xce: /* guess, mmio trace shows only 0x9097 state */ | ||
85 | return 0x9097; | ||
86 | case 0xc1: | ||
87 | return 0x9197; | ||
88 | case 0xc8: | ||
89 | return 0x9297; | ||
90 | default: | ||
91 | return 0; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c new file mode 100644 index 00000000000..31018eaf527 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c | |||
@@ -0,0 +1,2807 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | #include "nvc0_graph.h" | ||
29 | |||
30 | static void | ||
31 | nv_icmd(struct drm_device *dev, u32 icmd, u32 data) | ||
32 | { | ||
33 | nv_wr32(dev, 0x400204, data); | ||
34 | nv_wr32(dev, 0x400200, icmd); | ||
35 | while (nv_rd32(dev, 0x400700) & 2) {} | ||
36 | } | ||
37 | |||
38 | static void | ||
39 | nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data) | ||
40 | { | ||
41 | nv_wr32(dev, 0x40448c, data); | ||
42 | nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class); | ||
43 | } | ||
44 | |||
45 | static void | ||
46 | nvc0_grctx_generate_9097(struct drm_device *dev) | ||
47 | { | ||
48 | u32 fermi = nvc0_graph_class(dev); | ||
49 | u32 mthd; | ||
50 | |||
51 | nv_mthd(dev, 0x9097, 0x0800, 0x00000000); | ||
52 | nv_mthd(dev, 0x9097, 0x0840, 0x00000000); | ||
53 | nv_mthd(dev, 0x9097, 0x0880, 0x00000000); | ||
54 | nv_mthd(dev, 0x9097, 0x08c0, 0x00000000); | ||
55 | nv_mthd(dev, 0x9097, 0x0900, 0x00000000); | ||
56 | nv_mthd(dev, 0x9097, 0x0940, 0x00000000); | ||
57 | nv_mthd(dev, 0x9097, 0x0980, 0x00000000); | ||
58 | nv_mthd(dev, 0x9097, 0x09c0, 0x00000000); | ||
59 | nv_mthd(dev, 0x9097, 0x0804, 0x00000000); | ||
60 | nv_mthd(dev, 0x9097, 0x0844, 0x00000000); | ||
61 | nv_mthd(dev, 0x9097, 0x0884, 0x00000000); | ||
62 | nv_mthd(dev, 0x9097, 0x08c4, 0x00000000); | ||
63 | nv_mthd(dev, 0x9097, 0x0904, 0x00000000); | ||
64 | nv_mthd(dev, 0x9097, 0x0944, 0x00000000); | ||
65 | nv_mthd(dev, 0x9097, 0x0984, 0x00000000); | ||
66 | nv_mthd(dev, 0x9097, 0x09c4, 0x00000000); | ||
67 | nv_mthd(dev, 0x9097, 0x0808, 0x00000400); | ||
68 | nv_mthd(dev, 0x9097, 0x0848, 0x00000400); | ||
69 | nv_mthd(dev, 0x9097, 0x0888, 0x00000400); | ||
70 | nv_mthd(dev, 0x9097, 0x08c8, 0x00000400); | ||
71 | nv_mthd(dev, 0x9097, 0x0908, 0x00000400); | ||
72 | nv_mthd(dev, 0x9097, 0x0948, 0x00000400); | ||
73 | nv_mthd(dev, 0x9097, 0x0988, 0x00000400); | ||
74 | nv_mthd(dev, 0x9097, 0x09c8, 0x00000400); | ||
75 | nv_mthd(dev, 0x9097, 0x080c, 0x00000300); | ||
76 | nv_mthd(dev, 0x9097, 0x084c, 0x00000300); | ||
77 | nv_mthd(dev, 0x9097, 0x088c, 0x00000300); | ||
78 | nv_mthd(dev, 0x9097, 0x08cc, 0x00000300); | ||
79 | nv_mthd(dev, 0x9097, 0x090c, 0x00000300); | ||
80 | nv_mthd(dev, 0x9097, 0x094c, 0x00000300); | ||
81 | nv_mthd(dev, 0x9097, 0x098c, 0x00000300); | ||
82 | nv_mthd(dev, 0x9097, 0x09cc, 0x00000300); | ||
83 | nv_mthd(dev, 0x9097, 0x0810, 0x000000cf); | ||
84 | nv_mthd(dev, 0x9097, 0x0850, 0x00000000); | ||
85 | nv_mthd(dev, 0x9097, 0x0890, 0x00000000); | ||
86 | nv_mthd(dev, 0x9097, 0x08d0, 0x00000000); | ||
87 | nv_mthd(dev, 0x9097, 0x0910, 0x00000000); | ||
88 | nv_mthd(dev, 0x9097, 0x0950, 0x00000000); | ||
89 | nv_mthd(dev, 0x9097, 0x0990, 0x00000000); | ||
90 | nv_mthd(dev, 0x9097, 0x09d0, 0x00000000); | ||
91 | nv_mthd(dev, 0x9097, 0x0814, 0x00000040); | ||
92 | nv_mthd(dev, 0x9097, 0x0854, 0x00000040); | ||
93 | nv_mthd(dev, 0x9097, 0x0894, 0x00000040); | ||
94 | nv_mthd(dev, 0x9097, 0x08d4, 0x00000040); | ||
95 | nv_mthd(dev, 0x9097, 0x0914, 0x00000040); | ||
96 | nv_mthd(dev, 0x9097, 0x0954, 0x00000040); | ||
97 | nv_mthd(dev, 0x9097, 0x0994, 0x00000040); | ||
98 | nv_mthd(dev, 0x9097, 0x09d4, 0x00000040); | ||
99 | nv_mthd(dev, 0x9097, 0x0818, 0x00000001); | ||
100 | nv_mthd(dev, 0x9097, 0x0858, 0x00000001); | ||
101 | nv_mthd(dev, 0x9097, 0x0898, 0x00000001); | ||
102 | nv_mthd(dev, 0x9097, 0x08d8, 0x00000001); | ||
103 | nv_mthd(dev, 0x9097, 0x0918, 0x00000001); | ||
104 | nv_mthd(dev, 0x9097, 0x0958, 0x00000001); | ||
105 | nv_mthd(dev, 0x9097, 0x0998, 0x00000001); | ||
106 | nv_mthd(dev, 0x9097, 0x09d8, 0x00000001); | ||
107 | nv_mthd(dev, 0x9097, 0x081c, 0x00000000); | ||
108 | nv_mthd(dev, 0x9097, 0x085c, 0x00000000); | ||
109 | nv_mthd(dev, 0x9097, 0x089c, 0x00000000); | ||
110 | nv_mthd(dev, 0x9097, 0x08dc, 0x00000000); | ||
111 | nv_mthd(dev, 0x9097, 0x091c, 0x00000000); | ||
112 | nv_mthd(dev, 0x9097, 0x095c, 0x00000000); | ||
113 | nv_mthd(dev, 0x9097, 0x099c, 0x00000000); | ||
114 | nv_mthd(dev, 0x9097, 0x09dc, 0x00000000); | ||
115 | nv_mthd(dev, 0x9097, 0x0820, 0x00000000); | ||
116 | nv_mthd(dev, 0x9097, 0x0860, 0x00000000); | ||
117 | nv_mthd(dev, 0x9097, 0x08a0, 0x00000000); | ||
118 | nv_mthd(dev, 0x9097, 0x08e0, 0x00000000); | ||
119 | nv_mthd(dev, 0x9097, 0x0920, 0x00000000); | ||
120 | nv_mthd(dev, 0x9097, 0x0960, 0x00000000); | ||
121 | nv_mthd(dev, 0x9097, 0x09a0, 0x00000000); | ||
122 | nv_mthd(dev, 0x9097, 0x09e0, 0x00000000); | ||
123 | nv_mthd(dev, 0x9097, 0x2700, 0x00000000); | ||
124 | nv_mthd(dev, 0x9097, 0x2720, 0x00000000); | ||
125 | nv_mthd(dev, 0x9097, 0x2740, 0x00000000); | ||
126 | nv_mthd(dev, 0x9097, 0x2760, 0x00000000); | ||
127 | nv_mthd(dev, 0x9097, 0x2780, 0x00000000); | ||
128 | nv_mthd(dev, 0x9097, 0x27a0, 0x00000000); | ||
129 | nv_mthd(dev, 0x9097, 0x27c0, 0x00000000); | ||
130 | nv_mthd(dev, 0x9097, 0x27e0, 0x00000000); | ||
131 | nv_mthd(dev, 0x9097, 0x2704, 0x00000000); | ||
132 | nv_mthd(dev, 0x9097, 0x2724, 0x00000000); | ||
133 | nv_mthd(dev, 0x9097, 0x2744, 0x00000000); | ||
134 | nv_mthd(dev, 0x9097, 0x2764, 0x00000000); | ||
135 | nv_mthd(dev, 0x9097, 0x2784, 0x00000000); | ||
136 | nv_mthd(dev, 0x9097, 0x27a4, 0x00000000); | ||
137 | nv_mthd(dev, 0x9097, 0x27c4, 0x00000000); | ||
138 | nv_mthd(dev, 0x9097, 0x27e4, 0x00000000); | ||
139 | nv_mthd(dev, 0x9097, 0x2708, 0x00000000); | ||
140 | nv_mthd(dev, 0x9097, 0x2728, 0x00000000); | ||
141 | nv_mthd(dev, 0x9097, 0x2748, 0x00000000); | ||
142 | nv_mthd(dev, 0x9097, 0x2768, 0x00000000); | ||
143 | nv_mthd(dev, 0x9097, 0x2788, 0x00000000); | ||
144 | nv_mthd(dev, 0x9097, 0x27a8, 0x00000000); | ||
145 | nv_mthd(dev, 0x9097, 0x27c8, 0x00000000); | ||
146 | nv_mthd(dev, 0x9097, 0x27e8, 0x00000000); | ||
147 | nv_mthd(dev, 0x9097, 0x270c, 0x00000000); | ||
148 | nv_mthd(dev, 0x9097, 0x272c, 0x00000000); | ||
149 | nv_mthd(dev, 0x9097, 0x274c, 0x00000000); | ||
150 | nv_mthd(dev, 0x9097, 0x276c, 0x00000000); | ||
151 | nv_mthd(dev, 0x9097, 0x278c, 0x00000000); | ||
152 | nv_mthd(dev, 0x9097, 0x27ac, 0x00000000); | ||
153 | nv_mthd(dev, 0x9097, 0x27cc, 0x00000000); | ||
154 | nv_mthd(dev, 0x9097, 0x27ec, 0x00000000); | ||
155 | nv_mthd(dev, 0x9097, 0x2710, 0x00014000); | ||
156 | nv_mthd(dev, 0x9097, 0x2730, 0x00014000); | ||
157 | nv_mthd(dev, 0x9097, 0x2750, 0x00014000); | ||
158 | nv_mthd(dev, 0x9097, 0x2770, 0x00014000); | ||
159 | nv_mthd(dev, 0x9097, 0x2790, 0x00014000); | ||
160 | nv_mthd(dev, 0x9097, 0x27b0, 0x00014000); | ||
161 | nv_mthd(dev, 0x9097, 0x27d0, 0x00014000); | ||
162 | nv_mthd(dev, 0x9097, 0x27f0, 0x00014000); | ||
163 | nv_mthd(dev, 0x9097, 0x2714, 0x00000040); | ||
164 | nv_mthd(dev, 0x9097, 0x2734, 0x00000040); | ||
165 | nv_mthd(dev, 0x9097, 0x2754, 0x00000040); | ||
166 | nv_mthd(dev, 0x9097, 0x2774, 0x00000040); | ||
167 | nv_mthd(dev, 0x9097, 0x2794, 0x00000040); | ||
168 | nv_mthd(dev, 0x9097, 0x27b4, 0x00000040); | ||
169 | nv_mthd(dev, 0x9097, 0x27d4, 0x00000040); | ||
170 | nv_mthd(dev, 0x9097, 0x27f4, 0x00000040); | ||
171 | nv_mthd(dev, 0x9097, 0x1c00, 0x00000000); | ||
172 | nv_mthd(dev, 0x9097, 0x1c10, 0x00000000); | ||
173 | nv_mthd(dev, 0x9097, 0x1c20, 0x00000000); | ||
174 | nv_mthd(dev, 0x9097, 0x1c30, 0x00000000); | ||
175 | nv_mthd(dev, 0x9097, 0x1c40, 0x00000000); | ||
176 | nv_mthd(dev, 0x9097, 0x1c50, 0x00000000); | ||
177 | nv_mthd(dev, 0x9097, 0x1c60, 0x00000000); | ||
178 | nv_mthd(dev, 0x9097, 0x1c70, 0x00000000); | ||
179 | nv_mthd(dev, 0x9097, 0x1c80, 0x00000000); | ||
180 | nv_mthd(dev, 0x9097, 0x1c90, 0x00000000); | ||
181 | nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000); | ||
182 | nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000); | ||
183 | nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000); | ||
184 | nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000); | ||
185 | nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000); | ||
186 | nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000); | ||
187 | nv_mthd(dev, 0x9097, 0x1c04, 0x00000000); | ||
188 | nv_mthd(dev, 0x9097, 0x1c14, 0x00000000); | ||
189 | nv_mthd(dev, 0x9097, 0x1c24, 0x00000000); | ||
190 | nv_mthd(dev, 0x9097, 0x1c34, 0x00000000); | ||
191 | nv_mthd(dev, 0x9097, 0x1c44, 0x00000000); | ||
192 | nv_mthd(dev, 0x9097, 0x1c54, 0x00000000); | ||
193 | nv_mthd(dev, 0x9097, 0x1c64, 0x00000000); | ||
194 | nv_mthd(dev, 0x9097, 0x1c74, 0x00000000); | ||
195 | nv_mthd(dev, 0x9097, 0x1c84, 0x00000000); | ||
196 | nv_mthd(dev, 0x9097, 0x1c94, 0x00000000); | ||
197 | nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000); | ||
198 | nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000); | ||
199 | nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000); | ||
200 | nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000); | ||
201 | nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000); | ||
202 | nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000); | ||
203 | nv_mthd(dev, 0x9097, 0x1c08, 0x00000000); | ||
204 | nv_mthd(dev, 0x9097, 0x1c18, 0x00000000); | ||
205 | nv_mthd(dev, 0x9097, 0x1c28, 0x00000000); | ||
206 | nv_mthd(dev, 0x9097, 0x1c38, 0x00000000); | ||
207 | nv_mthd(dev, 0x9097, 0x1c48, 0x00000000); | ||
208 | nv_mthd(dev, 0x9097, 0x1c58, 0x00000000); | ||
209 | nv_mthd(dev, 0x9097, 0x1c68, 0x00000000); | ||
210 | nv_mthd(dev, 0x9097, 0x1c78, 0x00000000); | ||
211 | nv_mthd(dev, 0x9097, 0x1c88, 0x00000000); | ||
212 | nv_mthd(dev, 0x9097, 0x1c98, 0x00000000); | ||
213 | nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000); | ||
214 | nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000); | ||
215 | nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000); | ||
216 | nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000); | ||
217 | nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000); | ||
218 | nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000); | ||
219 | nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000); | ||
220 | nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000); | ||
221 | nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000); | ||
222 | nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000); | ||
223 | nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000); | ||
224 | nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000); | ||
225 | nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000); | ||
226 | nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000); | ||
227 | nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000); | ||
228 | nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000); | ||
229 | nv_mthd(dev, 0x9097, 0x1cac, 0x00000000); | ||
230 | nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000); | ||
231 | nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000); | ||
232 | nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000); | ||
233 | nv_mthd(dev, 0x9097, 0x1cec, 0x00000000); | ||
234 | nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000); | ||
235 | nv_mthd(dev, 0x9097, 0x1d00, 0x00000000); | ||
236 | nv_mthd(dev, 0x9097, 0x1d10, 0x00000000); | ||
237 | nv_mthd(dev, 0x9097, 0x1d20, 0x00000000); | ||
238 | nv_mthd(dev, 0x9097, 0x1d30, 0x00000000); | ||
239 | nv_mthd(dev, 0x9097, 0x1d40, 0x00000000); | ||
240 | nv_mthd(dev, 0x9097, 0x1d50, 0x00000000); | ||
241 | nv_mthd(dev, 0x9097, 0x1d60, 0x00000000); | ||
242 | nv_mthd(dev, 0x9097, 0x1d70, 0x00000000); | ||
243 | nv_mthd(dev, 0x9097, 0x1d80, 0x00000000); | ||
244 | nv_mthd(dev, 0x9097, 0x1d90, 0x00000000); | ||
245 | nv_mthd(dev, 0x9097, 0x1da0, 0x00000000); | ||
246 | nv_mthd(dev, 0x9097, 0x1db0, 0x00000000); | ||
247 | nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000); | ||
248 | nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000); | ||
249 | nv_mthd(dev, 0x9097, 0x1de0, 0x00000000); | ||
250 | nv_mthd(dev, 0x9097, 0x1df0, 0x00000000); | ||
251 | nv_mthd(dev, 0x9097, 0x1d04, 0x00000000); | ||
252 | nv_mthd(dev, 0x9097, 0x1d14, 0x00000000); | ||
253 | nv_mthd(dev, 0x9097, 0x1d24, 0x00000000); | ||
254 | nv_mthd(dev, 0x9097, 0x1d34, 0x00000000); | ||
255 | nv_mthd(dev, 0x9097, 0x1d44, 0x00000000); | ||
256 | nv_mthd(dev, 0x9097, 0x1d54, 0x00000000); | ||
257 | nv_mthd(dev, 0x9097, 0x1d64, 0x00000000); | ||
258 | nv_mthd(dev, 0x9097, 0x1d74, 0x00000000); | ||
259 | nv_mthd(dev, 0x9097, 0x1d84, 0x00000000); | ||
260 | nv_mthd(dev, 0x9097, 0x1d94, 0x00000000); | ||
261 | nv_mthd(dev, 0x9097, 0x1da4, 0x00000000); | ||
262 | nv_mthd(dev, 0x9097, 0x1db4, 0x00000000); | ||
263 | nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000); | ||
264 | nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000); | ||
265 | nv_mthd(dev, 0x9097, 0x1de4, 0x00000000); | ||
266 | nv_mthd(dev, 0x9097, 0x1df4, 0x00000000); | ||
267 | nv_mthd(dev, 0x9097, 0x1d08, 0x00000000); | ||
268 | nv_mthd(dev, 0x9097, 0x1d18, 0x00000000); | ||
269 | nv_mthd(dev, 0x9097, 0x1d28, 0x00000000); | ||
270 | nv_mthd(dev, 0x9097, 0x1d38, 0x00000000); | ||
271 | nv_mthd(dev, 0x9097, 0x1d48, 0x00000000); | ||
272 | nv_mthd(dev, 0x9097, 0x1d58, 0x00000000); | ||
273 | nv_mthd(dev, 0x9097, 0x1d68, 0x00000000); | ||
274 | nv_mthd(dev, 0x9097, 0x1d78, 0x00000000); | ||
275 | nv_mthd(dev, 0x9097, 0x1d88, 0x00000000); | ||
276 | nv_mthd(dev, 0x9097, 0x1d98, 0x00000000); | ||
277 | nv_mthd(dev, 0x9097, 0x1da8, 0x00000000); | ||
278 | nv_mthd(dev, 0x9097, 0x1db8, 0x00000000); | ||
279 | nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000); | ||
280 | nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000); | ||
281 | nv_mthd(dev, 0x9097, 0x1de8, 0x00000000); | ||
282 | nv_mthd(dev, 0x9097, 0x1df8, 0x00000000); | ||
283 | nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000); | ||
284 | nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000); | ||
285 | nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000); | ||
286 | nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000); | ||
287 | nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000); | ||
288 | nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000); | ||
289 | nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000); | ||
290 | nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000); | ||
291 | nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000); | ||
292 | nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000); | ||
293 | nv_mthd(dev, 0x9097, 0x1dac, 0x00000000); | ||
294 | nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000); | ||
295 | nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000); | ||
296 | nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000); | ||
297 | nv_mthd(dev, 0x9097, 0x1dec, 0x00000000); | ||
298 | nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000); | ||
299 | nv_mthd(dev, 0x9097, 0x1f00, 0x00000000); | ||
300 | nv_mthd(dev, 0x9097, 0x1f08, 0x00000000); | ||
301 | nv_mthd(dev, 0x9097, 0x1f10, 0x00000000); | ||
302 | nv_mthd(dev, 0x9097, 0x1f18, 0x00000000); | ||
303 | nv_mthd(dev, 0x9097, 0x1f20, 0x00000000); | ||
304 | nv_mthd(dev, 0x9097, 0x1f28, 0x00000000); | ||
305 | nv_mthd(dev, 0x9097, 0x1f30, 0x00000000); | ||
306 | nv_mthd(dev, 0x9097, 0x1f38, 0x00000000); | ||
307 | nv_mthd(dev, 0x9097, 0x1f40, 0x00000000); | ||
308 | nv_mthd(dev, 0x9097, 0x1f48, 0x00000000); | ||
309 | nv_mthd(dev, 0x9097, 0x1f50, 0x00000000); | ||
310 | nv_mthd(dev, 0x9097, 0x1f58, 0x00000000); | ||
311 | nv_mthd(dev, 0x9097, 0x1f60, 0x00000000); | ||
312 | nv_mthd(dev, 0x9097, 0x1f68, 0x00000000); | ||
313 | nv_mthd(dev, 0x9097, 0x1f70, 0x00000000); | ||
314 | nv_mthd(dev, 0x9097, 0x1f78, 0x00000000); | ||
315 | nv_mthd(dev, 0x9097, 0x1f04, 0x00000000); | ||
316 | nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000); | ||
317 | nv_mthd(dev, 0x9097, 0x1f14, 0x00000000); | ||
318 | nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000); | ||
319 | nv_mthd(dev, 0x9097, 0x1f24, 0x00000000); | ||
320 | nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000); | ||
321 | nv_mthd(dev, 0x9097, 0x1f34, 0x00000000); | ||
322 | nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000); | ||
323 | nv_mthd(dev, 0x9097, 0x1f44, 0x00000000); | ||
324 | nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000); | ||
325 | nv_mthd(dev, 0x9097, 0x1f54, 0x00000000); | ||
326 | nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000); | ||
327 | nv_mthd(dev, 0x9097, 0x1f64, 0x00000000); | ||
328 | nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000); | ||
329 | nv_mthd(dev, 0x9097, 0x1f74, 0x00000000); | ||
330 | nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000); | ||
331 | nv_mthd(dev, 0x9097, 0x1f80, 0x00000000); | ||
332 | nv_mthd(dev, 0x9097, 0x1f88, 0x00000000); | ||
333 | nv_mthd(dev, 0x9097, 0x1f90, 0x00000000); | ||
334 | nv_mthd(dev, 0x9097, 0x1f98, 0x00000000); | ||
335 | nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000); | ||
336 | nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000); | ||
337 | nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000); | ||
338 | nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000); | ||
339 | nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000); | ||
340 | nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000); | ||
341 | nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000); | ||
342 | nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000); | ||
343 | nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000); | ||
344 | nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000); | ||
345 | nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000); | ||
346 | nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000); | ||
347 | nv_mthd(dev, 0x9097, 0x1f84, 0x00000000); | ||
348 | nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000); | ||
349 | nv_mthd(dev, 0x9097, 0x1f94, 0x00000000); | ||
350 | nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000); | ||
351 | nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000); | ||
352 | nv_mthd(dev, 0x9097, 0x1fac, 0x00000000); | ||
353 | nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000); | ||
354 | nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000); | ||
355 | nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000); | ||
356 | nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000); | ||
357 | nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000); | ||
358 | nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000); | ||
359 | nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000); | ||
360 | nv_mthd(dev, 0x9097, 0x1fec, 0x00000000); | ||
361 | nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000); | ||
362 | nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000); | ||
363 | nv_mthd(dev, 0x9097, 0x2200, 0x00000022); | ||
364 | nv_mthd(dev, 0x9097, 0x2210, 0x00000022); | ||
365 | nv_mthd(dev, 0x9097, 0x2220, 0x00000022); | ||
366 | nv_mthd(dev, 0x9097, 0x2230, 0x00000022); | ||
367 | nv_mthd(dev, 0x9097, 0x2240, 0x00000022); | ||
368 | nv_mthd(dev, 0x9097, 0x2000, 0x00000000); | ||
369 | nv_mthd(dev, 0x9097, 0x2040, 0x00000011); | ||
370 | nv_mthd(dev, 0x9097, 0x2080, 0x00000020); | ||
371 | nv_mthd(dev, 0x9097, 0x20c0, 0x00000030); | ||
372 | nv_mthd(dev, 0x9097, 0x2100, 0x00000040); | ||
373 | nv_mthd(dev, 0x9097, 0x2140, 0x00000051); | ||
374 | nv_mthd(dev, 0x9097, 0x200c, 0x00000001); | ||
375 | nv_mthd(dev, 0x9097, 0x204c, 0x00000001); | ||
376 | nv_mthd(dev, 0x9097, 0x208c, 0x00000001); | ||
377 | nv_mthd(dev, 0x9097, 0x20cc, 0x00000001); | ||
378 | nv_mthd(dev, 0x9097, 0x210c, 0x00000001); | ||
379 | nv_mthd(dev, 0x9097, 0x214c, 0x00000001); | ||
380 | nv_mthd(dev, 0x9097, 0x2010, 0x00000000); | ||
381 | nv_mthd(dev, 0x9097, 0x2050, 0x00000000); | ||
382 | nv_mthd(dev, 0x9097, 0x2090, 0x00000001); | ||
383 | nv_mthd(dev, 0x9097, 0x20d0, 0x00000002); | ||
384 | nv_mthd(dev, 0x9097, 0x2110, 0x00000003); | ||
385 | nv_mthd(dev, 0x9097, 0x2150, 0x00000004); | ||
386 | nv_mthd(dev, 0x9097, 0x0380, 0x00000000); | ||
387 | nv_mthd(dev, 0x9097, 0x03a0, 0x00000000); | ||
388 | nv_mthd(dev, 0x9097, 0x03c0, 0x00000000); | ||
389 | nv_mthd(dev, 0x9097, 0x03e0, 0x00000000); | ||
390 | nv_mthd(dev, 0x9097, 0x0384, 0x00000000); | ||
391 | nv_mthd(dev, 0x9097, 0x03a4, 0x00000000); | ||
392 | nv_mthd(dev, 0x9097, 0x03c4, 0x00000000); | ||
393 | nv_mthd(dev, 0x9097, 0x03e4, 0x00000000); | ||
394 | nv_mthd(dev, 0x9097, 0x0388, 0x00000000); | ||
395 | nv_mthd(dev, 0x9097, 0x03a8, 0x00000000); | ||
396 | nv_mthd(dev, 0x9097, 0x03c8, 0x00000000); | ||
397 | nv_mthd(dev, 0x9097, 0x03e8, 0x00000000); | ||
398 | nv_mthd(dev, 0x9097, 0x038c, 0x00000000); | ||
399 | nv_mthd(dev, 0x9097, 0x03ac, 0x00000000); | ||
400 | nv_mthd(dev, 0x9097, 0x03cc, 0x00000000); | ||
401 | nv_mthd(dev, 0x9097, 0x03ec, 0x00000000); | ||
402 | nv_mthd(dev, 0x9097, 0x0700, 0x00000000); | ||
403 | nv_mthd(dev, 0x9097, 0x0710, 0x00000000); | ||
404 | nv_mthd(dev, 0x9097, 0x0720, 0x00000000); | ||
405 | nv_mthd(dev, 0x9097, 0x0730, 0x00000000); | ||
406 | nv_mthd(dev, 0x9097, 0x0704, 0x00000000); | ||
407 | nv_mthd(dev, 0x9097, 0x0714, 0x00000000); | ||
408 | nv_mthd(dev, 0x9097, 0x0724, 0x00000000); | ||
409 | nv_mthd(dev, 0x9097, 0x0734, 0x00000000); | ||
410 | nv_mthd(dev, 0x9097, 0x0708, 0x00000000); | ||
411 | nv_mthd(dev, 0x9097, 0x0718, 0x00000000); | ||
412 | nv_mthd(dev, 0x9097, 0x0728, 0x00000000); | ||
413 | nv_mthd(dev, 0x9097, 0x0738, 0x00000000); | ||
414 | nv_mthd(dev, 0x9097, 0x2800, 0x00000000); | ||
415 | nv_mthd(dev, 0x9097, 0x2804, 0x00000000); | ||
416 | nv_mthd(dev, 0x9097, 0x2808, 0x00000000); | ||
417 | nv_mthd(dev, 0x9097, 0x280c, 0x00000000); | ||
418 | nv_mthd(dev, 0x9097, 0x2810, 0x00000000); | ||
419 | nv_mthd(dev, 0x9097, 0x2814, 0x00000000); | ||
420 | nv_mthd(dev, 0x9097, 0x2818, 0x00000000); | ||
421 | nv_mthd(dev, 0x9097, 0x281c, 0x00000000); | ||
422 | nv_mthd(dev, 0x9097, 0x2820, 0x00000000); | ||
423 | nv_mthd(dev, 0x9097, 0x2824, 0x00000000); | ||
424 | nv_mthd(dev, 0x9097, 0x2828, 0x00000000); | ||
425 | nv_mthd(dev, 0x9097, 0x282c, 0x00000000); | ||
426 | nv_mthd(dev, 0x9097, 0x2830, 0x00000000); | ||
427 | nv_mthd(dev, 0x9097, 0x2834, 0x00000000); | ||
428 | nv_mthd(dev, 0x9097, 0x2838, 0x00000000); | ||
429 | nv_mthd(dev, 0x9097, 0x283c, 0x00000000); | ||
430 | nv_mthd(dev, 0x9097, 0x2840, 0x00000000); | ||
431 | nv_mthd(dev, 0x9097, 0x2844, 0x00000000); | ||
432 | nv_mthd(dev, 0x9097, 0x2848, 0x00000000); | ||
433 | nv_mthd(dev, 0x9097, 0x284c, 0x00000000); | ||
434 | nv_mthd(dev, 0x9097, 0x2850, 0x00000000); | ||
435 | nv_mthd(dev, 0x9097, 0x2854, 0x00000000); | ||
436 | nv_mthd(dev, 0x9097, 0x2858, 0x00000000); | ||
437 | nv_mthd(dev, 0x9097, 0x285c, 0x00000000); | ||
438 | nv_mthd(dev, 0x9097, 0x2860, 0x00000000); | ||
439 | nv_mthd(dev, 0x9097, 0x2864, 0x00000000); | ||
440 | nv_mthd(dev, 0x9097, 0x2868, 0x00000000); | ||
441 | nv_mthd(dev, 0x9097, 0x286c, 0x00000000); | ||
442 | nv_mthd(dev, 0x9097, 0x2870, 0x00000000); | ||
443 | nv_mthd(dev, 0x9097, 0x2874, 0x00000000); | ||
444 | nv_mthd(dev, 0x9097, 0x2878, 0x00000000); | ||
445 | nv_mthd(dev, 0x9097, 0x287c, 0x00000000); | ||
446 | nv_mthd(dev, 0x9097, 0x2880, 0x00000000); | ||
447 | nv_mthd(dev, 0x9097, 0x2884, 0x00000000); | ||
448 | nv_mthd(dev, 0x9097, 0x2888, 0x00000000); | ||
449 | nv_mthd(dev, 0x9097, 0x288c, 0x00000000); | ||
450 | nv_mthd(dev, 0x9097, 0x2890, 0x00000000); | ||
451 | nv_mthd(dev, 0x9097, 0x2894, 0x00000000); | ||
452 | nv_mthd(dev, 0x9097, 0x2898, 0x00000000); | ||
453 | nv_mthd(dev, 0x9097, 0x289c, 0x00000000); | ||
454 | nv_mthd(dev, 0x9097, 0x28a0, 0x00000000); | ||
455 | nv_mthd(dev, 0x9097, 0x28a4, 0x00000000); | ||
456 | nv_mthd(dev, 0x9097, 0x28a8, 0x00000000); | ||
457 | nv_mthd(dev, 0x9097, 0x28ac, 0x00000000); | ||
458 | nv_mthd(dev, 0x9097, 0x28b0, 0x00000000); | ||
459 | nv_mthd(dev, 0x9097, 0x28b4, 0x00000000); | ||
460 | nv_mthd(dev, 0x9097, 0x28b8, 0x00000000); | ||
461 | nv_mthd(dev, 0x9097, 0x28bc, 0x00000000); | ||
462 | nv_mthd(dev, 0x9097, 0x28c0, 0x00000000); | ||
463 | nv_mthd(dev, 0x9097, 0x28c4, 0x00000000); | ||
464 | nv_mthd(dev, 0x9097, 0x28c8, 0x00000000); | ||
465 | nv_mthd(dev, 0x9097, 0x28cc, 0x00000000); | ||
466 | nv_mthd(dev, 0x9097, 0x28d0, 0x00000000); | ||
467 | nv_mthd(dev, 0x9097, 0x28d4, 0x00000000); | ||
468 | nv_mthd(dev, 0x9097, 0x28d8, 0x00000000); | ||
469 | nv_mthd(dev, 0x9097, 0x28dc, 0x00000000); | ||
470 | nv_mthd(dev, 0x9097, 0x28e0, 0x00000000); | ||
471 | nv_mthd(dev, 0x9097, 0x28e4, 0x00000000); | ||
472 | nv_mthd(dev, 0x9097, 0x28e8, 0x00000000); | ||
473 | nv_mthd(dev, 0x9097, 0x28ec, 0x00000000); | ||
474 | nv_mthd(dev, 0x9097, 0x28f0, 0x00000000); | ||
475 | nv_mthd(dev, 0x9097, 0x28f4, 0x00000000); | ||
476 | nv_mthd(dev, 0x9097, 0x28f8, 0x00000000); | ||
477 | nv_mthd(dev, 0x9097, 0x28fc, 0x00000000); | ||
478 | nv_mthd(dev, 0x9097, 0x2900, 0x00000000); | ||
479 | nv_mthd(dev, 0x9097, 0x2904, 0x00000000); | ||
480 | nv_mthd(dev, 0x9097, 0x2908, 0x00000000); | ||
481 | nv_mthd(dev, 0x9097, 0x290c, 0x00000000); | ||
482 | nv_mthd(dev, 0x9097, 0x2910, 0x00000000); | ||
483 | nv_mthd(dev, 0x9097, 0x2914, 0x00000000); | ||
484 | nv_mthd(dev, 0x9097, 0x2918, 0x00000000); | ||
485 | nv_mthd(dev, 0x9097, 0x291c, 0x00000000); | ||
486 | nv_mthd(dev, 0x9097, 0x2920, 0x00000000); | ||
487 | nv_mthd(dev, 0x9097, 0x2924, 0x00000000); | ||
488 | nv_mthd(dev, 0x9097, 0x2928, 0x00000000); | ||
489 | nv_mthd(dev, 0x9097, 0x292c, 0x00000000); | ||
490 | nv_mthd(dev, 0x9097, 0x2930, 0x00000000); | ||
491 | nv_mthd(dev, 0x9097, 0x2934, 0x00000000); | ||
492 | nv_mthd(dev, 0x9097, 0x2938, 0x00000000); | ||
493 | nv_mthd(dev, 0x9097, 0x293c, 0x00000000); | ||
494 | nv_mthd(dev, 0x9097, 0x2940, 0x00000000); | ||
495 | nv_mthd(dev, 0x9097, 0x2944, 0x00000000); | ||
496 | nv_mthd(dev, 0x9097, 0x2948, 0x00000000); | ||
497 | nv_mthd(dev, 0x9097, 0x294c, 0x00000000); | ||
498 | nv_mthd(dev, 0x9097, 0x2950, 0x00000000); | ||
499 | nv_mthd(dev, 0x9097, 0x2954, 0x00000000); | ||
500 | nv_mthd(dev, 0x9097, 0x2958, 0x00000000); | ||
501 | nv_mthd(dev, 0x9097, 0x295c, 0x00000000); | ||
502 | nv_mthd(dev, 0x9097, 0x2960, 0x00000000); | ||
503 | nv_mthd(dev, 0x9097, 0x2964, 0x00000000); | ||
504 | nv_mthd(dev, 0x9097, 0x2968, 0x00000000); | ||
505 | nv_mthd(dev, 0x9097, 0x296c, 0x00000000); | ||
506 | nv_mthd(dev, 0x9097, 0x2970, 0x00000000); | ||
507 | nv_mthd(dev, 0x9097, 0x2974, 0x00000000); | ||
508 | nv_mthd(dev, 0x9097, 0x2978, 0x00000000); | ||
509 | nv_mthd(dev, 0x9097, 0x297c, 0x00000000); | ||
510 | nv_mthd(dev, 0x9097, 0x2980, 0x00000000); | ||
511 | nv_mthd(dev, 0x9097, 0x2984, 0x00000000); | ||
512 | nv_mthd(dev, 0x9097, 0x2988, 0x00000000); | ||
513 | nv_mthd(dev, 0x9097, 0x298c, 0x00000000); | ||
514 | nv_mthd(dev, 0x9097, 0x2990, 0x00000000); | ||
515 | nv_mthd(dev, 0x9097, 0x2994, 0x00000000); | ||
516 | nv_mthd(dev, 0x9097, 0x2998, 0x00000000); | ||
517 | nv_mthd(dev, 0x9097, 0x299c, 0x00000000); | ||
518 | nv_mthd(dev, 0x9097, 0x29a0, 0x00000000); | ||
519 | nv_mthd(dev, 0x9097, 0x29a4, 0x00000000); | ||
520 | nv_mthd(dev, 0x9097, 0x29a8, 0x00000000); | ||
521 | nv_mthd(dev, 0x9097, 0x29ac, 0x00000000); | ||
522 | nv_mthd(dev, 0x9097, 0x29b0, 0x00000000); | ||
523 | nv_mthd(dev, 0x9097, 0x29b4, 0x00000000); | ||
524 | nv_mthd(dev, 0x9097, 0x29b8, 0x00000000); | ||
525 | nv_mthd(dev, 0x9097, 0x29bc, 0x00000000); | ||
526 | nv_mthd(dev, 0x9097, 0x29c0, 0x00000000); | ||
527 | nv_mthd(dev, 0x9097, 0x29c4, 0x00000000); | ||
528 | nv_mthd(dev, 0x9097, 0x29c8, 0x00000000); | ||
529 | nv_mthd(dev, 0x9097, 0x29cc, 0x00000000); | ||
530 | nv_mthd(dev, 0x9097, 0x29d0, 0x00000000); | ||
531 | nv_mthd(dev, 0x9097, 0x29d4, 0x00000000); | ||
532 | nv_mthd(dev, 0x9097, 0x29d8, 0x00000000); | ||
533 | nv_mthd(dev, 0x9097, 0x29dc, 0x00000000); | ||
534 | nv_mthd(dev, 0x9097, 0x29e0, 0x00000000); | ||
535 | nv_mthd(dev, 0x9097, 0x29e4, 0x00000000); | ||
536 | nv_mthd(dev, 0x9097, 0x29e8, 0x00000000); | ||
537 | nv_mthd(dev, 0x9097, 0x29ec, 0x00000000); | ||
538 | nv_mthd(dev, 0x9097, 0x29f0, 0x00000000); | ||
539 | nv_mthd(dev, 0x9097, 0x29f4, 0x00000000); | ||
540 | nv_mthd(dev, 0x9097, 0x29f8, 0x00000000); | ||
541 | nv_mthd(dev, 0x9097, 0x29fc, 0x00000000); | ||
542 | nv_mthd(dev, 0x9097, 0x0a00, 0x00000000); | ||
543 | nv_mthd(dev, 0x9097, 0x0a20, 0x00000000); | ||
544 | nv_mthd(dev, 0x9097, 0x0a40, 0x00000000); | ||
545 | nv_mthd(dev, 0x9097, 0x0a60, 0x00000000); | ||
546 | nv_mthd(dev, 0x9097, 0x0a80, 0x00000000); | ||
547 | nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000); | ||
548 | nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000); | ||
549 | nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000); | ||
550 | nv_mthd(dev, 0x9097, 0x0b00, 0x00000000); | ||
551 | nv_mthd(dev, 0x9097, 0x0b20, 0x00000000); | ||
552 | nv_mthd(dev, 0x9097, 0x0b40, 0x00000000); | ||
553 | nv_mthd(dev, 0x9097, 0x0b60, 0x00000000); | ||
554 | nv_mthd(dev, 0x9097, 0x0b80, 0x00000000); | ||
555 | nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000); | ||
556 | nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000); | ||
557 | nv_mthd(dev, 0x9097, 0x0be0, 0x00000000); | ||
558 | nv_mthd(dev, 0x9097, 0x0a04, 0x00000000); | ||
559 | nv_mthd(dev, 0x9097, 0x0a24, 0x00000000); | ||
560 | nv_mthd(dev, 0x9097, 0x0a44, 0x00000000); | ||
561 | nv_mthd(dev, 0x9097, 0x0a64, 0x00000000); | ||
562 | nv_mthd(dev, 0x9097, 0x0a84, 0x00000000); | ||
563 | nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000); | ||
564 | nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000); | ||
565 | nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000); | ||
566 | nv_mthd(dev, 0x9097, 0x0b04, 0x00000000); | ||
567 | nv_mthd(dev, 0x9097, 0x0b24, 0x00000000); | ||
568 | nv_mthd(dev, 0x9097, 0x0b44, 0x00000000); | ||
569 | nv_mthd(dev, 0x9097, 0x0b64, 0x00000000); | ||
570 | nv_mthd(dev, 0x9097, 0x0b84, 0x00000000); | ||
571 | nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000); | ||
572 | nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000); | ||
573 | nv_mthd(dev, 0x9097, 0x0be4, 0x00000000); | ||
574 | nv_mthd(dev, 0x9097, 0x0a08, 0x00000000); | ||
575 | nv_mthd(dev, 0x9097, 0x0a28, 0x00000000); | ||
576 | nv_mthd(dev, 0x9097, 0x0a48, 0x00000000); | ||
577 | nv_mthd(dev, 0x9097, 0x0a68, 0x00000000); | ||
578 | nv_mthd(dev, 0x9097, 0x0a88, 0x00000000); | ||
579 | nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000); | ||
580 | nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000); | ||
581 | nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000); | ||
582 | nv_mthd(dev, 0x9097, 0x0b08, 0x00000000); | ||
583 | nv_mthd(dev, 0x9097, 0x0b28, 0x00000000); | ||
584 | nv_mthd(dev, 0x9097, 0x0b48, 0x00000000); | ||
585 | nv_mthd(dev, 0x9097, 0x0b68, 0x00000000); | ||
586 | nv_mthd(dev, 0x9097, 0x0b88, 0x00000000); | ||
587 | nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000); | ||
588 | nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000); | ||
589 | nv_mthd(dev, 0x9097, 0x0be8, 0x00000000); | ||
590 | nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000); | ||
591 | nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000); | ||
592 | nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000); | ||
593 | nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000); | ||
594 | nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000); | ||
595 | nv_mthd(dev, 0x9097, 0x0aac, 0x00000000); | ||
596 | nv_mthd(dev, 0x9097, 0x0acc, 0x00000000); | ||
597 | nv_mthd(dev, 0x9097, 0x0aec, 0x00000000); | ||
598 | nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000); | ||
599 | nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000); | ||
600 | nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000); | ||
601 | nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000); | ||
602 | nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000); | ||
603 | nv_mthd(dev, 0x9097, 0x0bac, 0x00000000); | ||
604 | nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000); | ||
605 | nv_mthd(dev, 0x9097, 0x0bec, 0x00000000); | ||
606 | nv_mthd(dev, 0x9097, 0x0a10, 0x00000000); | ||
607 | nv_mthd(dev, 0x9097, 0x0a30, 0x00000000); | ||
608 | nv_mthd(dev, 0x9097, 0x0a50, 0x00000000); | ||
609 | nv_mthd(dev, 0x9097, 0x0a70, 0x00000000); | ||
610 | nv_mthd(dev, 0x9097, 0x0a90, 0x00000000); | ||
611 | nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000); | ||
612 | nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000); | ||
613 | nv_mthd(dev, 0x9097, 0x0af0, 0x00000000); | ||
614 | nv_mthd(dev, 0x9097, 0x0b10, 0x00000000); | ||
615 | nv_mthd(dev, 0x9097, 0x0b30, 0x00000000); | ||
616 | nv_mthd(dev, 0x9097, 0x0b50, 0x00000000); | ||
617 | nv_mthd(dev, 0x9097, 0x0b70, 0x00000000); | ||
618 | nv_mthd(dev, 0x9097, 0x0b90, 0x00000000); | ||
619 | nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000); | ||
620 | nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000); | ||
621 | nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000); | ||
622 | nv_mthd(dev, 0x9097, 0x0a14, 0x00000000); | ||
623 | nv_mthd(dev, 0x9097, 0x0a34, 0x00000000); | ||
624 | nv_mthd(dev, 0x9097, 0x0a54, 0x00000000); | ||
625 | nv_mthd(dev, 0x9097, 0x0a74, 0x00000000); | ||
626 | nv_mthd(dev, 0x9097, 0x0a94, 0x00000000); | ||
627 | nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000); | ||
628 | nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000); | ||
629 | nv_mthd(dev, 0x9097, 0x0af4, 0x00000000); | ||
630 | nv_mthd(dev, 0x9097, 0x0b14, 0x00000000); | ||
631 | nv_mthd(dev, 0x9097, 0x0b34, 0x00000000); | ||
632 | nv_mthd(dev, 0x9097, 0x0b54, 0x00000000); | ||
633 | nv_mthd(dev, 0x9097, 0x0b74, 0x00000000); | ||
634 | nv_mthd(dev, 0x9097, 0x0b94, 0x00000000); | ||
635 | nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000); | ||
636 | nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000); | ||
637 | nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000); | ||
638 | nv_mthd(dev, 0x9097, 0x0c00, 0x00000000); | ||
639 | nv_mthd(dev, 0x9097, 0x0c10, 0x00000000); | ||
640 | nv_mthd(dev, 0x9097, 0x0c20, 0x00000000); | ||
641 | nv_mthd(dev, 0x9097, 0x0c30, 0x00000000); | ||
642 | nv_mthd(dev, 0x9097, 0x0c40, 0x00000000); | ||
643 | nv_mthd(dev, 0x9097, 0x0c50, 0x00000000); | ||
644 | nv_mthd(dev, 0x9097, 0x0c60, 0x00000000); | ||
645 | nv_mthd(dev, 0x9097, 0x0c70, 0x00000000); | ||
646 | nv_mthd(dev, 0x9097, 0x0c80, 0x00000000); | ||
647 | nv_mthd(dev, 0x9097, 0x0c90, 0x00000000); | ||
648 | nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000); | ||
649 | nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000); | ||
650 | nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000); | ||
651 | nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000); | ||
652 | nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000); | ||
653 | nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000); | ||
654 | nv_mthd(dev, 0x9097, 0x0c04, 0x00000000); | ||
655 | nv_mthd(dev, 0x9097, 0x0c14, 0x00000000); | ||
656 | nv_mthd(dev, 0x9097, 0x0c24, 0x00000000); | ||
657 | nv_mthd(dev, 0x9097, 0x0c34, 0x00000000); | ||
658 | nv_mthd(dev, 0x9097, 0x0c44, 0x00000000); | ||
659 | nv_mthd(dev, 0x9097, 0x0c54, 0x00000000); | ||
660 | nv_mthd(dev, 0x9097, 0x0c64, 0x00000000); | ||
661 | nv_mthd(dev, 0x9097, 0x0c74, 0x00000000); | ||
662 | nv_mthd(dev, 0x9097, 0x0c84, 0x00000000); | ||
663 | nv_mthd(dev, 0x9097, 0x0c94, 0x00000000); | ||
664 | nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000); | ||
665 | nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000); | ||
666 | nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000); | ||
667 | nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000); | ||
668 | nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000); | ||
669 | nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000); | ||
670 | nv_mthd(dev, 0x9097, 0x0c08, 0x00000000); | ||
671 | nv_mthd(dev, 0x9097, 0x0c18, 0x00000000); | ||
672 | nv_mthd(dev, 0x9097, 0x0c28, 0x00000000); | ||
673 | nv_mthd(dev, 0x9097, 0x0c38, 0x00000000); | ||
674 | nv_mthd(dev, 0x9097, 0x0c48, 0x00000000); | ||
675 | nv_mthd(dev, 0x9097, 0x0c58, 0x00000000); | ||
676 | nv_mthd(dev, 0x9097, 0x0c68, 0x00000000); | ||
677 | nv_mthd(dev, 0x9097, 0x0c78, 0x00000000); | ||
678 | nv_mthd(dev, 0x9097, 0x0c88, 0x00000000); | ||
679 | nv_mthd(dev, 0x9097, 0x0c98, 0x00000000); | ||
680 | nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000); | ||
681 | nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000); | ||
682 | nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000); | ||
683 | nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000); | ||
684 | nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000); | ||
685 | nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000); | ||
686 | nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000); | ||
687 | nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000); | ||
688 | nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000); | ||
689 | nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000); | ||
690 | nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000); | ||
691 | nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000); | ||
692 | nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000); | ||
693 | nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000); | ||
694 | nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000); | ||
695 | nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000); | ||
696 | nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000); | ||
697 | nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000); | ||
698 | nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000); | ||
699 | nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000); | ||
700 | nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000); | ||
701 | nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000); | ||
702 | nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000); | ||
703 | nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000); | ||
704 | nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000); | ||
705 | nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000); | ||
706 | nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000); | ||
707 | nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000); | ||
708 | nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000); | ||
709 | nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000); | ||
710 | nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000); | ||
711 | nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000); | ||
712 | nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000); | ||
713 | nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000); | ||
714 | nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000); | ||
715 | nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000); | ||
716 | nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000); | ||
717 | nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000); | ||
718 | nv_mthd(dev, 0x9097, 0x0e00, 0x00000000); | ||
719 | nv_mthd(dev, 0x9097, 0x0e10, 0x00000000); | ||
720 | nv_mthd(dev, 0x9097, 0x0e20, 0x00000000); | ||
721 | nv_mthd(dev, 0x9097, 0x0e30, 0x00000000); | ||
722 | nv_mthd(dev, 0x9097, 0x0e40, 0x00000000); | ||
723 | nv_mthd(dev, 0x9097, 0x0e50, 0x00000000); | ||
724 | nv_mthd(dev, 0x9097, 0x0e60, 0x00000000); | ||
725 | nv_mthd(dev, 0x9097, 0x0e70, 0x00000000); | ||
726 | nv_mthd(dev, 0x9097, 0x0e80, 0x00000000); | ||
727 | nv_mthd(dev, 0x9097, 0x0e90, 0x00000000); | ||
728 | nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000); | ||
729 | nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000); | ||
730 | nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000); | ||
731 | nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000); | ||
732 | nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000); | ||
733 | nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000); | ||
734 | nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000); | ||
735 | nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000); | ||
736 | nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000); | ||
737 | nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000); | ||
738 | nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000); | ||
739 | nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000); | ||
740 | nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000); | ||
741 | nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000); | ||
742 | nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000); | ||
743 | nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000); | ||
744 | nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000); | ||
745 | nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000); | ||
746 | nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000); | ||
747 | nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000); | ||
748 | nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000); | ||
749 | nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000); | ||
750 | nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000); | ||
751 | nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000); | ||
752 | nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000); | ||
753 | nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000); | ||
754 | nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000); | ||
755 | nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000); | ||
756 | nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000); | ||
757 | nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000); | ||
758 | nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000); | ||
759 | nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000); | ||
760 | nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000); | ||
761 | nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000); | ||
762 | nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000); | ||
763 | nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000); | ||
764 | nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000); | ||
765 | nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000); | ||
766 | nv_mthd(dev, 0x9097, 0x0d40, 0x00000000); | ||
767 | nv_mthd(dev, 0x9097, 0x0d48, 0x00000000); | ||
768 | nv_mthd(dev, 0x9097, 0x0d50, 0x00000000); | ||
769 | nv_mthd(dev, 0x9097, 0x0d58, 0x00000000); | ||
770 | nv_mthd(dev, 0x9097, 0x0d44, 0x00000000); | ||
771 | nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000); | ||
772 | nv_mthd(dev, 0x9097, 0x0d54, 0x00000000); | ||
773 | nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000); | ||
774 | nv_mthd(dev, 0x9097, 0x1e00, 0x00000001); | ||
775 | nv_mthd(dev, 0x9097, 0x1e20, 0x00000001); | ||
776 | nv_mthd(dev, 0x9097, 0x1e40, 0x00000001); | ||
777 | nv_mthd(dev, 0x9097, 0x1e60, 0x00000001); | ||
778 | nv_mthd(dev, 0x9097, 0x1e80, 0x00000001); | ||
779 | nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001); | ||
780 | nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001); | ||
781 | nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001); | ||
782 | nv_mthd(dev, 0x9097, 0x1e04, 0x00000001); | ||
783 | nv_mthd(dev, 0x9097, 0x1e24, 0x00000001); | ||
784 | nv_mthd(dev, 0x9097, 0x1e44, 0x00000001); | ||
785 | nv_mthd(dev, 0x9097, 0x1e64, 0x00000001); | ||
786 | nv_mthd(dev, 0x9097, 0x1e84, 0x00000001); | ||
787 | nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001); | ||
788 | nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001); | ||
789 | nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001); | ||
790 | nv_mthd(dev, 0x9097, 0x1e08, 0x00000002); | ||
791 | nv_mthd(dev, 0x9097, 0x1e28, 0x00000002); | ||
792 | nv_mthd(dev, 0x9097, 0x1e48, 0x00000002); | ||
793 | nv_mthd(dev, 0x9097, 0x1e68, 0x00000002); | ||
794 | nv_mthd(dev, 0x9097, 0x1e88, 0x00000002); | ||
795 | nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002); | ||
796 | nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002); | ||
797 | nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002); | ||
798 | nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001); | ||
799 | nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001); | ||
800 | nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001); | ||
801 | nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001); | ||
802 | nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001); | ||
803 | nv_mthd(dev, 0x9097, 0x1eac, 0x00000001); | ||
804 | nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001); | ||
805 | nv_mthd(dev, 0x9097, 0x1eec, 0x00000001); | ||
806 | nv_mthd(dev, 0x9097, 0x1e10, 0x00000001); | ||
807 | nv_mthd(dev, 0x9097, 0x1e30, 0x00000001); | ||
808 | nv_mthd(dev, 0x9097, 0x1e50, 0x00000001); | ||
809 | nv_mthd(dev, 0x9097, 0x1e70, 0x00000001); | ||
810 | nv_mthd(dev, 0x9097, 0x1e90, 0x00000001); | ||
811 | nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001); | ||
812 | nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001); | ||
813 | nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001); | ||
814 | nv_mthd(dev, 0x9097, 0x1e14, 0x00000002); | ||
815 | nv_mthd(dev, 0x9097, 0x1e34, 0x00000002); | ||
816 | nv_mthd(dev, 0x9097, 0x1e54, 0x00000002); | ||
817 | nv_mthd(dev, 0x9097, 0x1e74, 0x00000002); | ||
818 | nv_mthd(dev, 0x9097, 0x1e94, 0x00000002); | ||
819 | nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002); | ||
820 | nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002); | ||
821 | nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002); | ||
822 | nv_mthd(dev, 0x9097, 0x1e18, 0x00000001); | ||
823 | nv_mthd(dev, 0x9097, 0x1e38, 0x00000001); | ||
824 | nv_mthd(dev, 0x9097, 0x1e58, 0x00000001); | ||
825 | nv_mthd(dev, 0x9097, 0x1e78, 0x00000001); | ||
826 | nv_mthd(dev, 0x9097, 0x1e98, 0x00000001); | ||
827 | nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); | ||
828 | nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); | ||
829 | nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); | ||
830 | if (fermi == 0x9097) { | ||
831 | for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) | ||
832 | nv_mthd(dev, 0x9097, mthd, 0x00000000); | ||
833 | } | ||
834 | nv_mthd(dev, 0x9097, 0x030c, 0x00000001); | ||
835 | nv_mthd(dev, 0x9097, 0x1944, 0x00000000); | ||
836 | nv_mthd(dev, 0x9097, 0x1514, 0x00000000); | ||
837 | nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff); | ||
838 | nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881); | ||
839 | nv_mthd(dev, 0x9097, 0x0fac, 0x00000001); | ||
840 | nv_mthd(dev, 0x9097, 0x1538, 0x00000001); | ||
841 | nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000); | ||
842 | nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000); | ||
843 | nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014); | ||
844 | nv_mthd(dev, 0x9097, 0x0fec, 0x00000040); | ||
845 | nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000); | ||
846 | nv_mthd(dev, 0x9097, 0x179c, 0x00000000); | ||
847 | nv_mthd(dev, 0x9097, 0x1228, 0x00000400); | ||
848 | nv_mthd(dev, 0x9097, 0x122c, 0x00000300); | ||
849 | nv_mthd(dev, 0x9097, 0x1230, 0x00010001); | ||
850 | nv_mthd(dev, 0x9097, 0x07f8, 0x00000000); | ||
851 | nv_mthd(dev, 0x9097, 0x15b4, 0x00000001); | ||
852 | nv_mthd(dev, 0x9097, 0x15cc, 0x00000000); | ||
853 | nv_mthd(dev, 0x9097, 0x1534, 0x00000000); | ||
854 | nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000); | ||
855 | nv_mthd(dev, 0x9097, 0x15d0, 0x00000000); | ||
856 | nv_mthd(dev, 0x9097, 0x153c, 0x00000000); | ||
857 | nv_mthd(dev, 0x9097, 0x16b4, 0x00000003); | ||
858 | nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff); | ||
859 | nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff); | ||
860 | nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff); | ||
861 | nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff); | ||
862 | nv_mthd(dev, 0x9097, 0x0df8, 0x00000000); | ||
863 | nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000); | ||
864 | nv_mthd(dev, 0x9097, 0x1948, 0x00000000); | ||
865 | nv_mthd(dev, 0x9097, 0x1970, 0x00000001); | ||
866 | nv_mthd(dev, 0x9097, 0x161c, 0x000009f0); | ||
867 | nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010); | ||
868 | nv_mthd(dev, 0x9097, 0x163c, 0x00000000); | ||
869 | nv_mthd(dev, 0x9097, 0x15e4, 0x00000000); | ||
870 | nv_mthd(dev, 0x9097, 0x1160, 0x25e00040); | ||
871 | nv_mthd(dev, 0x9097, 0x1164, 0x25e00040); | ||
872 | nv_mthd(dev, 0x9097, 0x1168, 0x25e00040); | ||
873 | nv_mthd(dev, 0x9097, 0x116c, 0x25e00040); | ||
874 | nv_mthd(dev, 0x9097, 0x1170, 0x25e00040); | ||
875 | nv_mthd(dev, 0x9097, 0x1174, 0x25e00040); | ||
876 | nv_mthd(dev, 0x9097, 0x1178, 0x25e00040); | ||
877 | nv_mthd(dev, 0x9097, 0x117c, 0x25e00040); | ||
878 | nv_mthd(dev, 0x9097, 0x1180, 0x25e00040); | ||
879 | nv_mthd(dev, 0x9097, 0x1184, 0x25e00040); | ||
880 | nv_mthd(dev, 0x9097, 0x1188, 0x25e00040); | ||
881 | nv_mthd(dev, 0x9097, 0x118c, 0x25e00040); | ||
882 | nv_mthd(dev, 0x9097, 0x1190, 0x25e00040); | ||
883 | nv_mthd(dev, 0x9097, 0x1194, 0x25e00040); | ||
884 | nv_mthd(dev, 0x9097, 0x1198, 0x25e00040); | ||
885 | nv_mthd(dev, 0x9097, 0x119c, 0x25e00040); | ||
886 | nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040); | ||
887 | nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040); | ||
888 | nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040); | ||
889 | nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040); | ||
890 | nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040); | ||
891 | nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040); | ||
892 | nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040); | ||
893 | nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040); | ||
894 | nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040); | ||
895 | nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040); | ||
896 | nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040); | ||
897 | nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040); | ||
898 | nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040); | ||
899 | nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040); | ||
900 | nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040); | ||
901 | nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040); | ||
902 | nv_mthd(dev, 0x9097, 0x1880, 0x00000000); | ||
903 | nv_mthd(dev, 0x9097, 0x1884, 0x00000000); | ||
904 | nv_mthd(dev, 0x9097, 0x1888, 0x00000000); | ||
905 | nv_mthd(dev, 0x9097, 0x188c, 0x00000000); | ||
906 | nv_mthd(dev, 0x9097, 0x1890, 0x00000000); | ||
907 | nv_mthd(dev, 0x9097, 0x1894, 0x00000000); | ||
908 | nv_mthd(dev, 0x9097, 0x1898, 0x00000000); | ||
909 | nv_mthd(dev, 0x9097, 0x189c, 0x00000000); | ||
910 | nv_mthd(dev, 0x9097, 0x18a0, 0x00000000); | ||
911 | nv_mthd(dev, 0x9097, 0x18a4, 0x00000000); | ||
912 | nv_mthd(dev, 0x9097, 0x18a8, 0x00000000); | ||
913 | nv_mthd(dev, 0x9097, 0x18ac, 0x00000000); | ||
914 | nv_mthd(dev, 0x9097, 0x18b0, 0x00000000); | ||
915 | nv_mthd(dev, 0x9097, 0x18b4, 0x00000000); | ||
916 | nv_mthd(dev, 0x9097, 0x18b8, 0x00000000); | ||
917 | nv_mthd(dev, 0x9097, 0x18bc, 0x00000000); | ||
918 | nv_mthd(dev, 0x9097, 0x18c0, 0x00000000); | ||
919 | nv_mthd(dev, 0x9097, 0x18c4, 0x00000000); | ||
920 | nv_mthd(dev, 0x9097, 0x18c8, 0x00000000); | ||
921 | nv_mthd(dev, 0x9097, 0x18cc, 0x00000000); | ||
922 | nv_mthd(dev, 0x9097, 0x18d0, 0x00000000); | ||
923 | nv_mthd(dev, 0x9097, 0x18d4, 0x00000000); | ||
924 | nv_mthd(dev, 0x9097, 0x18d8, 0x00000000); | ||
925 | nv_mthd(dev, 0x9097, 0x18dc, 0x00000000); | ||
926 | nv_mthd(dev, 0x9097, 0x18e0, 0x00000000); | ||
927 | nv_mthd(dev, 0x9097, 0x18e4, 0x00000000); | ||
928 | nv_mthd(dev, 0x9097, 0x18e8, 0x00000000); | ||
929 | nv_mthd(dev, 0x9097, 0x18ec, 0x00000000); | ||
930 | nv_mthd(dev, 0x9097, 0x18f0, 0x00000000); | ||
931 | nv_mthd(dev, 0x9097, 0x18f4, 0x00000000); | ||
932 | nv_mthd(dev, 0x9097, 0x18f8, 0x00000000); | ||
933 | nv_mthd(dev, 0x9097, 0x18fc, 0x00000000); | ||
934 | nv_mthd(dev, 0x9097, 0x0f84, 0x00000000); | ||
935 | nv_mthd(dev, 0x9097, 0x0f88, 0x00000000); | ||
936 | nv_mthd(dev, 0x9097, 0x17c8, 0x00000000); | ||
937 | nv_mthd(dev, 0x9097, 0x17cc, 0x00000000); | ||
938 | nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff); | ||
939 | nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff); | ||
940 | nv_mthd(dev, 0x9097, 0x17d8, 0x00000002); | ||
941 | nv_mthd(dev, 0x9097, 0x17dc, 0x00000000); | ||
942 | nv_mthd(dev, 0x9097, 0x15f4, 0x00000000); | ||
943 | nv_mthd(dev, 0x9097, 0x15f8, 0x00000000); | ||
944 | nv_mthd(dev, 0x9097, 0x1434, 0x00000000); | ||
945 | nv_mthd(dev, 0x9097, 0x1438, 0x00000000); | ||
946 | nv_mthd(dev, 0x9097, 0x0d74, 0x00000000); | ||
947 | nv_mthd(dev, 0x9097, 0x0dec, 0x00000001); | ||
948 | nv_mthd(dev, 0x9097, 0x13a4, 0x00000000); | ||
949 | nv_mthd(dev, 0x9097, 0x1318, 0x00000001); | ||
950 | nv_mthd(dev, 0x9097, 0x1644, 0x00000000); | ||
951 | nv_mthd(dev, 0x9097, 0x0748, 0x00000000); | ||
952 | nv_mthd(dev, 0x9097, 0x0de8, 0x00000000); | ||
953 | nv_mthd(dev, 0x9097, 0x1648, 0x00000000); | ||
954 | nv_mthd(dev, 0x9097, 0x12a4, 0x00000000); | ||
955 | nv_mthd(dev, 0x9097, 0x1120, 0x00000000); | ||
956 | nv_mthd(dev, 0x9097, 0x1124, 0x00000000); | ||
957 | nv_mthd(dev, 0x9097, 0x1128, 0x00000000); | ||
958 | nv_mthd(dev, 0x9097, 0x112c, 0x00000000); | ||
959 | nv_mthd(dev, 0x9097, 0x1118, 0x00000000); | ||
960 | nv_mthd(dev, 0x9097, 0x164c, 0x00000000); | ||
961 | nv_mthd(dev, 0x9097, 0x1658, 0x00000000); | ||
962 | nv_mthd(dev, 0x9097, 0x1910, 0x00000290); | ||
963 | nv_mthd(dev, 0x9097, 0x1518, 0x00000000); | ||
964 | nv_mthd(dev, 0x9097, 0x165c, 0x00000001); | ||
965 | nv_mthd(dev, 0x9097, 0x1520, 0x00000000); | ||
966 | nv_mthd(dev, 0x9097, 0x1604, 0x00000000); | ||
967 | nv_mthd(dev, 0x9097, 0x1570, 0x00000000); | ||
968 | nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000); | ||
969 | nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000); | ||
970 | nv_mthd(dev, 0x9097, 0x020c, 0x00000000); | ||
971 | nv_mthd(dev, 0x9097, 0x1670, 0x30201000); | ||
972 | nv_mthd(dev, 0x9097, 0x1674, 0x70605040); | ||
973 | nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888); | ||
974 | nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8); | ||
975 | nv_mthd(dev, 0x9097, 0x166c, 0x00000000); | ||
976 | nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00); | ||
977 | nv_mthd(dev, 0x9097, 0x12d0, 0x00000003); | ||
978 | nv_mthd(dev, 0x9097, 0x12d4, 0x00000002); | ||
979 | nv_mthd(dev, 0x9097, 0x1684, 0x00000000); | ||
980 | nv_mthd(dev, 0x9097, 0x1688, 0x00000000); | ||
981 | nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02); | ||
982 | nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02); | ||
983 | nv_mthd(dev, 0x9097, 0x0db4, 0x00000000); | ||
984 | nv_mthd(dev, 0x9097, 0x168c, 0x00000000); | ||
985 | nv_mthd(dev, 0x9097, 0x15bc, 0x00000000); | ||
986 | nv_mthd(dev, 0x9097, 0x156c, 0x00000000); | ||
987 | nv_mthd(dev, 0x9097, 0x187c, 0x00000000); | ||
988 | nv_mthd(dev, 0x9097, 0x1110, 0x00000001); | ||
989 | nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000); | ||
990 | nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000); | ||
991 | nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000); | ||
992 | nv_mthd(dev, 0x9097, 0x1234, 0x00000000); | ||
993 | nv_mthd(dev, 0x9097, 0x1690, 0x00000000); | ||
994 | nv_mthd(dev, 0x9097, 0x12ac, 0x00000001); | ||
995 | nv_mthd(dev, 0x9097, 0x02c4, 0x00000000); | ||
996 | nv_mthd(dev, 0x9097, 0x0790, 0x00000000); | ||
997 | nv_mthd(dev, 0x9097, 0x0794, 0x00000000); | ||
998 | nv_mthd(dev, 0x9097, 0x0798, 0x00000000); | ||
999 | nv_mthd(dev, 0x9097, 0x079c, 0x00000000); | ||
1000 | nv_mthd(dev, 0x9097, 0x07a0, 0x00000000); | ||
1001 | nv_mthd(dev, 0x9097, 0x077c, 0x00000000); | ||
1002 | nv_mthd(dev, 0x9097, 0x1000, 0x00000010); | ||
1003 | nv_mthd(dev, 0x9097, 0x10fc, 0x00000000); | ||
1004 | nv_mthd(dev, 0x9097, 0x1290, 0x00000000); | ||
1005 | nv_mthd(dev, 0x9097, 0x0218, 0x00000010); | ||
1006 | nv_mthd(dev, 0x9097, 0x12d8, 0x00000000); | ||
1007 | nv_mthd(dev, 0x9097, 0x12dc, 0x00000010); | ||
1008 | nv_mthd(dev, 0x9097, 0x0d94, 0x00000001); | ||
1009 | nv_mthd(dev, 0x9097, 0x155c, 0x00000000); | ||
1010 | nv_mthd(dev, 0x9097, 0x1560, 0x00000000); | ||
1011 | nv_mthd(dev, 0x9097, 0x1564, 0x00001fff); | ||
1012 | nv_mthd(dev, 0x9097, 0x1574, 0x00000000); | ||
1013 | nv_mthd(dev, 0x9097, 0x1578, 0x00000000); | ||
1014 | nv_mthd(dev, 0x9097, 0x157c, 0x003fffff); | ||
1015 | nv_mthd(dev, 0x9097, 0x1354, 0x00000000); | ||
1016 | nv_mthd(dev, 0x9097, 0x1664, 0x00000000); | ||
1017 | nv_mthd(dev, 0x9097, 0x1610, 0x00000012); | ||
1018 | nv_mthd(dev, 0x9097, 0x1608, 0x00000000); | ||
1019 | nv_mthd(dev, 0x9097, 0x160c, 0x00000000); | ||
1020 | nv_mthd(dev, 0x9097, 0x162c, 0x00000003); | ||
1021 | nv_mthd(dev, 0x9097, 0x0210, 0x00000000); | ||
1022 | nv_mthd(dev, 0x9097, 0x0320, 0x00000000); | ||
1023 | nv_mthd(dev, 0x9097, 0x0324, 0x3f800000); | ||
1024 | nv_mthd(dev, 0x9097, 0x0328, 0x3f800000); | ||
1025 | nv_mthd(dev, 0x9097, 0x032c, 0x3f800000); | ||
1026 | nv_mthd(dev, 0x9097, 0x0330, 0x3f800000); | ||
1027 | nv_mthd(dev, 0x9097, 0x0334, 0x3f800000); | ||
1028 | nv_mthd(dev, 0x9097, 0x0338, 0x3f800000); | ||
1029 | nv_mthd(dev, 0x9097, 0x0750, 0x00000000); | ||
1030 | nv_mthd(dev, 0x9097, 0x0760, 0x39291909); | ||
1031 | nv_mthd(dev, 0x9097, 0x0764, 0x79695949); | ||
1032 | nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989); | ||
1033 | nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9); | ||
1034 | nv_mthd(dev, 0x9097, 0x0770, 0x30201000); | ||
1035 | nv_mthd(dev, 0x9097, 0x0774, 0x70605040); | ||
1036 | nv_mthd(dev, 0x9097, 0x0778, 0x00009080); | ||
1037 | nv_mthd(dev, 0x9097, 0x0780, 0x39291909); | ||
1038 | nv_mthd(dev, 0x9097, 0x0784, 0x79695949); | ||
1039 | nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989); | ||
1040 | nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9); | ||
1041 | nv_mthd(dev, 0x9097, 0x07d0, 0x30201000); | ||
1042 | nv_mthd(dev, 0x9097, 0x07d4, 0x70605040); | ||
1043 | nv_mthd(dev, 0x9097, 0x07d8, 0x00009080); | ||
1044 | nv_mthd(dev, 0x9097, 0x037c, 0x00000001); | ||
1045 | nv_mthd(dev, 0x9097, 0x0740, 0x00000000); | ||
1046 | nv_mthd(dev, 0x9097, 0x0744, 0x00000000); | ||
1047 | nv_mthd(dev, 0x9097, 0x2600, 0x00000000); | ||
1048 | nv_mthd(dev, 0x9097, 0x1918, 0x00000000); | ||
1049 | nv_mthd(dev, 0x9097, 0x191c, 0x00000900); | ||
1050 | nv_mthd(dev, 0x9097, 0x1920, 0x00000405); | ||
1051 | nv_mthd(dev, 0x9097, 0x1308, 0x00000001); | ||
1052 | nv_mthd(dev, 0x9097, 0x1924, 0x00000000); | ||
1053 | nv_mthd(dev, 0x9097, 0x13ac, 0x00000000); | ||
1054 | nv_mthd(dev, 0x9097, 0x192c, 0x00000001); | ||
1055 | nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c); | ||
1056 | nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000); | ||
1057 | nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000); | ||
1058 | nv_mthd(dev, 0x9097, 0x02c0, 0x00000001); | ||
1059 | nv_mthd(dev, 0x9097, 0x1510, 0x00000000); | ||
1060 | nv_mthd(dev, 0x9097, 0x1940, 0x00000000); | ||
1061 | nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000); | ||
1062 | nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000); | ||
1063 | nv_mthd(dev, 0x9097, 0x194c, 0x00000000); | ||
1064 | nv_mthd(dev, 0x9097, 0x1950, 0x00000000); | ||
1065 | nv_mthd(dev, 0x9097, 0x1968, 0x00000000); | ||
1066 | nv_mthd(dev, 0x9097, 0x1590, 0x0000003f); | ||
1067 | nv_mthd(dev, 0x9097, 0x07e8, 0x00000000); | ||
1068 | nv_mthd(dev, 0x9097, 0x07ec, 0x00000000); | ||
1069 | nv_mthd(dev, 0x9097, 0x07f0, 0x00000000); | ||
1070 | nv_mthd(dev, 0x9097, 0x07f4, 0x00000000); | ||
1071 | nv_mthd(dev, 0x9097, 0x196c, 0x00000011); | ||
1072 | nv_mthd(dev, 0x9097, 0x197c, 0x00000000); | ||
1073 | nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000); | ||
1074 | nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000); | ||
1075 | nv_mthd(dev, 0x9097, 0x02d8, 0x00000040); | ||
1076 | nv_mthd(dev, 0x9097, 0x1980, 0x00000080); | ||
1077 | nv_mthd(dev, 0x9097, 0x1504, 0x00000080); | ||
1078 | nv_mthd(dev, 0x9097, 0x1984, 0x00000000); | ||
1079 | nv_mthd(dev, 0x9097, 0x0300, 0x00000001); | ||
1080 | nv_mthd(dev, 0x9097, 0x13a8, 0x00000000); | ||
1081 | nv_mthd(dev, 0x9097, 0x12ec, 0x00000000); | ||
1082 | nv_mthd(dev, 0x9097, 0x1310, 0x00000000); | ||
1083 | nv_mthd(dev, 0x9097, 0x1314, 0x00000001); | ||
1084 | nv_mthd(dev, 0x9097, 0x1380, 0x00000000); | ||
1085 | nv_mthd(dev, 0x9097, 0x1384, 0x00000001); | ||
1086 | nv_mthd(dev, 0x9097, 0x1388, 0x00000001); | ||
1087 | nv_mthd(dev, 0x9097, 0x138c, 0x00000001); | ||
1088 | nv_mthd(dev, 0x9097, 0x1390, 0x00000001); | ||
1089 | nv_mthd(dev, 0x9097, 0x1394, 0x00000000); | ||
1090 | nv_mthd(dev, 0x9097, 0x139c, 0x00000000); | ||
1091 | nv_mthd(dev, 0x9097, 0x1398, 0x00000000); | ||
1092 | nv_mthd(dev, 0x9097, 0x1594, 0x00000000); | ||
1093 | nv_mthd(dev, 0x9097, 0x1598, 0x00000001); | ||
1094 | nv_mthd(dev, 0x9097, 0x159c, 0x00000001); | ||
1095 | nv_mthd(dev, 0x9097, 0x15a0, 0x00000001); | ||
1096 | nv_mthd(dev, 0x9097, 0x15a4, 0x00000001); | ||
1097 | nv_mthd(dev, 0x9097, 0x0f54, 0x00000000); | ||
1098 | nv_mthd(dev, 0x9097, 0x0f58, 0x00000000); | ||
1099 | nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000); | ||
1100 | nv_mthd(dev, 0x9097, 0x19bc, 0x00000000); | ||
1101 | nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000); | ||
1102 | nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000); | ||
1103 | nv_mthd(dev, 0x9097, 0x12cc, 0x00000000); | ||
1104 | nv_mthd(dev, 0x9097, 0x12e8, 0x00000000); | ||
1105 | nv_mthd(dev, 0x9097, 0x130c, 0x00000001); | ||
1106 | nv_mthd(dev, 0x9097, 0x1360, 0x00000000); | ||
1107 | nv_mthd(dev, 0x9097, 0x1364, 0x00000000); | ||
1108 | nv_mthd(dev, 0x9097, 0x1368, 0x00000000); | ||
1109 | nv_mthd(dev, 0x9097, 0x136c, 0x00000000); | ||
1110 | nv_mthd(dev, 0x9097, 0x1370, 0x00000000); | ||
1111 | nv_mthd(dev, 0x9097, 0x1374, 0x00000000); | ||
1112 | nv_mthd(dev, 0x9097, 0x1378, 0x00000000); | ||
1113 | nv_mthd(dev, 0x9097, 0x137c, 0x00000000); | ||
1114 | nv_mthd(dev, 0x9097, 0x133c, 0x00000001); | ||
1115 | nv_mthd(dev, 0x9097, 0x1340, 0x00000001); | ||
1116 | nv_mthd(dev, 0x9097, 0x1344, 0x00000002); | ||
1117 | nv_mthd(dev, 0x9097, 0x1348, 0x00000001); | ||
1118 | nv_mthd(dev, 0x9097, 0x134c, 0x00000001); | ||
1119 | nv_mthd(dev, 0x9097, 0x1350, 0x00000002); | ||
1120 | nv_mthd(dev, 0x9097, 0x1358, 0x00000001); | ||
1121 | nv_mthd(dev, 0x9097, 0x12e4, 0x00000000); | ||
1122 | nv_mthd(dev, 0x9097, 0x131c, 0x00000000); | ||
1123 | nv_mthd(dev, 0x9097, 0x1320, 0x00000000); | ||
1124 | nv_mthd(dev, 0x9097, 0x1324, 0x00000000); | ||
1125 | nv_mthd(dev, 0x9097, 0x1328, 0x00000000); | ||
1126 | nv_mthd(dev, 0x9097, 0x19c0, 0x00000000); | ||
1127 | nv_mthd(dev, 0x9097, 0x1140, 0x00000000); | ||
1128 | nv_mthd(dev, 0x9097, 0x19c4, 0x00000000); | ||
1129 | nv_mthd(dev, 0x9097, 0x19c8, 0x00001500); | ||
1130 | nv_mthd(dev, 0x9097, 0x135c, 0x00000000); | ||
1131 | nv_mthd(dev, 0x9097, 0x0f90, 0x00000000); | ||
1132 | nv_mthd(dev, 0x9097, 0x19e0, 0x00000001); | ||
1133 | nv_mthd(dev, 0x9097, 0x19e4, 0x00000001); | ||
1134 | nv_mthd(dev, 0x9097, 0x19e8, 0x00000001); | ||
1135 | nv_mthd(dev, 0x9097, 0x19ec, 0x00000001); | ||
1136 | nv_mthd(dev, 0x9097, 0x19f0, 0x00000001); | ||
1137 | nv_mthd(dev, 0x9097, 0x19f4, 0x00000001); | ||
1138 | nv_mthd(dev, 0x9097, 0x19f8, 0x00000001); | ||
1139 | nv_mthd(dev, 0x9097, 0x19fc, 0x00000001); | ||
1140 | nv_mthd(dev, 0x9097, 0x19cc, 0x00000001); | ||
1141 | nv_mthd(dev, 0x9097, 0x15b8, 0x00000000); | ||
1142 | nv_mthd(dev, 0x9097, 0x1a00, 0x00001111); | ||
1143 | nv_mthd(dev, 0x9097, 0x1a04, 0x00000000); | ||
1144 | nv_mthd(dev, 0x9097, 0x1a08, 0x00000000); | ||
1145 | nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000); | ||
1146 | nv_mthd(dev, 0x9097, 0x1a10, 0x00000000); | ||
1147 | nv_mthd(dev, 0x9097, 0x1a14, 0x00000000); | ||
1148 | nv_mthd(dev, 0x9097, 0x1a18, 0x00000000); | ||
1149 | nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000); | ||
1150 | nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000); | ||
1151 | nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000); | ||
1152 | nv_mthd(dev, 0x9097, 0x10f8, 0x00001010); | ||
1153 | nv_mthd(dev, 0x9097, 0x0d80, 0x00000000); | ||
1154 | nv_mthd(dev, 0x9097, 0x0d84, 0x00000000); | ||
1155 | nv_mthd(dev, 0x9097, 0x0d88, 0x00000000); | ||
1156 | nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000); | ||
1157 | nv_mthd(dev, 0x9097, 0x0d90, 0x00000000); | ||
1158 | nv_mthd(dev, 0x9097, 0x0da0, 0x00000000); | ||
1159 | nv_mthd(dev, 0x9097, 0x1508, 0x80000000); | ||
1160 | nv_mthd(dev, 0x9097, 0x150c, 0x40000000); | ||
1161 | nv_mthd(dev, 0x9097, 0x1668, 0x00000000); | ||
1162 | nv_mthd(dev, 0x9097, 0x0318, 0x00000008); | ||
1163 | nv_mthd(dev, 0x9097, 0x031c, 0x00000008); | ||
1164 | nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001); | ||
1165 | nv_mthd(dev, 0x9097, 0x07dc, 0x00000000); | ||
1166 | nv_mthd(dev, 0x9097, 0x074c, 0x00000055); | ||
1167 | nv_mthd(dev, 0x9097, 0x1420, 0x00000003); | ||
1168 | nv_mthd(dev, 0x9097, 0x17bc, 0x00000000); | ||
1169 | nv_mthd(dev, 0x9097, 0x17c0, 0x00000000); | ||
1170 | nv_mthd(dev, 0x9097, 0x17c4, 0x00000001); | ||
1171 | nv_mthd(dev, 0x9097, 0x1008, 0x00000008); | ||
1172 | nv_mthd(dev, 0x9097, 0x100c, 0x00000040); | ||
1173 | nv_mthd(dev, 0x9097, 0x1010, 0x0000012c); | ||
1174 | nv_mthd(dev, 0x9097, 0x0d60, 0x00000040); | ||
1175 | nv_mthd(dev, 0x9097, 0x075c, 0x00000003); | ||
1176 | nv_mthd(dev, 0x9097, 0x1018, 0x00000020); | ||
1177 | nv_mthd(dev, 0x9097, 0x101c, 0x00000001); | ||
1178 | nv_mthd(dev, 0x9097, 0x1020, 0x00000020); | ||
1179 | nv_mthd(dev, 0x9097, 0x1024, 0x00000001); | ||
1180 | nv_mthd(dev, 0x9097, 0x1444, 0x00000000); | ||
1181 | nv_mthd(dev, 0x9097, 0x1448, 0x00000000); | ||
1182 | nv_mthd(dev, 0x9097, 0x144c, 0x00000000); | ||
1183 | nv_mthd(dev, 0x9097, 0x0360, 0x20164010); | ||
1184 | nv_mthd(dev, 0x9097, 0x0364, 0x00000020); | ||
1185 | nv_mthd(dev, 0x9097, 0x0368, 0x00000000); | ||
1186 | nv_mthd(dev, 0x9097, 0x0de4, 0x00000000); | ||
1187 | nv_mthd(dev, 0x9097, 0x0204, 0x00000006); | ||
1188 | nv_mthd(dev, 0x9097, 0x0208, 0x00000000); | ||
1189 | nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff); | ||
1190 | nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48); | ||
1191 | nv_mthd(dev, 0x9097, 0x1220, 0x00000005); | ||
1192 | nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000); | ||
1193 | nv_mthd(dev, 0x9097, 0x0f98, 0x00300008); | ||
1194 | nv_mthd(dev, 0x9097, 0x1284, 0x04000080); | ||
1195 | nv_mthd(dev, 0x9097, 0x1450, 0x00300008); | ||
1196 | nv_mthd(dev, 0x9097, 0x1454, 0x04000080); | ||
1197 | nv_mthd(dev, 0x9097, 0x0214, 0x00000000); | ||
1198 | /* in trace, right after 0x90c0, not here */ | ||
1199 | nv_mthd(dev, 0x9097, 0x3410, 0x80002006); | ||
1200 | } | ||
1201 | |||
1202 | static void | ||
1203 | nvc0_grctx_generate_9197(struct drm_device *dev) | ||
1204 | { | ||
1205 | u32 fermi = nvc0_graph_class(dev); | ||
1206 | u32 mthd; | ||
1207 | |||
1208 | if (fermi == 0x9197) { | ||
1209 | for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) | ||
1210 | nv_mthd(dev, 0x9197, mthd, 0x00000000); | ||
1211 | } | ||
1212 | nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001); | ||
1213 | } | ||
1214 | |||
1215 | static void | ||
1216 | nvc0_grctx_generate_9297(struct drm_device *dev) | ||
1217 | { | ||
1218 | u32 fermi = nvc0_graph_class(dev); | ||
1219 | u32 mthd; | ||
1220 | |||
1221 | if (fermi == 0x9297) { | ||
1222 | for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) | ||
1223 | nv_mthd(dev, 0x9297, mthd, 0x00000000); | ||
1224 | } | ||
1225 | nv_mthd(dev, 0x9297, 0x036c, 0x00000000); | ||
1226 | nv_mthd(dev, 0x9297, 0x0370, 0x00000000); | ||
1227 | nv_mthd(dev, 0x9297, 0x07a4, 0x00000000); | ||
1228 | nv_mthd(dev, 0x9297, 0x07a8, 0x00000000); | ||
1229 | nv_mthd(dev, 0x9297, 0x0374, 0x00000000); | ||
1230 | nv_mthd(dev, 0x9297, 0x0378, 0x00000020); | ||
1231 | } | ||
1232 | |||
/* Initial method state for the 0x902d (2D) object in the generated
 * ("golden") context image.  Individual method meanings are undocumented
 * here; the constants are presumably captured from blob traces (see the
 * "in trace" note elsewhere in this file) -- preserve order and values. */
static void
nvc0_grctx_generate_902d(struct drm_device *dev)
{
	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
	/* second group mirrors the first, but note 0x0240 is not written */
	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
}
1254 | |||
/* Initial method state for the 0x9039 object: all listed methods are
 * cleared to zero.  Method semantics undocumented; order kept as-is. */
static void
nvc0_grctx_generate_9039(struct drm_device *dev)
{
	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
}
1267 | |||
/* Initial method state for the 0x90c0 (compute) object.  Constants appear
 * trace-derived; meanings undocumented -- preserve order and values. */
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
	/* 0x270c..0x27ec, stride 0x20: cleared */
	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
}
1299 | |||
/* Initial PGRAPH dispatch-unit register state (0x4040xx/0x4041xx/0x4042xx).
 * Values appear trace-derived; register meanings largely undocumented.
 * Preserve write order. */
static void
nvc0_grctx_generate_dispatch(struct drm_device *dev)
{
	int i;

	nv_wr32(dev, 0x404004, 0x00000000);
	nv_wr32(dev, 0x404008, 0x00000000);
	nv_wr32(dev, 0x40400c, 0x00000000);
	nv_wr32(dev, 0x404010, 0x00000000);
	nv_wr32(dev, 0x404014, 0x00000000);
	nv_wr32(dev, 0x404018, 0x00000000);
	nv_wr32(dev, 0x40401c, 0x00000000);
	nv_wr32(dev, 0x404020, 0x00000000);
	nv_wr32(dev, 0x404024, 0x00000000);
	nv_wr32(dev, 0x404028, 0x00000000);
	nv_wr32(dev, 0x40402c, 0x00000000);
	nv_wr32(dev, 0x404044, 0x00000000);
	nv_wr32(dev, 0x404094, 0x00000000);
	nv_wr32(dev, 0x404098, 0x00000000);
	nv_wr32(dev, 0x40409c, 0x00000000);
	nv_wr32(dev, 0x4040a0, 0x00000000);
	nv_wr32(dev, 0x4040a4, 0x00000000);
	nv_wr32(dev, 0x4040a8, 0x00000000);
	nv_wr32(dev, 0x4040ac, 0x00000000);
	nv_wr32(dev, 0x4040b0, 0x00000000);
	nv_wr32(dev, 0x4040b4, 0x00000000);
	nv_wr32(dev, 0x4040b8, 0x00000000);
	nv_wr32(dev, 0x4040bc, 0x00000000);
	nv_wr32(dev, 0x4040c0, 0x00000000);
	nv_wr32(dev, 0x4040c4, 0x00000000);
	nv_wr32(dev, 0x4040c8, 0xf0000087);
	nv_wr32(dev, 0x4040d4, 0x00000000);
	nv_wr32(dev, 0x4040d8, 0x00000000);
	nv_wr32(dev, 0x4040dc, 0x00000000);
	nv_wr32(dev, 0x4040e0, 0x00000000);
	nv_wr32(dev, 0x4040e4, 0x00000000);
	nv_wr32(dev, 0x4040e8, 0x00001000);
	nv_wr32(dev, 0x4040f8, 0x00000000);
	nv_wr32(dev, 0x404130, 0x00000000);
	nv_wr32(dev, 0x404134, 0x00000000);
	nv_wr32(dev, 0x404138, 0x20000040);
	nv_wr32(dev, 0x404150, 0x0000002e);
	nv_wr32(dev, 0x404154, 0x00000400);
	nv_wr32(dev, 0x404158, 0x00000200);
	nv_wr32(dev, 0x404164, 0x00000055);
	nv_wr32(dev, 0x404168, 0x00000000);
	nv_wr32(dev, 0x404174, 0x00000000);
	nv_wr32(dev, 0x404178, 0x00000000);
	nv_wr32(dev, 0x40417c, 0x00000000);
	/* clear per-subchannel state */
	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
}
1352 | |||
1353 | static void | ||
1354 | nvc0_grctx_generate_macro(struct drm_device *dev) | ||
1355 | { | ||
1356 | nv_wr32(dev, 0x404404, 0x00000000); | ||
1357 | nv_wr32(dev, 0x404408, 0x00000000); | ||
1358 | nv_wr32(dev, 0x40440c, 0x00000000); | ||
1359 | nv_wr32(dev, 0x404410, 0x00000000); | ||
1360 | nv_wr32(dev, 0x404414, 0x00000000); | ||
1361 | nv_wr32(dev, 0x404418, 0x00000000); | ||
1362 | nv_wr32(dev, 0x40441c, 0x00000000); | ||
1363 | nv_wr32(dev, 0x404420, 0x00000000); | ||
1364 | nv_wr32(dev, 0x404424, 0x00000000); | ||
1365 | nv_wr32(dev, 0x404428, 0x00000000); | ||
1366 | nv_wr32(dev, 0x40442c, 0x00000000); | ||
1367 | nv_wr32(dev, 0x404430, 0x00000000); | ||
1368 | nv_wr32(dev, 0x404434, 0x00000000); | ||
1369 | nv_wr32(dev, 0x404438, 0x00000000); | ||
1370 | nv_wr32(dev, 0x404460, 0x00000000); | ||
1371 | nv_wr32(dev, 0x404464, 0x00000000); | ||
1372 | nv_wr32(dev, 0x404468, 0x00ffffff); | ||
1373 | nv_wr32(dev, 0x40446c, 0x00000000); | ||
1374 | nv_wr32(dev, 0x404480, 0x00000001); | ||
1375 | nv_wr32(dev, 0x404498, 0x00000001); | ||
1376 | } | ||
1377 | |||
1378 | static void | ||
1379 | nvc0_grctx_generate_m2mf(struct drm_device *dev) | ||
1380 | { | ||
1381 | nv_wr32(dev, 0x404604, 0x00000015); | ||
1382 | nv_wr32(dev, 0x404608, 0x00000000); | ||
1383 | nv_wr32(dev, 0x40460c, 0x00002e00); | ||
1384 | nv_wr32(dev, 0x404610, 0x00000100); | ||
1385 | nv_wr32(dev, 0x404618, 0x00000000); | ||
1386 | nv_wr32(dev, 0x40461c, 0x00000000); | ||
1387 | nv_wr32(dev, 0x404620, 0x00000000); | ||
1388 | nv_wr32(dev, 0x404624, 0x00000000); | ||
1389 | nv_wr32(dev, 0x404628, 0x00000000); | ||
1390 | nv_wr32(dev, 0x40462c, 0x00000000); | ||
1391 | nv_wr32(dev, 0x404630, 0x00000000); | ||
1392 | nv_wr32(dev, 0x404634, 0x00000000); | ||
1393 | nv_wr32(dev, 0x404638, 0x00000004); | ||
1394 | nv_wr32(dev, 0x40463c, 0x00000000); | ||
1395 | nv_wr32(dev, 0x404640, 0x00000000); | ||
1396 | nv_wr32(dev, 0x404644, 0x00000000); | ||
1397 | nv_wr32(dev, 0x404648, 0x00000000); | ||
1398 | nv_wr32(dev, 0x40464c, 0x00000000); | ||
1399 | nv_wr32(dev, 0x404650, 0x00000000); | ||
1400 | nv_wr32(dev, 0x404654, 0x00000000); | ||
1401 | nv_wr32(dev, 0x404658, 0x00000000); | ||
1402 | nv_wr32(dev, 0x40465c, 0x007f0100); | ||
1403 | nv_wr32(dev, 0x404660, 0x00000000); | ||
1404 | nv_wr32(dev, 0x404664, 0x00000000); | ||
1405 | nv_wr32(dev, 0x404668, 0x00000000); | ||
1406 | nv_wr32(dev, 0x40466c, 0x00000000); | ||
1407 | nv_wr32(dev, 0x404670, 0x00000000); | ||
1408 | nv_wr32(dev, 0x404674, 0x00000000); | ||
1409 | nv_wr32(dev, 0x404678, 0x00000000); | ||
1410 | nv_wr32(dev, 0x40467c, 0x00000002); | ||
1411 | nv_wr32(dev, 0x404680, 0x00000000); | ||
1412 | nv_wr32(dev, 0x404684, 0x00000000); | ||
1413 | nv_wr32(dev, 0x404688, 0x00000000); | ||
1414 | nv_wr32(dev, 0x40468c, 0x00000000); | ||
1415 | nv_wr32(dev, 0x404690, 0x00000000); | ||
1416 | nv_wr32(dev, 0x404694, 0x00000000); | ||
1417 | nv_wr32(dev, 0x404698, 0x00000000); | ||
1418 | nv_wr32(dev, 0x40469c, 0x00000000); | ||
1419 | nv_wr32(dev, 0x4046a0, 0x007f0080); | ||
1420 | nv_wr32(dev, 0x4046a4, 0x00000000); | ||
1421 | nv_wr32(dev, 0x4046a8, 0x00000000); | ||
1422 | nv_wr32(dev, 0x4046ac, 0x00000000); | ||
1423 | nv_wr32(dev, 0x4046b0, 0x00000000); | ||
1424 | nv_wr32(dev, 0x4046b4, 0x00000000); | ||
1425 | nv_wr32(dev, 0x4046b8, 0x00000000); | ||
1426 | nv_wr32(dev, 0x4046bc, 0x00000000); | ||
1427 | nv_wr32(dev, 0x4046c0, 0x00000000); | ||
1428 | nv_wr32(dev, 0x4046c4, 0x00000000); | ||
1429 | nv_wr32(dev, 0x4046c8, 0x00000000); | ||
1430 | nv_wr32(dev, 0x4046cc, 0x00000000); | ||
1431 | nv_wr32(dev, 0x4046d0, 0x00000000); | ||
1432 | nv_wr32(dev, 0x4046d4, 0x00000000); | ||
1433 | nv_wr32(dev, 0x4046d8, 0x00000000); | ||
1434 | nv_wr32(dev, 0x4046dc, 0x00000000); | ||
1435 | nv_wr32(dev, 0x4046e0, 0x00000000); | ||
1436 | nv_wr32(dev, 0x4046e4, 0x00000000); | ||
1437 | nv_wr32(dev, 0x4046e8, 0x00000000); | ||
1438 | nv_wr32(dev, 0x4046f0, 0x00000000); | ||
1439 | nv_wr32(dev, 0x4046f4, 0x00000000); | ||
1440 | } | ||
1441 | |||
1442 | static void | ||
1443 | nvc0_grctx_generate_unk47xx(struct drm_device *dev) | ||
1444 | { | ||
1445 | nv_wr32(dev, 0x404700, 0x00000000); | ||
1446 | nv_wr32(dev, 0x404704, 0x00000000); | ||
1447 | nv_wr32(dev, 0x404708, 0x00000000); | ||
1448 | nv_wr32(dev, 0x40470c, 0x00000000); | ||
1449 | nv_wr32(dev, 0x404710, 0x00000000); | ||
1450 | nv_wr32(dev, 0x404714, 0x00000000); | ||
1451 | nv_wr32(dev, 0x404718, 0x00000000); | ||
1452 | nv_wr32(dev, 0x40471c, 0x00000000); | ||
1453 | nv_wr32(dev, 0x404720, 0x00000000); | ||
1454 | nv_wr32(dev, 0x404724, 0x00000000); | ||
1455 | nv_wr32(dev, 0x404728, 0x00000000); | ||
1456 | nv_wr32(dev, 0x40472c, 0x00000000); | ||
1457 | nv_wr32(dev, 0x404730, 0x00000000); | ||
1458 | nv_wr32(dev, 0x404734, 0x00000100); | ||
1459 | nv_wr32(dev, 0x404738, 0x00000000); | ||
1460 | nv_wr32(dev, 0x40473c, 0x00000000); | ||
1461 | nv_wr32(dev, 0x404740, 0x00000000); | ||
1462 | nv_wr32(dev, 0x404744, 0x00000000); | ||
1463 | nv_wr32(dev, 0x404748, 0x00000000); | ||
1464 | nv_wr32(dev, 0x40474c, 0x00000000); | ||
1465 | nv_wr32(dev, 0x404750, 0x00000000); | ||
1466 | nv_wr32(dev, 0x404754, 0x00000000); | ||
1467 | } | ||
1468 | |||
1469 | static void | ||
1470 | nvc0_grctx_generate_shaders(struct drm_device *dev) | ||
1471 | { | ||
1472 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1473 | |||
1474 | if (dev_priv->chipset != 0xc1) { | ||
1475 | nv_wr32(dev, 0x405800, 0x078000bf); | ||
1476 | nv_wr32(dev, 0x405830, 0x02180000); | ||
1477 | } else { | ||
1478 | nv_wr32(dev, 0x405800, 0x0f8000bf); | ||
1479 | nv_wr32(dev, 0x405830, 0x02180218); | ||
1480 | } | ||
1481 | nv_wr32(dev, 0x405834, 0x00000000); | ||
1482 | nv_wr32(dev, 0x405838, 0x00000000); | ||
1483 | nv_wr32(dev, 0x405854, 0x00000000); | ||
1484 | nv_wr32(dev, 0x405870, 0x00000001); | ||
1485 | nv_wr32(dev, 0x405874, 0x00000001); | ||
1486 | nv_wr32(dev, 0x405878, 0x00000001); | ||
1487 | nv_wr32(dev, 0x40587c, 0x00000001); | ||
1488 | nv_wr32(dev, 0x405a00, 0x00000000); | ||
1489 | nv_wr32(dev, 0x405a04, 0x00000000); | ||
1490 | nv_wr32(dev, 0x405a18, 0x00000000); | ||
1491 | } | ||
1492 | |||
1493 | static void | ||
1494 | nvc0_grctx_generate_unk60xx(struct drm_device *dev) | ||
1495 | { | ||
1496 | nv_wr32(dev, 0x406020, 0x000103c1); | ||
1497 | nv_wr32(dev, 0x406028, 0x00000001); | ||
1498 | nv_wr32(dev, 0x40602c, 0x00000001); | ||
1499 | nv_wr32(dev, 0x406030, 0x00000001); | ||
1500 | nv_wr32(dev, 0x406034, 0x00000001); | ||
1501 | } | ||
1502 | |||
/* Initial state for the unidentified 0x4064xx unit; two extra registers
 * exist only on NVC1. */
static void
nvc0_grctx_generate_unk64xx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, 0x4064a8, 0x00000000);
	nv_wr32(dev, 0x4064ac, 0x00003fff);
	nv_wr32(dev, 0x4064b4, 0x00000000);
	nv_wr32(dev, 0x4064b8, 0x00000000);
	/* NVC1-only registers */
	if (dev_priv->chipset == 0xc1) {
		nv_wr32(dev, 0x4064c0, 0x80140078);
		nv_wr32(dev, 0x4064c4, 0x0086ffff);
	}
}
1517 | |||
/* Initial TP-bus (0x4078xx) register state.  NOTE(review): the
 * 0x0a418820/0x062080e6/... pattern also appears in the GPC and TP
 * broadcast blocks below -- presumably the same kind of table. */
static void
nvc0_grctx_generate_tpbus(struct drm_device *dev)
{
	nv_wr32(dev, 0x407804, 0x00000023);
	nv_wr32(dev, 0x40780c, 0x0a418820);
	nv_wr32(dev, 0x407810, 0x062080e6);
	nv_wr32(dev, 0x407814, 0x020398a4);
	nv_wr32(dev, 0x407818, 0x0e629062);
	nv_wr32(dev, 0x40781c, 0x0a418820);
	nv_wr32(dev, 0x407820, 0x000000e6);
	nv_wr32(dev, 0x4078bc, 0x00000103);
}
1530 | |||
/* Initial code/constant-cache (0x4080xx) register state; values are
 * trace-derived and otherwise undocumented. */
static void
nvc0_grctx_generate_ccache(struct drm_device *dev)
{
	nv_wr32(dev, 0x408000, 0x00000000);
	nv_wr32(dev, 0x408004, 0x00000000);
	nv_wr32(dev, 0x408008, 0x00000018);
	nv_wr32(dev, 0x40800c, 0x00000000);
	nv_wr32(dev, 0x408010, 0x00000000);
	nv_wr32(dev, 0x408014, 0x00000069);
	nv_wr32(dev, 0x408018, 0xe100e100);
	nv_wr32(dev, 0x408064, 0x00000000);
}
1543 | |||
1544 | static void | ||
1545 | nvc0_grctx_generate_rop(struct drm_device *dev) | ||
1546 | { | ||
1547 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1548 | int chipset = dev_priv->chipset; | ||
1549 | |||
1550 | /* ROPC_BROADCAST */ | ||
1551 | nv_wr32(dev, 0x408800, 0x02802a3c); | ||
1552 | nv_wr32(dev, 0x408804, 0x00000040); | ||
1553 | nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005); | ||
1554 | nv_wr32(dev, 0x408900, 0x3080b801); | ||
1555 | nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001); | ||
1556 | nv_wr32(dev, 0x408908, 0x00c80929); | ||
1557 | nv_wr32(dev, 0x40890c, 0x00000000); | ||
1558 | nv_wr32(dev, 0x408980, 0x0000011d); | ||
1559 | } | ||
1560 | |||
/* Initial GPC broadcast (0x418xxx/0x419xxx) register state.  Values are
 * trace-derived; a few registers differ on NVC1.  Write order preserved. */
static void
nvc0_grctx_generate_gpc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chipset = dev_priv->chipset;
	int i;

	/* GPC_BROADCAST */
	nv_wr32(dev, 0x418380, 0x00000016);
	nv_wr32(dev, 0x418400, 0x38004e00);
	nv_wr32(dev, 0x418404, 0x71e0ffff);
	nv_wr32(dev, 0x418408, 0x00000000);
	nv_wr32(dev, 0x41840c, 0x00001008);
	nv_wr32(dev, 0x418410, 0x0fff0fff);
	nv_wr32(dev, 0x418414, 0x00200fff);
	nv_wr32(dev, 0x418450, 0x00000000);
	nv_wr32(dev, 0x418454, 0x00000000);
	nv_wr32(dev, 0x418458, 0x00000000);
	nv_wr32(dev, 0x41845c, 0x00000000);
	nv_wr32(dev, 0x418460, 0x00000000);
	nv_wr32(dev, 0x418464, 0x00000000);
	nv_wr32(dev, 0x418468, 0x00000001);
	nv_wr32(dev, 0x41846c, 0x00000000);
	nv_wr32(dev, 0x418470, 0x00000000);
	nv_wr32(dev, 0x418600, 0x0000001f);
	nv_wr32(dev, 0x418684, 0x0000000f);
	nv_wr32(dev, 0x418700, 0x00000002);
	nv_wr32(dev, 0x418704, 0x00000080);
	nv_wr32(dev, 0x418708, 0x00000000);
	nv_wr32(dev, 0x41870c, 0x07c80000);
	nv_wr32(dev, 0x418710, 0x00000000);
	nv_wr32(dev, 0x418800, 0x0006860a);
	nv_wr32(dev, 0x418808, 0x00000000);
	nv_wr32(dev, 0x41880c, 0x00000000);
	nv_wr32(dev, 0x418810, 0x00000000);
	nv_wr32(dev, 0x418828, 0x00008442);
	/* NVC1 sets an extra high bit here */
	nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
	nv_wr32(dev, 0x4188d8, 0x00000008);
	nv_wr32(dev, 0x4188e0, 0x01000000);
	nv_wr32(dev, 0x4188e8, 0x00000000);
	nv_wr32(dev, 0x4188ec, 0x00000000);
	nv_wr32(dev, 0x4188f0, 0x00000000);
	nv_wr32(dev, 0x4188f4, 0x00000000);
	nv_wr32(dev, 0x4188f8, 0x00000000);
	nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
	nv_wr32(dev, 0x41891c, 0x00ff00ff);
	nv_wr32(dev, 0x418924, 0x00000000);
	nv_wr32(dev, 0x418928, 0x00ffff00);
	nv_wr32(dev, 0x41892c, 0x0000ff00);
	/* eight identical 0x20-byte register groups at 0x418a00 */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
	}
	nv_wr32(dev, 0x418b00, 0x00000000);
	/* same value pattern as the tpbus block above */
	nv_wr32(dev, 0x418b08, 0x0a418820);
	nv_wr32(dev, 0x418b0c, 0x062080e6);
	nv_wr32(dev, 0x418b10, 0x020398a4);
	nv_wr32(dev, 0x418b14, 0x0e629062);
	nv_wr32(dev, 0x418b18, 0x0a418820);
	nv_wr32(dev, 0x418b1c, 0x000000e6);
	nv_wr32(dev, 0x418bb8, 0x00000103);
	nv_wr32(dev, 0x418c08, 0x00000001);
	nv_wr32(dev, 0x418c10, 0x00000000);
	nv_wr32(dev, 0x418c14, 0x00000000);
	nv_wr32(dev, 0x418c18, 0x00000000);
	nv_wr32(dev, 0x418c1c, 0x00000000);
	nv_wr32(dev, 0x418c20, 0x00000000);
	nv_wr32(dev, 0x418c24, 0x00000000);
	nv_wr32(dev, 0x418c28, 0x00000000);
	nv_wr32(dev, 0x418c2c, 0x00000000);
	/* NVC1-only register */
	if (chipset == 0xc1)
		nv_wr32(dev, 0x418c6c, 0x00000001);
	nv_wr32(dev, 0x418c80, 0x20200004);
	nv_wr32(dev, 0x418c8c, 0x00000001);
	nv_wr32(dev, 0x419000, 0x00000780);
	nv_wr32(dev, 0x419004, 0x00000000);
	nv_wr32(dev, 0x419008, 0x00000000);
	nv_wr32(dev, 0x419014, 0x00000004);
}
1645 | |||
/* Initial TP broadcast (0x4198xx..0x419fxx) register state.  Values are
 * trace-derived; several registers are chipset-dependent (NVC1, or
 * everything except NVC0/NVC8).  Write order preserved. */
static void
nvc0_grctx_generate_tp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chipset = dev_priv->chipset;

	/* GPC_BROADCAST.TP_BROADCAST */
	nv_wr32(dev, 0x419818, 0x00000000);
	nv_wr32(dev, 0x41983c, 0x00038bc7);
	nv_wr32(dev, 0x419848, 0x00000000);
	nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
	nv_wr32(dev, 0x419888, 0x00000000);
	nv_wr32(dev, 0x419a00, 0x000001f0);
	nv_wr32(dev, 0x419a04, 0x00000001);
	nv_wr32(dev, 0x419a08, 0x00000023);
	nv_wr32(dev, 0x419a0c, 0x00020000);
	nv_wr32(dev, 0x419a10, 0x00000000);
	nv_wr32(dev, 0x419a14, 0x00000200);
	nv_wr32(dev, 0x419a1c, 0x00000000);
	nv_wr32(dev, 0x419a20, 0x00000800);
	/* register absent (or defaulted) on NVC0/NVC8 */
	if (chipset != 0xc0 && chipset != 0xc8)
		nv_wr32(dev, 0x00419ac4, 0x0007f440);
	/* same value pattern as the tpbus/gpc blocks above */
	nv_wr32(dev, 0x419b00, 0x0a418820);
	nv_wr32(dev, 0x419b04, 0x062080e6);
	nv_wr32(dev, 0x419b08, 0x020398a4);
	nv_wr32(dev, 0x419b0c, 0x0e629062);
	nv_wr32(dev, 0x419b10, 0x0a418820);
	nv_wr32(dev, 0x419b14, 0x000000e6);
	nv_wr32(dev, 0x419bd0, 0x00900103);
	nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
	nv_wr32(dev, 0x419be4, 0x00000000);
	nv_wr32(dev, 0x419c00, 0x00000002);
	nv_wr32(dev, 0x419c04, 0x00000006);
	nv_wr32(dev, 0x419c08, 0x00000002);
	nv_wr32(dev, 0x419c20, 0x00000000);
	nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
	nv_wr32(dev, 0x419ce8, 0x00000000);
	nv_wr32(dev, 0x419cf4, 0x00000183);
	nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
	nv_wr32(dev, 0x419d24, 0x00001fff);
	/* NVC1-only register */
	if (chipset == 0xc1)
		nv_wr32(dev, 0x419d44, 0x02180218);
	nv_wr32(dev, 0x419e04, 0x00000000);
	nv_wr32(dev, 0x419e08, 0x00000000);
	nv_wr32(dev, 0x419e0c, 0x00000000);
	nv_wr32(dev, 0x419e10, 0x00000002);
	nv_wr32(dev, 0x419e44, 0x001beff2);
	nv_wr32(dev, 0x419e48, 0x00000000);
	nv_wr32(dev, 0x419e4c, 0x0000000f);
	nv_wr32(dev, 0x419e50, 0x00000000);
	nv_wr32(dev, 0x419e54, 0x00000000);
	nv_wr32(dev, 0x419e58, 0x00000000);
	nv_wr32(dev, 0x419e5c, 0x00000000);
	nv_wr32(dev, 0x419e60, 0x00000000);
	nv_wr32(dev, 0x419e64, 0x00000000);
	nv_wr32(dev, 0x419e68, 0x00000000);
	nv_wr32(dev, 0x419e6c, 0x00000000);
	nv_wr32(dev, 0x419e70, 0x00000000);
	nv_wr32(dev, 0x419e74, 0x00000000);
	nv_wr32(dev, 0x419e78, 0x00000000);
	nv_wr32(dev, 0x419e7c, 0x00000000);
	nv_wr32(dev, 0x419e80, 0x00000000);
	nv_wr32(dev, 0x419e84, 0x00000000);
	nv_wr32(dev, 0x419e88, 0x00000000);
	nv_wr32(dev, 0x419e8c, 0x00000000);
	nv_wr32(dev, 0x419e90, 0x00000000);
	nv_wr32(dev, 0x419e98, 0x00000000);
	if (chipset != 0xc0 && chipset != 0xc8)
		nv_wr32(dev, 0x419ee0, 0x00011110);
	nv_wr32(dev, 0x419f50, 0x00000000);
	nv_wr32(dev, 0x419f54, 0x00000000);
	if (chipset != 0xc0 && chipset != 0xc8)
		nv_wr32(dev, 0x419f58, 0x00000000);
}
1720 | |||
1721 | int | ||
1722 | nvc0_grctx_generate(struct nouveau_channel *chan) | ||
1723 | { | ||
1724 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
1725 | struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); | ||
1726 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; | ||
1727 | struct drm_device *dev = chan->dev; | ||
1728 | int i, gpc, tp, id; | ||
1729 | u32 fermi = nvc0_graph_class(dev); | ||
1730 | u32 r000260, tmp; | ||
1731 | |||
1732 | r000260 = nv_rd32(dev, 0x000260); | ||
1733 | nv_wr32(dev, 0x000260, r000260 & ~1); | ||
1734 | nv_wr32(dev, 0x400208, 0x00000000); | ||
1735 | |||
1736 | nvc0_grctx_generate_dispatch(dev); | ||
1737 | nvc0_grctx_generate_macro(dev); | ||
1738 | nvc0_grctx_generate_m2mf(dev); | ||
1739 | nvc0_grctx_generate_unk47xx(dev); | ||
1740 | nvc0_grctx_generate_shaders(dev); | ||
1741 | nvc0_grctx_generate_unk60xx(dev); | ||
1742 | nvc0_grctx_generate_unk64xx(dev); | ||
1743 | nvc0_grctx_generate_tpbus(dev); | ||
1744 | nvc0_grctx_generate_ccache(dev); | ||
1745 | nvc0_grctx_generate_rop(dev); | ||
1746 | nvc0_grctx_generate_gpc(dev); | ||
1747 | nvc0_grctx_generate_tp(dev); | ||
1748 | |||
1749 | nv_wr32(dev, 0x404154, 0x00000000); | ||
1750 | |||
1751 | /* fuc "mmio list" writes */ | ||
1752 | for (i = 0; i < grch->mmio_nr * 8; i += 8) { | ||
1753 | u32 reg = nv_ro32(grch->mmio, i + 0); | ||
1754 | nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4)); | ||
1755 | } | ||
1756 | |||
1757 | for (tp = 0, id = 0; tp < 4; tp++) { | ||
1758 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
1759 | if (tp < priv->tp_nr[gpc]) { | ||
1760 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id); | ||
1761 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id); | ||
1762 | nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id); | ||
1763 | nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id); | ||
1764 | id++; | ||
1765 | } | ||
1766 | |||
1767 | nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]); | ||
1768 | nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]); | ||
1769 | } | ||
1770 | } | ||
1771 | |||
1772 | tmp = 0; | ||
1773 | for (i = 0; i < priv->gpc_nr; i++) | ||
1774 | tmp |= priv->tp_nr[i] << (i * 4); | ||
1775 | nv_wr32(dev, 0x406028, tmp); | ||
1776 | nv_wr32(dev, 0x405870, tmp); | ||
1777 | |||
1778 | nv_wr32(dev, 0x40602c, 0x00000000); | ||
1779 | nv_wr32(dev, 0x405874, 0x00000000); | ||
1780 | nv_wr32(dev, 0x406030, 0x00000000); | ||
1781 | nv_wr32(dev, 0x405878, 0x00000000); | ||
1782 | nv_wr32(dev, 0x406034, 0x00000000); | ||
1783 | nv_wr32(dev, 0x40587c, 0x00000000); | ||
1784 | |||
1785 | if (1) { | ||
1786 | const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0, | ||
1787 | 16, 0, 0, 0, 0, 0, 8, 0 }; | ||
1788 | u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; | ||
1789 | u8 tpnr[GPC_MAX]; | ||
1790 | u8 data[TP_MAX]; | ||
1791 | |||
1792 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | ||
1793 | memset(data, 0x1f, sizeof(data)); | ||
1794 | |||
1795 | gpc = -1; | ||
1796 | for (tp = 0; tp < priv->tp_total; tp++) { | ||
1797 | do { | ||
1798 | gpc = (gpc + 1) % priv->gpc_nr; | ||
1799 | } while (!tpnr[gpc]); | ||
1800 | tpnr[gpc]--; | ||
1801 | data[tp] = gpc; | ||
1802 | } | ||
1803 | |||
1804 | for (i = 0; i < max / 4; i++) | ||
1805 | nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]); | ||
1806 | } | ||
1807 | |||
1808 | if (1) { | ||
1809 | u32 data[6] = {}, data2[2] = {}; | ||
1810 | u8 tpnr[GPC_MAX]; | ||
1811 | u8 shift, ntpcv; | ||
1812 | |||
1813 | /* calculate first set of magics */ | ||
1814 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | ||
1815 | |||
1816 | for (tp = 0; tp < priv->tp_total; tp++) { | ||
1817 | do { | ||
1818 | gpc = (gpc + 1) % priv->gpc_nr; | ||
1819 | } while (!tpnr[gpc]); | ||
1820 | tpnr[gpc]--; | ||
1821 | |||
1822 | data[tp / 6] |= gpc << ((tp % 6) * 5); | ||
1823 | } | ||
1824 | |||
1825 | for (; tp < 32; tp++) | ||
1826 | data[tp / 6] |= 7 << ((tp % 6) * 5); | ||
1827 | |||
1828 | /* and the second... */ | ||
1829 | shift = 0; | ||
1830 | ntpcv = priv->tp_total; | ||
1831 | while (!(ntpcv & (1 << 4))) { | ||
1832 | ntpcv <<= 1; | ||
1833 | shift++; | ||
1834 | } | ||
1835 | |||
1836 | data2[0] = (ntpcv << 16); | ||
1837 | data2[0] |= (shift << 21); | ||
1838 | data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24); | ||
1839 | for (i = 1; i < 7; i++) | ||
1840 | data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); | ||
1841 | |||
1842 | /* GPC_BROADCAST */ | ||
1843 | nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) | | ||
1844 | priv->magic_not_rop_nr); | ||
1845 | for (i = 0; i < 6; i++) | ||
1846 | nv_wr32(dev, 0x418b08 + (i * 4), data[i]); | ||
1847 | |||
1848 | /* GPC_BROADCAST.TP_BROADCAST */ | ||
1849 | nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) | | ||
1850 | priv->magic_not_rop_nr | | ||
1851 | data2[0]); | ||
1852 | nv_wr32(dev, 0x419be4, data2[1]); | ||
1853 | for (i = 0; i < 6; i++) | ||
1854 | nv_wr32(dev, 0x419b00 + (i * 4), data[i]); | ||
1855 | |||
1856 | /* UNK78xx */ | ||
1857 | nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) | | ||
1858 | priv->magic_not_rop_nr); | ||
1859 | for (i = 0; i < 6; i++) | ||
1860 | nv_wr32(dev, 0x40780c + (i * 4), data[i]); | ||
1861 | } | ||
1862 | |||
1863 | if (1) { | ||
1864 | u32 tp_mask = 0, tp_set = 0; | ||
1865 | u8 tpnr[GPC_MAX]; | ||
1866 | |||
1867 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | ||
1868 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) | ||
1869 | tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); | ||
1870 | |||
1871 | gpc = -1; | ||
1872 | for (i = 0, gpc = -1; i < 32; i++) { | ||
1873 | int ltp = i * (priv->tp_total - 1) / 32; | ||
1874 | |||
1875 | do { | ||
1876 | gpc = (gpc + 1) % priv->gpc_nr; | ||
1877 | } while (!tpnr[gpc]); | ||
1878 | tp = priv->tp_nr[gpc] - tpnr[gpc]--; | ||
1879 | |||
1880 | tp_set |= 1 << ((gpc * 8) + tp); | ||
1881 | |||
1882 | do { | ||
1883 | nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); | ||
1884 | tp_set ^= tp_mask; | ||
1885 | nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set); | ||
1886 | tp_set ^= tp_mask; | ||
1887 | } while (ltp == (++i * (priv->tp_total - 1) / 32)); | ||
1888 | i--; | ||
1889 | } | ||
1890 | } | ||
1891 | |||
1892 | nv_wr32(dev, 0x400208, 0x80000000); | ||
1893 | |||
1894 | nv_icmd(dev, 0x00001000, 0x00000004); | ||
1895 | nv_icmd(dev, 0x000000a9, 0x0000ffff); | ||
1896 | nv_icmd(dev, 0x00000038, 0x0fac6881); | ||
1897 | nv_icmd(dev, 0x0000003d, 0x00000001); | ||
1898 | nv_icmd(dev, 0x000000e8, 0x00000400); | ||
1899 | nv_icmd(dev, 0x000000e9, 0x00000400); | ||
1900 | nv_icmd(dev, 0x000000ea, 0x00000400); | ||
1901 | nv_icmd(dev, 0x000000eb, 0x00000400); | ||
1902 | nv_icmd(dev, 0x000000ec, 0x00000400); | ||
1903 | nv_icmd(dev, 0x000000ed, 0x00000400); | ||
1904 | nv_icmd(dev, 0x000000ee, 0x00000400); | ||
1905 | nv_icmd(dev, 0x000000ef, 0x00000400); | ||
1906 | nv_icmd(dev, 0x00000078, 0x00000300); | ||
1907 | nv_icmd(dev, 0x00000079, 0x00000300); | ||
1908 | nv_icmd(dev, 0x0000007a, 0x00000300); | ||
1909 | nv_icmd(dev, 0x0000007b, 0x00000300); | ||
1910 | nv_icmd(dev, 0x0000007c, 0x00000300); | ||
1911 | nv_icmd(dev, 0x0000007d, 0x00000300); | ||
1912 | nv_icmd(dev, 0x0000007e, 0x00000300); | ||
1913 | nv_icmd(dev, 0x0000007f, 0x00000300); | ||
1914 | nv_icmd(dev, 0x00000050, 0x00000011); | ||
1915 | nv_icmd(dev, 0x00000058, 0x00000008); | ||
1916 | nv_icmd(dev, 0x00000059, 0x00000008); | ||
1917 | nv_icmd(dev, 0x0000005a, 0x00000008); | ||
1918 | nv_icmd(dev, 0x0000005b, 0x00000008); | ||
1919 | nv_icmd(dev, 0x0000005c, 0x00000008); | ||
1920 | nv_icmd(dev, 0x0000005d, 0x00000008); | ||
1921 | nv_icmd(dev, 0x0000005e, 0x00000008); | ||
1922 | nv_icmd(dev, 0x0000005f, 0x00000008); | ||
1923 | nv_icmd(dev, 0x00000208, 0x00000001); | ||
1924 | nv_icmd(dev, 0x00000209, 0x00000001); | ||
1925 | nv_icmd(dev, 0x0000020a, 0x00000001); | ||
1926 | nv_icmd(dev, 0x0000020b, 0x00000001); | ||
1927 | nv_icmd(dev, 0x0000020c, 0x00000001); | ||
1928 | nv_icmd(dev, 0x0000020d, 0x00000001); | ||
1929 | nv_icmd(dev, 0x0000020e, 0x00000001); | ||
1930 | nv_icmd(dev, 0x0000020f, 0x00000001); | ||
1931 | nv_icmd(dev, 0x00000081, 0x00000001); | ||
1932 | nv_icmd(dev, 0x00000085, 0x00000004); | ||
1933 | nv_icmd(dev, 0x00000088, 0x00000400); | ||
1934 | nv_icmd(dev, 0x00000090, 0x00000300); | ||
1935 | nv_icmd(dev, 0x00000098, 0x00001001); | ||
1936 | nv_icmd(dev, 0x000000e3, 0x00000001); | ||
1937 | nv_icmd(dev, 0x000000da, 0x00000001); | ||
1938 | nv_icmd(dev, 0x000000f8, 0x00000003); | ||
1939 | nv_icmd(dev, 0x000000fa, 0x00000001); | ||
1940 | nv_icmd(dev, 0x0000009f, 0x0000ffff); | ||
1941 | nv_icmd(dev, 0x000000a0, 0x0000ffff); | ||
1942 | nv_icmd(dev, 0x000000a1, 0x0000ffff); | ||
1943 | nv_icmd(dev, 0x000000a2, 0x0000ffff); | ||
1944 | nv_icmd(dev, 0x000000b1, 0x00000001); | ||
1945 | nv_icmd(dev, 0x000000b2, 0x00000000); | ||
1946 | nv_icmd(dev, 0x000000b3, 0x00000000); | ||
1947 | nv_icmd(dev, 0x000000b4, 0x00000000); | ||
1948 | nv_icmd(dev, 0x000000b5, 0x00000000); | ||
1949 | nv_icmd(dev, 0x000000b6, 0x00000000); | ||
1950 | nv_icmd(dev, 0x000000b7, 0x00000000); | ||
1951 | nv_icmd(dev, 0x000000b8, 0x00000000); | ||
1952 | nv_icmd(dev, 0x000000b9, 0x00000000); | ||
1953 | nv_icmd(dev, 0x000000ba, 0x00000000); | ||
1954 | nv_icmd(dev, 0x000000bb, 0x00000000); | ||
1955 | nv_icmd(dev, 0x000000bc, 0x00000000); | ||
1956 | nv_icmd(dev, 0x000000bd, 0x00000000); | ||
1957 | nv_icmd(dev, 0x000000be, 0x00000000); | ||
1958 | nv_icmd(dev, 0x000000bf, 0x00000000); | ||
1959 | nv_icmd(dev, 0x000000c0, 0x00000000); | ||
1960 | nv_icmd(dev, 0x000000c1, 0x00000000); | ||
1961 | nv_icmd(dev, 0x000000c2, 0x00000000); | ||
1962 | nv_icmd(dev, 0x000000c3, 0x00000000); | ||
1963 | nv_icmd(dev, 0x000000c4, 0x00000000); | ||
1964 | nv_icmd(dev, 0x000000c5, 0x00000000); | ||
1965 | nv_icmd(dev, 0x000000c6, 0x00000000); | ||
1966 | nv_icmd(dev, 0x000000c7, 0x00000000); | ||
1967 | nv_icmd(dev, 0x000000c8, 0x00000000); | ||
1968 | nv_icmd(dev, 0x000000c9, 0x00000000); | ||
1969 | nv_icmd(dev, 0x000000ca, 0x00000000); | ||
1970 | nv_icmd(dev, 0x000000cb, 0x00000000); | ||
1971 | nv_icmd(dev, 0x000000cc, 0x00000000); | ||
1972 | nv_icmd(dev, 0x000000cd, 0x00000000); | ||
1973 | nv_icmd(dev, 0x000000ce, 0x00000000); | ||
1974 | nv_icmd(dev, 0x000000cf, 0x00000000); | ||
1975 | nv_icmd(dev, 0x000000d0, 0x00000000); | ||
1976 | nv_icmd(dev, 0x000000d1, 0x00000000); | ||
1977 | nv_icmd(dev, 0x000000d2, 0x00000000); | ||
1978 | nv_icmd(dev, 0x000000d3, 0x00000000); | ||
1979 | nv_icmd(dev, 0x000000d4, 0x00000000); | ||
1980 | nv_icmd(dev, 0x000000d5, 0x00000000); | ||
1981 | nv_icmd(dev, 0x000000d6, 0x00000000); | ||
1982 | nv_icmd(dev, 0x000000d7, 0x00000000); | ||
1983 | nv_icmd(dev, 0x000000d8, 0x00000000); | ||
1984 | nv_icmd(dev, 0x000000d9, 0x00000000); | ||
1985 | nv_icmd(dev, 0x00000210, 0x00000040); | ||
1986 | nv_icmd(dev, 0x00000211, 0x00000040); | ||
1987 | nv_icmd(dev, 0x00000212, 0x00000040); | ||
1988 | nv_icmd(dev, 0x00000213, 0x00000040); | ||
1989 | nv_icmd(dev, 0x00000214, 0x00000040); | ||
1990 | nv_icmd(dev, 0x00000215, 0x00000040); | ||
1991 | nv_icmd(dev, 0x00000216, 0x00000040); | ||
1992 | nv_icmd(dev, 0x00000217, 0x00000040); | ||
1993 | nv_icmd(dev, 0x00000218, 0x0000c080); | ||
1994 | nv_icmd(dev, 0x00000219, 0x0000c080); | ||
1995 | nv_icmd(dev, 0x0000021a, 0x0000c080); | ||
1996 | nv_icmd(dev, 0x0000021b, 0x0000c080); | ||
1997 | nv_icmd(dev, 0x0000021c, 0x0000c080); | ||
1998 | nv_icmd(dev, 0x0000021d, 0x0000c080); | ||
1999 | nv_icmd(dev, 0x0000021e, 0x0000c080); | ||
2000 | nv_icmd(dev, 0x0000021f, 0x0000c080); | ||
2001 | nv_icmd(dev, 0x000000ad, 0x0000013e); | ||
2002 | nv_icmd(dev, 0x000000e1, 0x00000010); | ||
2003 | nv_icmd(dev, 0x00000290, 0x00000000); | ||
2004 | nv_icmd(dev, 0x00000291, 0x00000000); | ||
2005 | nv_icmd(dev, 0x00000292, 0x00000000); | ||
2006 | nv_icmd(dev, 0x00000293, 0x00000000); | ||
2007 | nv_icmd(dev, 0x00000294, 0x00000000); | ||
2008 | nv_icmd(dev, 0x00000295, 0x00000000); | ||
2009 | nv_icmd(dev, 0x00000296, 0x00000000); | ||
2010 | nv_icmd(dev, 0x00000297, 0x00000000); | ||
2011 | nv_icmd(dev, 0x00000298, 0x00000000); | ||
2012 | nv_icmd(dev, 0x00000299, 0x00000000); | ||
2013 | nv_icmd(dev, 0x0000029a, 0x00000000); | ||
2014 | nv_icmd(dev, 0x0000029b, 0x00000000); | ||
2015 | nv_icmd(dev, 0x0000029c, 0x00000000); | ||
2016 | nv_icmd(dev, 0x0000029d, 0x00000000); | ||
2017 | nv_icmd(dev, 0x0000029e, 0x00000000); | ||
2018 | nv_icmd(dev, 0x0000029f, 0x00000000); | ||
2019 | nv_icmd(dev, 0x000003b0, 0x00000000); | ||
2020 | nv_icmd(dev, 0x000003b1, 0x00000000); | ||
2021 | nv_icmd(dev, 0x000003b2, 0x00000000); | ||
2022 | nv_icmd(dev, 0x000003b3, 0x00000000); | ||
2023 | nv_icmd(dev, 0x000003b4, 0x00000000); | ||
2024 | nv_icmd(dev, 0x000003b5, 0x00000000); | ||
2025 | nv_icmd(dev, 0x000003b6, 0x00000000); | ||
2026 | nv_icmd(dev, 0x000003b7, 0x00000000); | ||
2027 | nv_icmd(dev, 0x000003b8, 0x00000000); | ||
2028 | nv_icmd(dev, 0x000003b9, 0x00000000); | ||
2029 | nv_icmd(dev, 0x000003ba, 0x00000000); | ||
2030 | nv_icmd(dev, 0x000003bb, 0x00000000); | ||
2031 | nv_icmd(dev, 0x000003bc, 0x00000000); | ||
2032 | nv_icmd(dev, 0x000003bd, 0x00000000); | ||
2033 | nv_icmd(dev, 0x000003be, 0x00000000); | ||
2034 | nv_icmd(dev, 0x000003bf, 0x00000000); | ||
2035 | nv_icmd(dev, 0x000002a0, 0x00000000); | ||
2036 | nv_icmd(dev, 0x000002a1, 0x00000000); | ||
2037 | nv_icmd(dev, 0x000002a2, 0x00000000); | ||
2038 | nv_icmd(dev, 0x000002a3, 0x00000000); | ||
2039 | nv_icmd(dev, 0x000002a4, 0x00000000); | ||
2040 | nv_icmd(dev, 0x000002a5, 0x00000000); | ||
2041 | nv_icmd(dev, 0x000002a6, 0x00000000); | ||
2042 | nv_icmd(dev, 0x000002a7, 0x00000000); | ||
2043 | nv_icmd(dev, 0x000002a8, 0x00000000); | ||
2044 | nv_icmd(dev, 0x000002a9, 0x00000000); | ||
2045 | nv_icmd(dev, 0x000002aa, 0x00000000); | ||
2046 | nv_icmd(dev, 0x000002ab, 0x00000000); | ||
2047 | nv_icmd(dev, 0x000002ac, 0x00000000); | ||
2048 | nv_icmd(dev, 0x000002ad, 0x00000000); | ||
2049 | nv_icmd(dev, 0x000002ae, 0x00000000); | ||
2050 | nv_icmd(dev, 0x000002af, 0x00000000); | ||
2051 | nv_icmd(dev, 0x00000420, 0x00000000); | ||
2052 | nv_icmd(dev, 0x00000421, 0x00000000); | ||
2053 | nv_icmd(dev, 0x00000422, 0x00000000); | ||
2054 | nv_icmd(dev, 0x00000423, 0x00000000); | ||
2055 | nv_icmd(dev, 0x00000424, 0x00000000); | ||
2056 | nv_icmd(dev, 0x00000425, 0x00000000); | ||
2057 | nv_icmd(dev, 0x00000426, 0x00000000); | ||
2058 | nv_icmd(dev, 0x00000427, 0x00000000); | ||
2059 | nv_icmd(dev, 0x00000428, 0x00000000); | ||
2060 | nv_icmd(dev, 0x00000429, 0x00000000); | ||
2061 | nv_icmd(dev, 0x0000042a, 0x00000000); | ||
2062 | nv_icmd(dev, 0x0000042b, 0x00000000); | ||
2063 | nv_icmd(dev, 0x0000042c, 0x00000000); | ||
2064 | nv_icmd(dev, 0x0000042d, 0x00000000); | ||
2065 | nv_icmd(dev, 0x0000042e, 0x00000000); | ||
2066 | nv_icmd(dev, 0x0000042f, 0x00000000); | ||
2067 | nv_icmd(dev, 0x000002b0, 0x00000000); | ||
2068 | nv_icmd(dev, 0x000002b1, 0x00000000); | ||
2069 | nv_icmd(dev, 0x000002b2, 0x00000000); | ||
2070 | nv_icmd(dev, 0x000002b3, 0x00000000); | ||
2071 | nv_icmd(dev, 0x000002b4, 0x00000000); | ||
2072 | nv_icmd(dev, 0x000002b5, 0x00000000); | ||
2073 | nv_icmd(dev, 0x000002b6, 0x00000000); | ||
2074 | nv_icmd(dev, 0x000002b7, 0x00000000); | ||
2075 | nv_icmd(dev, 0x000002b8, 0x00000000); | ||
2076 | nv_icmd(dev, 0x000002b9, 0x00000000); | ||
2077 | nv_icmd(dev, 0x000002ba, 0x00000000); | ||
2078 | nv_icmd(dev, 0x000002bb, 0x00000000); | ||
2079 | nv_icmd(dev, 0x000002bc, 0x00000000); | ||
2080 | nv_icmd(dev, 0x000002bd, 0x00000000); | ||
2081 | nv_icmd(dev, 0x000002be, 0x00000000); | ||
2082 | nv_icmd(dev, 0x000002bf, 0x00000000); | ||
2083 | nv_icmd(dev, 0x00000430, 0x00000000); | ||
2084 | nv_icmd(dev, 0x00000431, 0x00000000); | ||
2085 | nv_icmd(dev, 0x00000432, 0x00000000); | ||
2086 | nv_icmd(dev, 0x00000433, 0x00000000); | ||
2087 | nv_icmd(dev, 0x00000434, 0x00000000); | ||
2088 | nv_icmd(dev, 0x00000435, 0x00000000); | ||
2089 | nv_icmd(dev, 0x00000436, 0x00000000); | ||
2090 | nv_icmd(dev, 0x00000437, 0x00000000); | ||
2091 | nv_icmd(dev, 0x00000438, 0x00000000); | ||
2092 | nv_icmd(dev, 0x00000439, 0x00000000); | ||
2093 | nv_icmd(dev, 0x0000043a, 0x00000000); | ||
2094 | nv_icmd(dev, 0x0000043b, 0x00000000); | ||
2095 | nv_icmd(dev, 0x0000043c, 0x00000000); | ||
2096 | nv_icmd(dev, 0x0000043d, 0x00000000); | ||
2097 | nv_icmd(dev, 0x0000043e, 0x00000000); | ||
2098 | nv_icmd(dev, 0x0000043f, 0x00000000); | ||
2099 | nv_icmd(dev, 0x000002c0, 0x00000000); | ||
2100 | nv_icmd(dev, 0x000002c1, 0x00000000); | ||
2101 | nv_icmd(dev, 0x000002c2, 0x00000000); | ||
2102 | nv_icmd(dev, 0x000002c3, 0x00000000); | ||
2103 | nv_icmd(dev, 0x000002c4, 0x00000000); | ||
2104 | nv_icmd(dev, 0x000002c5, 0x00000000); | ||
2105 | nv_icmd(dev, 0x000002c6, 0x00000000); | ||
2106 | nv_icmd(dev, 0x000002c7, 0x00000000); | ||
2107 | nv_icmd(dev, 0x000002c8, 0x00000000); | ||
2108 | nv_icmd(dev, 0x000002c9, 0x00000000); | ||
2109 | nv_icmd(dev, 0x000002ca, 0x00000000); | ||
2110 | nv_icmd(dev, 0x000002cb, 0x00000000); | ||
2111 | nv_icmd(dev, 0x000002cc, 0x00000000); | ||
2112 | nv_icmd(dev, 0x000002cd, 0x00000000); | ||
2113 | nv_icmd(dev, 0x000002ce, 0x00000000); | ||
2114 | nv_icmd(dev, 0x000002cf, 0x00000000); | ||
2115 | nv_icmd(dev, 0x000004d0, 0x00000000); | ||
2116 | nv_icmd(dev, 0x000004d1, 0x00000000); | ||
2117 | nv_icmd(dev, 0x000004d2, 0x00000000); | ||
2118 | nv_icmd(dev, 0x000004d3, 0x00000000); | ||
2119 | nv_icmd(dev, 0x000004d4, 0x00000000); | ||
2120 | nv_icmd(dev, 0x000004d5, 0x00000000); | ||
2121 | nv_icmd(dev, 0x000004d6, 0x00000000); | ||
2122 | nv_icmd(dev, 0x000004d7, 0x00000000); | ||
2123 | nv_icmd(dev, 0x000004d8, 0x00000000); | ||
2124 | nv_icmd(dev, 0x000004d9, 0x00000000); | ||
2125 | nv_icmd(dev, 0x000004da, 0x00000000); | ||
2126 | nv_icmd(dev, 0x000004db, 0x00000000); | ||
2127 | nv_icmd(dev, 0x000004dc, 0x00000000); | ||
2128 | nv_icmd(dev, 0x000004dd, 0x00000000); | ||
2129 | nv_icmd(dev, 0x000004de, 0x00000000); | ||
2130 | nv_icmd(dev, 0x000004df, 0x00000000); | ||
2131 | nv_icmd(dev, 0x00000720, 0x00000000); | ||
2132 | nv_icmd(dev, 0x00000721, 0x00000000); | ||
2133 | nv_icmd(dev, 0x00000722, 0x00000000); | ||
2134 | nv_icmd(dev, 0x00000723, 0x00000000); | ||
2135 | nv_icmd(dev, 0x00000724, 0x00000000); | ||
2136 | nv_icmd(dev, 0x00000725, 0x00000000); | ||
2137 | nv_icmd(dev, 0x00000726, 0x00000000); | ||
2138 | nv_icmd(dev, 0x00000727, 0x00000000); | ||
2139 | nv_icmd(dev, 0x00000728, 0x00000000); | ||
2140 | nv_icmd(dev, 0x00000729, 0x00000000); | ||
2141 | nv_icmd(dev, 0x0000072a, 0x00000000); | ||
2142 | nv_icmd(dev, 0x0000072b, 0x00000000); | ||
2143 | nv_icmd(dev, 0x0000072c, 0x00000000); | ||
2144 | nv_icmd(dev, 0x0000072d, 0x00000000); | ||
2145 | nv_icmd(dev, 0x0000072e, 0x00000000); | ||
2146 | nv_icmd(dev, 0x0000072f, 0x00000000); | ||
2147 | nv_icmd(dev, 0x000008c0, 0x00000000); | ||
2148 | nv_icmd(dev, 0x000008c1, 0x00000000); | ||
2149 | nv_icmd(dev, 0x000008c2, 0x00000000); | ||
2150 | nv_icmd(dev, 0x000008c3, 0x00000000); | ||
2151 | nv_icmd(dev, 0x000008c4, 0x00000000); | ||
2152 | nv_icmd(dev, 0x000008c5, 0x00000000); | ||
2153 | nv_icmd(dev, 0x000008c6, 0x00000000); | ||
2154 | nv_icmd(dev, 0x000008c7, 0x00000000); | ||
2155 | nv_icmd(dev, 0x000008c8, 0x00000000); | ||
2156 | nv_icmd(dev, 0x000008c9, 0x00000000); | ||
2157 | nv_icmd(dev, 0x000008ca, 0x00000000); | ||
2158 | nv_icmd(dev, 0x000008cb, 0x00000000); | ||
2159 | nv_icmd(dev, 0x000008cc, 0x00000000); | ||
2160 | nv_icmd(dev, 0x000008cd, 0x00000000); | ||
2161 | nv_icmd(dev, 0x000008ce, 0x00000000); | ||
2162 | nv_icmd(dev, 0x000008cf, 0x00000000); | ||
2163 | nv_icmd(dev, 0x00000890, 0x00000000); | ||
2164 | nv_icmd(dev, 0x00000891, 0x00000000); | ||
2165 | nv_icmd(dev, 0x00000892, 0x00000000); | ||
2166 | nv_icmd(dev, 0x00000893, 0x00000000); | ||
2167 | nv_icmd(dev, 0x00000894, 0x00000000); | ||
2168 | nv_icmd(dev, 0x00000895, 0x00000000); | ||
2169 | nv_icmd(dev, 0x00000896, 0x00000000); | ||
2170 | nv_icmd(dev, 0x00000897, 0x00000000); | ||
2171 | nv_icmd(dev, 0x00000898, 0x00000000); | ||
2172 | nv_icmd(dev, 0x00000899, 0x00000000); | ||
2173 | nv_icmd(dev, 0x0000089a, 0x00000000); | ||
2174 | nv_icmd(dev, 0x0000089b, 0x00000000); | ||
2175 | nv_icmd(dev, 0x0000089c, 0x00000000); | ||
2176 | nv_icmd(dev, 0x0000089d, 0x00000000); | ||
2177 | nv_icmd(dev, 0x0000089e, 0x00000000); | ||
2178 | nv_icmd(dev, 0x0000089f, 0x00000000); | ||
2179 | nv_icmd(dev, 0x000008e0, 0x00000000); | ||
2180 | nv_icmd(dev, 0x000008e1, 0x00000000); | ||
2181 | nv_icmd(dev, 0x000008e2, 0x00000000); | ||
2182 | nv_icmd(dev, 0x000008e3, 0x00000000); | ||
2183 | nv_icmd(dev, 0x000008e4, 0x00000000); | ||
2184 | nv_icmd(dev, 0x000008e5, 0x00000000); | ||
2185 | nv_icmd(dev, 0x000008e6, 0x00000000); | ||
2186 | nv_icmd(dev, 0x000008e7, 0x00000000); | ||
2187 | nv_icmd(dev, 0x000008e8, 0x00000000); | ||
2188 | nv_icmd(dev, 0x000008e9, 0x00000000); | ||
2189 | nv_icmd(dev, 0x000008ea, 0x00000000); | ||
2190 | nv_icmd(dev, 0x000008eb, 0x00000000); | ||
2191 | nv_icmd(dev, 0x000008ec, 0x00000000); | ||
2192 | nv_icmd(dev, 0x000008ed, 0x00000000); | ||
2193 | nv_icmd(dev, 0x000008ee, 0x00000000); | ||
2194 | nv_icmd(dev, 0x000008ef, 0x00000000); | ||
2195 | nv_icmd(dev, 0x000008a0, 0x00000000); | ||
2196 | nv_icmd(dev, 0x000008a1, 0x00000000); | ||
2197 | nv_icmd(dev, 0x000008a2, 0x00000000); | ||
2198 | nv_icmd(dev, 0x000008a3, 0x00000000); | ||
2199 | nv_icmd(dev, 0x000008a4, 0x00000000); | ||
2200 | nv_icmd(dev, 0x000008a5, 0x00000000); | ||
2201 | nv_icmd(dev, 0x000008a6, 0x00000000); | ||
2202 | nv_icmd(dev, 0x000008a7, 0x00000000); | ||
2203 | nv_icmd(dev, 0x000008a8, 0x00000000); | ||
2204 | nv_icmd(dev, 0x000008a9, 0x00000000); | ||
2205 | nv_icmd(dev, 0x000008aa, 0x00000000); | ||
2206 | nv_icmd(dev, 0x000008ab, 0x00000000); | ||
2207 | nv_icmd(dev, 0x000008ac, 0x00000000); | ||
2208 | nv_icmd(dev, 0x000008ad, 0x00000000); | ||
2209 | nv_icmd(dev, 0x000008ae, 0x00000000); | ||
2210 | nv_icmd(dev, 0x000008af, 0x00000000); | ||
2211 | nv_icmd(dev, 0x000008f0, 0x00000000); | ||
2212 | nv_icmd(dev, 0x000008f1, 0x00000000); | ||
2213 | nv_icmd(dev, 0x000008f2, 0x00000000); | ||
2214 | nv_icmd(dev, 0x000008f3, 0x00000000); | ||
2215 | nv_icmd(dev, 0x000008f4, 0x00000000); | ||
2216 | nv_icmd(dev, 0x000008f5, 0x00000000); | ||
2217 | nv_icmd(dev, 0x000008f6, 0x00000000); | ||
2218 | nv_icmd(dev, 0x000008f7, 0x00000000); | ||
2219 | nv_icmd(dev, 0x000008f8, 0x00000000); | ||
2220 | nv_icmd(dev, 0x000008f9, 0x00000000); | ||
2221 | nv_icmd(dev, 0x000008fa, 0x00000000); | ||
2222 | nv_icmd(dev, 0x000008fb, 0x00000000); | ||
2223 | nv_icmd(dev, 0x000008fc, 0x00000000); | ||
2224 | nv_icmd(dev, 0x000008fd, 0x00000000); | ||
2225 | nv_icmd(dev, 0x000008fe, 0x00000000); | ||
2226 | nv_icmd(dev, 0x000008ff, 0x00000000); | ||
2227 | nv_icmd(dev, 0x0000094c, 0x000000ff); | ||
2228 | nv_icmd(dev, 0x0000094d, 0xffffffff); | ||
2229 | nv_icmd(dev, 0x0000094e, 0x00000002); | ||
2230 | nv_icmd(dev, 0x000002ec, 0x00000001); | ||
2231 | nv_icmd(dev, 0x00000303, 0x00000001); | ||
2232 | nv_icmd(dev, 0x000002e6, 0x00000001); | ||
2233 | nv_icmd(dev, 0x00000466, 0x00000052); | ||
2234 | nv_icmd(dev, 0x00000301, 0x3f800000); | ||
2235 | nv_icmd(dev, 0x00000304, 0x30201000); | ||
2236 | nv_icmd(dev, 0x00000305, 0x70605040); | ||
2237 | nv_icmd(dev, 0x00000306, 0xb8a89888); | ||
2238 | nv_icmd(dev, 0x00000307, 0xf8e8d8c8); | ||
2239 | nv_icmd(dev, 0x0000030a, 0x00ffff00); | ||
2240 | nv_icmd(dev, 0x0000030b, 0x0000001a); | ||
2241 | nv_icmd(dev, 0x0000030c, 0x00000001); | ||
2242 | nv_icmd(dev, 0x00000318, 0x00000001); | ||
2243 | nv_icmd(dev, 0x00000340, 0x00000000); | ||
2244 | nv_icmd(dev, 0x00000375, 0x00000001); | ||
2245 | nv_icmd(dev, 0x00000351, 0x00000100); | ||
2246 | nv_icmd(dev, 0x0000037d, 0x00000006); | ||
2247 | nv_icmd(dev, 0x000003a0, 0x00000002); | ||
2248 | nv_icmd(dev, 0x000003aa, 0x00000001); | ||
2249 | nv_icmd(dev, 0x000003a9, 0x00000001); | ||
2250 | nv_icmd(dev, 0x00000380, 0x00000001); | ||
2251 | nv_icmd(dev, 0x00000360, 0x00000040); | ||
2252 | nv_icmd(dev, 0x00000366, 0x00000000); | ||
2253 | nv_icmd(dev, 0x00000367, 0x00000000); | ||
2254 | nv_icmd(dev, 0x00000368, 0x00001fff); | ||
2255 | nv_icmd(dev, 0x00000370, 0x00000000); | ||
2256 | nv_icmd(dev, 0x00000371, 0x00000000); | ||
2257 | nv_icmd(dev, 0x00000372, 0x003fffff); | ||
2258 | nv_icmd(dev, 0x0000037a, 0x00000012); | ||
2259 | nv_icmd(dev, 0x000005e0, 0x00000022); | ||
2260 | nv_icmd(dev, 0x000005e1, 0x00000022); | ||
2261 | nv_icmd(dev, 0x000005e2, 0x00000022); | ||
2262 | nv_icmd(dev, 0x000005e3, 0x00000022); | ||
2263 | nv_icmd(dev, 0x000005e4, 0x00000022); | ||
2264 | nv_icmd(dev, 0x00000619, 0x00000003); | ||
2265 | nv_icmd(dev, 0x00000811, 0x00000003); | ||
2266 | nv_icmd(dev, 0x00000812, 0x00000004); | ||
2267 | nv_icmd(dev, 0x00000813, 0x00000006); | ||
2268 | nv_icmd(dev, 0x00000814, 0x00000008); | ||
2269 | nv_icmd(dev, 0x00000815, 0x0000000b); | ||
2270 | nv_icmd(dev, 0x00000800, 0x00000001); | ||
2271 | nv_icmd(dev, 0x00000801, 0x00000001); | ||
2272 | nv_icmd(dev, 0x00000802, 0x00000001); | ||
2273 | nv_icmd(dev, 0x00000803, 0x00000001); | ||
2274 | nv_icmd(dev, 0x00000804, 0x00000001); | ||
2275 | nv_icmd(dev, 0x00000805, 0x00000001); | ||
2276 | nv_icmd(dev, 0x00000632, 0x00000001); | ||
2277 | nv_icmd(dev, 0x00000633, 0x00000002); | ||
2278 | nv_icmd(dev, 0x00000634, 0x00000003); | ||
2279 | nv_icmd(dev, 0x00000635, 0x00000004); | ||
2280 | nv_icmd(dev, 0x00000654, 0x3f800000); | ||
2281 | nv_icmd(dev, 0x00000657, 0x3f800000); | ||
2282 | nv_icmd(dev, 0x00000655, 0x3f800000); | ||
2283 | nv_icmd(dev, 0x00000656, 0x3f800000); | ||
2284 | nv_icmd(dev, 0x000006cd, 0x3f800000); | ||
2285 | nv_icmd(dev, 0x000007f5, 0x3f800000); | ||
2286 | nv_icmd(dev, 0x000007dc, 0x39291909); | ||
2287 | nv_icmd(dev, 0x000007dd, 0x79695949); | ||
2288 | nv_icmd(dev, 0x000007de, 0xb9a99989); | ||
2289 | nv_icmd(dev, 0x000007df, 0xf9e9d9c9); | ||
2290 | nv_icmd(dev, 0x000007e8, 0x00003210); | ||
2291 | nv_icmd(dev, 0x000007e9, 0x00007654); | ||
2292 | nv_icmd(dev, 0x000007ea, 0x00000098); | ||
2293 | nv_icmd(dev, 0x000007ec, 0x39291909); | ||
2294 | nv_icmd(dev, 0x000007ed, 0x79695949); | ||
2295 | nv_icmd(dev, 0x000007ee, 0xb9a99989); | ||
2296 | nv_icmd(dev, 0x000007ef, 0xf9e9d9c9); | ||
2297 | nv_icmd(dev, 0x000007f0, 0x00003210); | ||
2298 | nv_icmd(dev, 0x000007f1, 0x00007654); | ||
2299 | nv_icmd(dev, 0x000007f2, 0x00000098); | ||
2300 | nv_icmd(dev, 0x000005a5, 0x00000001); | ||
2301 | nv_icmd(dev, 0x00000980, 0x00000000); | ||
2302 | nv_icmd(dev, 0x00000981, 0x00000000); | ||
2303 | nv_icmd(dev, 0x00000982, 0x00000000); | ||
2304 | nv_icmd(dev, 0x00000983, 0x00000000); | ||
2305 | nv_icmd(dev, 0x00000984, 0x00000000); | ||
2306 | nv_icmd(dev, 0x00000985, 0x00000000); | ||
2307 | nv_icmd(dev, 0x00000986, 0x00000000); | ||
2308 | nv_icmd(dev, 0x00000987, 0x00000000); | ||
2309 | nv_icmd(dev, 0x00000988, 0x00000000); | ||
2310 | nv_icmd(dev, 0x00000989, 0x00000000); | ||
2311 | nv_icmd(dev, 0x0000098a, 0x00000000); | ||
2312 | nv_icmd(dev, 0x0000098b, 0x00000000); | ||
2313 | nv_icmd(dev, 0x0000098c, 0x00000000); | ||
2314 | nv_icmd(dev, 0x0000098d, 0x00000000); | ||
2315 | nv_icmd(dev, 0x0000098e, 0x00000000); | ||
2316 | nv_icmd(dev, 0x0000098f, 0x00000000); | ||
2317 | nv_icmd(dev, 0x00000990, 0x00000000); | ||
2318 | nv_icmd(dev, 0x00000991, 0x00000000); | ||
2319 | nv_icmd(dev, 0x00000992, 0x00000000); | ||
2320 | nv_icmd(dev, 0x00000993, 0x00000000); | ||
2321 | nv_icmd(dev, 0x00000994, 0x00000000); | ||
2322 | nv_icmd(dev, 0x00000995, 0x00000000); | ||
2323 | nv_icmd(dev, 0x00000996, 0x00000000); | ||
2324 | nv_icmd(dev, 0x00000997, 0x00000000); | ||
2325 | nv_icmd(dev, 0x00000998, 0x00000000); | ||
2326 | nv_icmd(dev, 0x00000999, 0x00000000); | ||
2327 | nv_icmd(dev, 0x0000099a, 0x00000000); | ||
2328 | nv_icmd(dev, 0x0000099b, 0x00000000); | ||
2329 | nv_icmd(dev, 0x0000099c, 0x00000000); | ||
2330 | nv_icmd(dev, 0x0000099d, 0x00000000); | ||
2331 | nv_icmd(dev, 0x0000099e, 0x00000000); | ||
2332 | nv_icmd(dev, 0x0000099f, 0x00000000); | ||
2333 | nv_icmd(dev, 0x000009a0, 0x00000000); | ||
2334 | nv_icmd(dev, 0x000009a1, 0x00000000); | ||
2335 | nv_icmd(dev, 0x000009a2, 0x00000000); | ||
2336 | nv_icmd(dev, 0x000009a3, 0x00000000); | ||
2337 | nv_icmd(dev, 0x000009a4, 0x00000000); | ||
2338 | nv_icmd(dev, 0x000009a5, 0x00000000); | ||
2339 | nv_icmd(dev, 0x000009a6, 0x00000000); | ||
2340 | nv_icmd(dev, 0x000009a7, 0x00000000); | ||
2341 | nv_icmd(dev, 0x000009a8, 0x00000000); | ||
2342 | nv_icmd(dev, 0x000009a9, 0x00000000); | ||
2343 | nv_icmd(dev, 0x000009aa, 0x00000000); | ||
2344 | nv_icmd(dev, 0x000009ab, 0x00000000); | ||
2345 | nv_icmd(dev, 0x000009ac, 0x00000000); | ||
2346 | nv_icmd(dev, 0x000009ad, 0x00000000); | ||
2347 | nv_icmd(dev, 0x000009ae, 0x00000000); | ||
2348 | nv_icmd(dev, 0x000009af, 0x00000000); | ||
2349 | nv_icmd(dev, 0x000009b0, 0x00000000); | ||
2350 | nv_icmd(dev, 0x000009b1, 0x00000000); | ||
2351 | nv_icmd(dev, 0x000009b2, 0x00000000); | ||
2352 | nv_icmd(dev, 0x000009b3, 0x00000000); | ||
2353 | nv_icmd(dev, 0x000009b4, 0x00000000); | ||
2354 | nv_icmd(dev, 0x000009b5, 0x00000000); | ||
2355 | nv_icmd(dev, 0x000009b6, 0x00000000); | ||
2356 | nv_icmd(dev, 0x000009b7, 0x00000000); | ||
2357 | nv_icmd(dev, 0x000009b8, 0x00000000); | ||
2358 | nv_icmd(dev, 0x000009b9, 0x00000000); | ||
2359 | nv_icmd(dev, 0x000009ba, 0x00000000); | ||
2360 | nv_icmd(dev, 0x000009bb, 0x00000000); | ||
2361 | nv_icmd(dev, 0x000009bc, 0x00000000); | ||
2362 | nv_icmd(dev, 0x000009bd, 0x00000000); | ||
2363 | nv_icmd(dev, 0x000009be, 0x00000000); | ||
2364 | nv_icmd(dev, 0x000009bf, 0x00000000); | ||
2365 | nv_icmd(dev, 0x000009c0, 0x00000000); | ||
2366 | nv_icmd(dev, 0x000009c1, 0x00000000); | ||
2367 | nv_icmd(dev, 0x000009c2, 0x00000000); | ||
2368 | nv_icmd(dev, 0x000009c3, 0x00000000); | ||
2369 | nv_icmd(dev, 0x000009c4, 0x00000000); | ||
2370 | nv_icmd(dev, 0x000009c5, 0x00000000); | ||
2371 | nv_icmd(dev, 0x000009c6, 0x00000000); | ||
2372 | nv_icmd(dev, 0x000009c7, 0x00000000); | ||
2373 | nv_icmd(dev, 0x000009c8, 0x00000000); | ||
2374 | nv_icmd(dev, 0x000009c9, 0x00000000); | ||
2375 | nv_icmd(dev, 0x000009ca, 0x00000000); | ||
2376 | nv_icmd(dev, 0x000009cb, 0x00000000); | ||
2377 | nv_icmd(dev, 0x000009cc, 0x00000000); | ||
2378 | nv_icmd(dev, 0x000009cd, 0x00000000); | ||
2379 | nv_icmd(dev, 0x000009ce, 0x00000000); | ||
2380 | nv_icmd(dev, 0x000009cf, 0x00000000); | ||
2381 | nv_icmd(dev, 0x000009d0, 0x00000000); | ||
2382 | nv_icmd(dev, 0x000009d1, 0x00000000); | ||
2383 | nv_icmd(dev, 0x000009d2, 0x00000000); | ||
2384 | nv_icmd(dev, 0x000009d3, 0x00000000); | ||
2385 | nv_icmd(dev, 0x000009d4, 0x00000000); | ||
2386 | nv_icmd(dev, 0x000009d5, 0x00000000); | ||
2387 | nv_icmd(dev, 0x000009d6, 0x00000000); | ||
2388 | nv_icmd(dev, 0x000009d7, 0x00000000); | ||
2389 | nv_icmd(dev, 0x000009d8, 0x00000000); | ||
2390 | nv_icmd(dev, 0x000009d9, 0x00000000); | ||
2391 | nv_icmd(dev, 0x000009da, 0x00000000); | ||
2392 | nv_icmd(dev, 0x000009db, 0x00000000); | ||
2393 | nv_icmd(dev, 0x000009dc, 0x00000000); | ||
2394 | nv_icmd(dev, 0x000009dd, 0x00000000); | ||
2395 | nv_icmd(dev, 0x000009de, 0x00000000); | ||
2396 | nv_icmd(dev, 0x000009df, 0x00000000); | ||
2397 | nv_icmd(dev, 0x000009e0, 0x00000000); | ||
2398 | nv_icmd(dev, 0x000009e1, 0x00000000); | ||
2399 | nv_icmd(dev, 0x000009e2, 0x00000000); | ||
2400 | nv_icmd(dev, 0x000009e3, 0x00000000); | ||
2401 | nv_icmd(dev, 0x000009e4, 0x00000000); | ||
2402 | nv_icmd(dev, 0x000009e5, 0x00000000); | ||
2403 | nv_icmd(dev, 0x000009e6, 0x00000000); | ||
2404 | nv_icmd(dev, 0x000009e7, 0x00000000); | ||
2405 | nv_icmd(dev, 0x000009e8, 0x00000000); | ||
2406 | nv_icmd(dev, 0x000009e9, 0x00000000); | ||
2407 | nv_icmd(dev, 0x000009ea, 0x00000000); | ||
2408 | nv_icmd(dev, 0x000009eb, 0x00000000); | ||
2409 | nv_icmd(dev, 0x000009ec, 0x00000000); | ||
2410 | nv_icmd(dev, 0x000009ed, 0x00000000); | ||
2411 | nv_icmd(dev, 0x000009ee, 0x00000000); | ||
2412 | nv_icmd(dev, 0x000009ef, 0x00000000); | ||
2413 | nv_icmd(dev, 0x000009f0, 0x00000000); | ||
2414 | nv_icmd(dev, 0x000009f1, 0x00000000); | ||
2415 | nv_icmd(dev, 0x000009f2, 0x00000000); | ||
2416 | nv_icmd(dev, 0x000009f3, 0x00000000); | ||
2417 | nv_icmd(dev, 0x000009f4, 0x00000000); | ||
2418 | nv_icmd(dev, 0x000009f5, 0x00000000); | ||
2419 | nv_icmd(dev, 0x000009f6, 0x00000000); | ||
2420 | nv_icmd(dev, 0x000009f7, 0x00000000); | ||
2421 | nv_icmd(dev, 0x000009f8, 0x00000000); | ||
2422 | nv_icmd(dev, 0x000009f9, 0x00000000); | ||
2423 | nv_icmd(dev, 0x000009fa, 0x00000000); | ||
2424 | nv_icmd(dev, 0x000009fb, 0x00000000); | ||
2425 | nv_icmd(dev, 0x000009fc, 0x00000000); | ||
2426 | nv_icmd(dev, 0x000009fd, 0x00000000); | ||
2427 | nv_icmd(dev, 0x000009fe, 0x00000000); | ||
2428 | nv_icmd(dev, 0x000009ff, 0x00000000); | ||
2429 | nv_icmd(dev, 0x00000468, 0x00000004); | ||
2430 | nv_icmd(dev, 0x0000046c, 0x00000001); | ||
2431 | nv_icmd(dev, 0x00000470, 0x00000000); | ||
2432 | nv_icmd(dev, 0x00000471, 0x00000000); | ||
2433 | nv_icmd(dev, 0x00000472, 0x00000000); | ||
2434 | nv_icmd(dev, 0x00000473, 0x00000000); | ||
2435 | nv_icmd(dev, 0x00000474, 0x00000000); | ||
2436 | nv_icmd(dev, 0x00000475, 0x00000000); | ||
2437 | nv_icmd(dev, 0x00000476, 0x00000000); | ||
2438 | nv_icmd(dev, 0x00000477, 0x00000000); | ||
2439 | nv_icmd(dev, 0x00000478, 0x00000000); | ||
2440 | nv_icmd(dev, 0x00000479, 0x00000000); | ||
2441 | nv_icmd(dev, 0x0000047a, 0x00000000); | ||
2442 | nv_icmd(dev, 0x0000047b, 0x00000000); | ||
2443 | nv_icmd(dev, 0x0000047c, 0x00000000); | ||
2444 | nv_icmd(dev, 0x0000047d, 0x00000000); | ||
2445 | nv_icmd(dev, 0x0000047e, 0x00000000); | ||
2446 | nv_icmd(dev, 0x0000047f, 0x00000000); | ||
2447 | nv_icmd(dev, 0x00000480, 0x00000000); | ||
2448 | nv_icmd(dev, 0x00000481, 0x00000000); | ||
2449 | nv_icmd(dev, 0x00000482, 0x00000000); | ||
2450 | nv_icmd(dev, 0x00000483, 0x00000000); | ||
2451 | nv_icmd(dev, 0x00000484, 0x00000000); | ||
2452 | nv_icmd(dev, 0x00000485, 0x00000000); | ||
2453 | nv_icmd(dev, 0x00000486, 0x00000000); | ||
2454 | nv_icmd(dev, 0x00000487, 0x00000000); | ||
2455 | nv_icmd(dev, 0x00000488, 0x00000000); | ||
2456 | nv_icmd(dev, 0x00000489, 0x00000000); | ||
2457 | nv_icmd(dev, 0x0000048a, 0x00000000); | ||
2458 | nv_icmd(dev, 0x0000048b, 0x00000000); | ||
2459 | nv_icmd(dev, 0x0000048c, 0x00000000); | ||
2460 | nv_icmd(dev, 0x0000048d, 0x00000000); | ||
2461 | nv_icmd(dev, 0x0000048e, 0x00000000); | ||
2462 | nv_icmd(dev, 0x0000048f, 0x00000000); | ||
2463 | nv_icmd(dev, 0x00000490, 0x00000000); | ||
2464 | nv_icmd(dev, 0x00000491, 0x00000000); | ||
2465 | nv_icmd(dev, 0x00000492, 0x00000000); | ||
2466 | nv_icmd(dev, 0x00000493, 0x00000000); | ||
2467 | nv_icmd(dev, 0x00000494, 0x00000000); | ||
2468 | nv_icmd(dev, 0x00000495, 0x00000000); | ||
2469 | nv_icmd(dev, 0x00000496, 0x00000000); | ||
2470 | nv_icmd(dev, 0x00000497, 0x00000000); | ||
2471 | nv_icmd(dev, 0x00000498, 0x00000000); | ||
2472 | nv_icmd(dev, 0x00000499, 0x00000000); | ||
2473 | nv_icmd(dev, 0x0000049a, 0x00000000); | ||
2474 | nv_icmd(dev, 0x0000049b, 0x00000000); | ||
2475 | nv_icmd(dev, 0x0000049c, 0x00000000); | ||
2476 | nv_icmd(dev, 0x0000049d, 0x00000000); | ||
2477 | nv_icmd(dev, 0x0000049e, 0x00000000); | ||
2478 | nv_icmd(dev, 0x0000049f, 0x00000000); | ||
2479 | nv_icmd(dev, 0x000004a0, 0x00000000); | ||
2480 | nv_icmd(dev, 0x000004a1, 0x00000000); | ||
2481 | nv_icmd(dev, 0x000004a2, 0x00000000); | ||
2482 | nv_icmd(dev, 0x000004a3, 0x00000000); | ||
2483 | nv_icmd(dev, 0x000004a4, 0x00000000); | ||
2484 | nv_icmd(dev, 0x000004a5, 0x00000000); | ||
2485 | nv_icmd(dev, 0x000004a6, 0x00000000); | ||
2486 | nv_icmd(dev, 0x000004a7, 0x00000000); | ||
2487 | nv_icmd(dev, 0x000004a8, 0x00000000); | ||
2488 | nv_icmd(dev, 0x000004a9, 0x00000000); | ||
2489 | nv_icmd(dev, 0x000004aa, 0x00000000); | ||
2490 | nv_icmd(dev, 0x000004ab, 0x00000000); | ||
2491 | nv_icmd(dev, 0x000004ac, 0x00000000); | ||
2492 | nv_icmd(dev, 0x000004ad, 0x00000000); | ||
2493 | nv_icmd(dev, 0x000004ae, 0x00000000); | ||
2494 | nv_icmd(dev, 0x000004af, 0x00000000); | ||
2495 | nv_icmd(dev, 0x000004b0, 0x00000000); | ||
2496 | nv_icmd(dev, 0x000004b1, 0x00000000); | ||
2497 | nv_icmd(dev, 0x000004b2, 0x00000000); | ||
2498 | nv_icmd(dev, 0x000004b3, 0x00000000); | ||
2499 | nv_icmd(dev, 0x000004b4, 0x00000000); | ||
2500 | nv_icmd(dev, 0x000004b5, 0x00000000); | ||
2501 | nv_icmd(dev, 0x000004b6, 0x00000000); | ||
2502 | nv_icmd(dev, 0x000004b7, 0x00000000); | ||
2503 | nv_icmd(dev, 0x000004b8, 0x00000000); | ||
2504 | nv_icmd(dev, 0x000004b9, 0x00000000); | ||
2505 | nv_icmd(dev, 0x000004ba, 0x00000000); | ||
2506 | nv_icmd(dev, 0x000004bb, 0x00000000); | ||
2507 | nv_icmd(dev, 0x000004bc, 0x00000000); | ||
2508 | nv_icmd(dev, 0x000004bd, 0x00000000); | ||
2509 | nv_icmd(dev, 0x000004be, 0x00000000); | ||
2510 | nv_icmd(dev, 0x000004bf, 0x00000000); | ||
2511 | nv_icmd(dev, 0x000004c0, 0x00000000); | ||
2512 | nv_icmd(dev, 0x000004c1, 0x00000000); | ||
2513 | nv_icmd(dev, 0x000004c2, 0x00000000); | ||
2514 | nv_icmd(dev, 0x000004c3, 0x00000000); | ||
2515 | nv_icmd(dev, 0x000004c4, 0x00000000); | ||
2516 | nv_icmd(dev, 0x000004c5, 0x00000000); | ||
2517 | nv_icmd(dev, 0x000004c6, 0x00000000); | ||
2518 | nv_icmd(dev, 0x000004c7, 0x00000000); | ||
2519 | nv_icmd(dev, 0x000004c8, 0x00000000); | ||
2520 | nv_icmd(dev, 0x000004c9, 0x00000000); | ||
2521 | nv_icmd(dev, 0x000004ca, 0x00000000); | ||
2522 | nv_icmd(dev, 0x000004cb, 0x00000000); | ||
2523 | nv_icmd(dev, 0x000004cc, 0x00000000); | ||
2524 | nv_icmd(dev, 0x000004cd, 0x00000000); | ||
2525 | nv_icmd(dev, 0x000004ce, 0x00000000); | ||
2526 | nv_icmd(dev, 0x000004cf, 0x00000000); | ||
2527 | nv_icmd(dev, 0x00000510, 0x3f800000); | ||
2528 | nv_icmd(dev, 0x00000511, 0x3f800000); | ||
2529 | nv_icmd(dev, 0x00000512, 0x3f800000); | ||
2530 | nv_icmd(dev, 0x00000513, 0x3f800000); | ||
2531 | nv_icmd(dev, 0x00000514, 0x3f800000); | ||
2532 | nv_icmd(dev, 0x00000515, 0x3f800000); | ||
2533 | nv_icmd(dev, 0x00000516, 0x3f800000); | ||
2534 | nv_icmd(dev, 0x00000517, 0x3f800000); | ||
2535 | nv_icmd(dev, 0x00000518, 0x3f800000); | ||
2536 | nv_icmd(dev, 0x00000519, 0x3f800000); | ||
2537 | nv_icmd(dev, 0x0000051a, 0x3f800000); | ||
2538 | nv_icmd(dev, 0x0000051b, 0x3f800000); | ||
2539 | nv_icmd(dev, 0x0000051c, 0x3f800000); | ||
2540 | nv_icmd(dev, 0x0000051d, 0x3f800000); | ||
2541 | nv_icmd(dev, 0x0000051e, 0x3f800000); | ||
2542 | nv_icmd(dev, 0x0000051f, 0x3f800000); | ||
2543 | nv_icmd(dev, 0x00000520, 0x000002b6); | ||
2544 | nv_icmd(dev, 0x00000529, 0x00000001); | ||
2545 | nv_icmd(dev, 0x00000530, 0xffff0000); | ||
2546 | nv_icmd(dev, 0x00000531, 0xffff0000); | ||
2547 | nv_icmd(dev, 0x00000532, 0xffff0000); | ||
2548 | nv_icmd(dev, 0x00000533, 0xffff0000); | ||
2549 | nv_icmd(dev, 0x00000534, 0xffff0000); | ||
2550 | nv_icmd(dev, 0x00000535, 0xffff0000); | ||
2551 | nv_icmd(dev, 0x00000536, 0xffff0000); | ||
2552 | nv_icmd(dev, 0x00000537, 0xffff0000); | ||
2553 | nv_icmd(dev, 0x00000538, 0xffff0000); | ||
2554 | nv_icmd(dev, 0x00000539, 0xffff0000); | ||
2555 | nv_icmd(dev, 0x0000053a, 0xffff0000); | ||
2556 | nv_icmd(dev, 0x0000053b, 0xffff0000); | ||
2557 | nv_icmd(dev, 0x0000053c, 0xffff0000); | ||
2558 | nv_icmd(dev, 0x0000053d, 0xffff0000); | ||
2559 | nv_icmd(dev, 0x0000053e, 0xffff0000); | ||
2560 | nv_icmd(dev, 0x0000053f, 0xffff0000); | ||
2561 | nv_icmd(dev, 0x00000585, 0x0000003f); | ||
2562 | nv_icmd(dev, 0x00000576, 0x00000003); | ||
2563 | if (dev_priv->chipset == 0xc1) | ||
2564 | nv_icmd(dev, 0x0000057b, 0x00000059); | ||
2565 | nv_icmd(dev, 0x00000586, 0x00000040); | ||
2566 | nv_icmd(dev, 0x00000582, 0x00000080); | ||
2567 | nv_icmd(dev, 0x00000583, 0x00000080); | ||
2568 | nv_icmd(dev, 0x000005c2, 0x00000001); | ||
2569 | nv_icmd(dev, 0x00000638, 0x00000001); | ||
2570 | nv_icmd(dev, 0x00000639, 0x00000001); | ||
2571 | nv_icmd(dev, 0x0000063a, 0x00000002); | ||
2572 | nv_icmd(dev, 0x0000063b, 0x00000001); | ||
2573 | nv_icmd(dev, 0x0000063c, 0x00000001); | ||
2574 | nv_icmd(dev, 0x0000063d, 0x00000002); | ||
2575 | nv_icmd(dev, 0x0000063e, 0x00000001); | ||
2576 | nv_icmd(dev, 0x000008b8, 0x00000001); | ||
2577 | nv_icmd(dev, 0x000008b9, 0x00000001); | ||
2578 | nv_icmd(dev, 0x000008ba, 0x00000001); | ||
2579 | nv_icmd(dev, 0x000008bb, 0x00000001); | ||
2580 | nv_icmd(dev, 0x000008bc, 0x00000001); | ||
2581 | nv_icmd(dev, 0x000008bd, 0x00000001); | ||
2582 | nv_icmd(dev, 0x000008be, 0x00000001); | ||
2583 | nv_icmd(dev, 0x000008bf, 0x00000001); | ||
2584 | nv_icmd(dev, 0x00000900, 0x00000001); | ||
2585 | nv_icmd(dev, 0x00000901, 0x00000001); | ||
2586 | nv_icmd(dev, 0x00000902, 0x00000001); | ||
2587 | nv_icmd(dev, 0x00000903, 0x00000001); | ||
2588 | nv_icmd(dev, 0x00000904, 0x00000001); | ||
2589 | nv_icmd(dev, 0x00000905, 0x00000001); | ||
2590 | nv_icmd(dev, 0x00000906, 0x00000001); | ||
2591 | nv_icmd(dev, 0x00000907, 0x00000001); | ||
2592 | nv_icmd(dev, 0x00000908, 0x00000002); | ||
2593 | nv_icmd(dev, 0x00000909, 0x00000002); | ||
2594 | nv_icmd(dev, 0x0000090a, 0x00000002); | ||
2595 | nv_icmd(dev, 0x0000090b, 0x00000002); | ||
2596 | nv_icmd(dev, 0x0000090c, 0x00000002); | ||
2597 | nv_icmd(dev, 0x0000090d, 0x00000002); | ||
2598 | nv_icmd(dev, 0x0000090e, 0x00000002); | ||
2599 | nv_icmd(dev, 0x0000090f, 0x00000002); | ||
2600 | nv_icmd(dev, 0x00000910, 0x00000001); | ||
2601 | nv_icmd(dev, 0x00000911, 0x00000001); | ||
2602 | nv_icmd(dev, 0x00000912, 0x00000001); | ||
2603 | nv_icmd(dev, 0x00000913, 0x00000001); | ||
2604 | nv_icmd(dev, 0x00000914, 0x00000001); | ||
2605 | nv_icmd(dev, 0x00000915, 0x00000001); | ||
2606 | nv_icmd(dev, 0x00000916, 0x00000001); | ||
2607 | nv_icmd(dev, 0x00000917, 0x00000001); | ||
2608 | nv_icmd(dev, 0x00000918, 0x00000001); | ||
2609 | nv_icmd(dev, 0x00000919, 0x00000001); | ||
2610 | nv_icmd(dev, 0x0000091a, 0x00000001); | ||
2611 | nv_icmd(dev, 0x0000091b, 0x00000001); | ||
2612 | nv_icmd(dev, 0x0000091c, 0x00000001); | ||
2613 | nv_icmd(dev, 0x0000091d, 0x00000001); | ||
2614 | nv_icmd(dev, 0x0000091e, 0x00000001); | ||
2615 | nv_icmd(dev, 0x0000091f, 0x00000001); | ||
2616 | nv_icmd(dev, 0x00000920, 0x00000002); | ||
2617 | nv_icmd(dev, 0x00000921, 0x00000002); | ||
2618 | nv_icmd(dev, 0x00000922, 0x00000002); | ||
2619 | nv_icmd(dev, 0x00000923, 0x00000002); | ||
2620 | nv_icmd(dev, 0x00000924, 0x00000002); | ||
2621 | nv_icmd(dev, 0x00000925, 0x00000002); | ||
2622 | nv_icmd(dev, 0x00000926, 0x00000002); | ||
2623 | nv_icmd(dev, 0x00000927, 0x00000002); | ||
2624 | nv_icmd(dev, 0x00000928, 0x00000001); | ||
2625 | nv_icmd(dev, 0x00000929, 0x00000001); | ||
2626 | nv_icmd(dev, 0x0000092a, 0x00000001); | ||
2627 | nv_icmd(dev, 0x0000092b, 0x00000001); | ||
2628 | nv_icmd(dev, 0x0000092c, 0x00000001); | ||
2629 | nv_icmd(dev, 0x0000092d, 0x00000001); | ||
2630 | nv_icmd(dev, 0x0000092e, 0x00000001); | ||
2631 | nv_icmd(dev, 0x0000092f, 0x00000001); | ||
2632 | nv_icmd(dev, 0x00000648, 0x00000001); | ||
2633 | nv_icmd(dev, 0x00000649, 0x00000001); | ||
2634 | nv_icmd(dev, 0x0000064a, 0x00000001); | ||
2635 | nv_icmd(dev, 0x0000064b, 0x00000001); | ||
2636 | nv_icmd(dev, 0x0000064c, 0x00000001); | ||
2637 | nv_icmd(dev, 0x0000064d, 0x00000001); | ||
2638 | nv_icmd(dev, 0x0000064e, 0x00000001); | ||
2639 | nv_icmd(dev, 0x0000064f, 0x00000001); | ||
2640 | nv_icmd(dev, 0x00000650, 0x00000001); | ||
2641 | nv_icmd(dev, 0x00000658, 0x0000000f); | ||
2642 | nv_icmd(dev, 0x000007ff, 0x0000000a); | ||
2643 | nv_icmd(dev, 0x0000066a, 0x40000000); | ||
2644 | nv_icmd(dev, 0x0000066b, 0x10000000); | ||
2645 | nv_icmd(dev, 0x0000066c, 0xffff0000); | ||
2646 | nv_icmd(dev, 0x0000066d, 0xffff0000); | ||
2647 | nv_icmd(dev, 0x000007af, 0x00000008); | ||
2648 | nv_icmd(dev, 0x000007b0, 0x00000008); | ||
2649 | nv_icmd(dev, 0x000007f6, 0x00000001); | ||
2650 | nv_icmd(dev, 0x000006b2, 0x00000055); | ||
2651 | nv_icmd(dev, 0x000007ad, 0x00000003); | ||
2652 | nv_icmd(dev, 0x00000937, 0x00000001); | ||
2653 | nv_icmd(dev, 0x00000971, 0x00000008); | ||
2654 | nv_icmd(dev, 0x00000972, 0x00000040); | ||
2655 | nv_icmd(dev, 0x00000973, 0x0000012c); | ||
2656 | nv_icmd(dev, 0x0000097c, 0x00000040); | ||
2657 | nv_icmd(dev, 0x00000979, 0x00000003); | ||
2658 | nv_icmd(dev, 0x00000975, 0x00000020); | ||
2659 | nv_icmd(dev, 0x00000976, 0x00000001); | ||
2660 | nv_icmd(dev, 0x00000977, 0x00000020); | ||
2661 | nv_icmd(dev, 0x00000978, 0x00000001); | ||
2662 | nv_icmd(dev, 0x00000957, 0x00000003); | ||
2663 | nv_icmd(dev, 0x0000095e, 0x20164010); | ||
2664 | nv_icmd(dev, 0x0000095f, 0x00000020); | ||
2665 | nv_icmd(dev, 0x00000683, 0x00000006); | ||
2666 | nv_icmd(dev, 0x00000685, 0x003fffff); | ||
2667 | nv_icmd(dev, 0x00000687, 0x00000c48); | ||
2668 | nv_icmd(dev, 0x000006a0, 0x00000005); | ||
2669 | nv_icmd(dev, 0x00000840, 0x00300008); | ||
2670 | nv_icmd(dev, 0x00000841, 0x04000080); | ||
2671 | nv_icmd(dev, 0x00000842, 0x00300008); | ||
2672 | nv_icmd(dev, 0x00000843, 0x04000080); | ||
2673 | nv_icmd(dev, 0x00000818, 0x00000000); | ||
2674 | nv_icmd(dev, 0x00000819, 0x00000000); | ||
2675 | nv_icmd(dev, 0x0000081a, 0x00000000); | ||
2676 | nv_icmd(dev, 0x0000081b, 0x00000000); | ||
2677 | nv_icmd(dev, 0x0000081c, 0x00000000); | ||
2678 | nv_icmd(dev, 0x0000081d, 0x00000000); | ||
2679 | nv_icmd(dev, 0x0000081e, 0x00000000); | ||
2680 | nv_icmd(dev, 0x0000081f, 0x00000000); | ||
2681 | nv_icmd(dev, 0x00000848, 0x00000000); | ||
2682 | nv_icmd(dev, 0x00000849, 0x00000000); | ||
2683 | nv_icmd(dev, 0x0000084a, 0x00000000); | ||
2684 | nv_icmd(dev, 0x0000084b, 0x00000000); | ||
2685 | nv_icmd(dev, 0x0000084c, 0x00000000); | ||
2686 | nv_icmd(dev, 0x0000084d, 0x00000000); | ||
2687 | nv_icmd(dev, 0x0000084e, 0x00000000); | ||
2688 | nv_icmd(dev, 0x0000084f, 0x00000000); | ||
2689 | nv_icmd(dev, 0x00000850, 0x00000000); | ||
2690 | nv_icmd(dev, 0x00000851, 0x00000000); | ||
2691 | nv_icmd(dev, 0x00000852, 0x00000000); | ||
2692 | nv_icmd(dev, 0x00000853, 0x00000000); | ||
2693 | nv_icmd(dev, 0x00000854, 0x00000000); | ||
2694 | nv_icmd(dev, 0x00000855, 0x00000000); | ||
2695 | nv_icmd(dev, 0x00000856, 0x00000000); | ||
2696 | nv_icmd(dev, 0x00000857, 0x00000000); | ||
2697 | nv_icmd(dev, 0x00000738, 0x00000000); | ||
2698 | nv_icmd(dev, 0x000006aa, 0x00000001); | ||
2699 | nv_icmd(dev, 0x000006ab, 0x00000002); | ||
2700 | nv_icmd(dev, 0x000006ac, 0x00000080); | ||
2701 | nv_icmd(dev, 0x000006ad, 0x00000100); | ||
2702 | nv_icmd(dev, 0x000006ae, 0x00000100); | ||
2703 | nv_icmd(dev, 0x000006b1, 0x00000011); | ||
2704 | nv_icmd(dev, 0x000006bb, 0x000000cf); | ||
2705 | nv_icmd(dev, 0x000006ce, 0x2a712488); | ||
2706 | nv_icmd(dev, 0x00000739, 0x4085c000); | ||
2707 | nv_icmd(dev, 0x0000073a, 0x00000080); | ||
2708 | nv_icmd(dev, 0x00000786, 0x80000100); | ||
2709 | nv_icmd(dev, 0x0000073c, 0x00010100); | ||
2710 | nv_icmd(dev, 0x0000073d, 0x02800000); | ||
2711 | nv_icmd(dev, 0x00000787, 0x000000cf); | ||
2712 | nv_icmd(dev, 0x0000078c, 0x00000008); | ||
2713 | nv_icmd(dev, 0x00000792, 0x00000001); | ||
2714 | nv_icmd(dev, 0x00000794, 0x00000001); | ||
2715 | nv_icmd(dev, 0x00000795, 0x00000001); | ||
2716 | nv_icmd(dev, 0x00000796, 0x00000001); | ||
2717 | nv_icmd(dev, 0x00000797, 0x000000cf); | ||
2718 | nv_icmd(dev, 0x00000836, 0x00000001); | ||
2719 | nv_icmd(dev, 0x0000079a, 0x00000002); | ||
2720 | nv_icmd(dev, 0x00000833, 0x04444480); | ||
2721 | nv_icmd(dev, 0x000007a1, 0x00000001); | ||
2722 | nv_icmd(dev, 0x000007a3, 0x00000001); | ||
2723 | nv_icmd(dev, 0x000007a4, 0x00000001); | ||
2724 | nv_icmd(dev, 0x000007a5, 0x00000001); | ||
2725 | nv_icmd(dev, 0x00000831, 0x00000004); | ||
2726 | nv_icmd(dev, 0x0000080c, 0x00000002); | ||
2727 | nv_icmd(dev, 0x0000080d, 0x00000100); | ||
2728 | nv_icmd(dev, 0x0000080e, 0x00000100); | ||
2729 | nv_icmd(dev, 0x0000080f, 0x00000001); | ||
2730 | nv_icmd(dev, 0x00000823, 0x00000002); | ||
2731 | nv_icmd(dev, 0x00000824, 0x00000100); | ||
2732 | nv_icmd(dev, 0x00000825, 0x00000100); | ||
2733 | nv_icmd(dev, 0x00000826, 0x00000001); | ||
2734 | nv_icmd(dev, 0x0000095d, 0x00000001); | ||
2735 | nv_icmd(dev, 0x0000082b, 0x00000004); | ||
2736 | nv_icmd(dev, 0x00000942, 0x00010001); | ||
2737 | nv_icmd(dev, 0x00000943, 0x00000001); | ||
2738 | nv_icmd(dev, 0x00000944, 0x00000022); | ||
2739 | nv_icmd(dev, 0x000007c5, 0x00010001); | ||
2740 | nv_icmd(dev, 0x00000834, 0x00000001); | ||
2741 | nv_icmd(dev, 0x000007c7, 0x00000001); | ||
2742 | nv_icmd(dev, 0x0000c1b0, 0x0000000f); | ||
2743 | nv_icmd(dev, 0x0000c1b1, 0x0000000f); | ||
2744 | nv_icmd(dev, 0x0000c1b2, 0x0000000f); | ||
2745 | nv_icmd(dev, 0x0000c1b3, 0x0000000f); | ||
2746 | nv_icmd(dev, 0x0000c1b4, 0x0000000f); | ||
2747 | nv_icmd(dev, 0x0000c1b5, 0x0000000f); | ||
2748 | nv_icmd(dev, 0x0000c1b6, 0x0000000f); | ||
2749 | nv_icmd(dev, 0x0000c1b7, 0x0000000f); | ||
2750 | nv_icmd(dev, 0x0000c1b8, 0x0fac6881); | ||
2751 | nv_icmd(dev, 0x0000c1b9, 0x00fac688); | ||
2752 | nv_icmd(dev, 0x0001e100, 0x00000001); | ||
2753 | nv_icmd(dev, 0x00001000, 0x00000002); | ||
2754 | nv_icmd(dev, 0x000006aa, 0x00000001); | ||
2755 | nv_icmd(dev, 0x000006ad, 0x00000100); | ||
2756 | nv_icmd(dev, 0x000006ae, 0x00000100); | ||
2757 | nv_icmd(dev, 0x000006b1, 0x00000011); | ||
2758 | nv_icmd(dev, 0x0000078c, 0x00000008); | ||
2759 | nv_icmd(dev, 0x00000792, 0x00000001); | ||
2760 | nv_icmd(dev, 0x00000794, 0x00000001); | ||
2761 | nv_icmd(dev, 0x00000795, 0x00000001); | ||
2762 | nv_icmd(dev, 0x00000796, 0x00000001); | ||
2763 | nv_icmd(dev, 0x00000797, 0x000000cf); | ||
2764 | nv_icmd(dev, 0x0000079a, 0x00000002); | ||
2765 | nv_icmd(dev, 0x00000833, 0x04444480); | ||
2766 | nv_icmd(dev, 0x000007a1, 0x00000001); | ||
2767 | nv_icmd(dev, 0x000007a3, 0x00000001); | ||
2768 | nv_icmd(dev, 0x000007a4, 0x00000001); | ||
2769 | nv_icmd(dev, 0x000007a5, 0x00000001); | ||
2770 | nv_icmd(dev, 0x00000831, 0x00000004); | ||
2771 | nv_icmd(dev, 0x0001e100, 0x00000001); | ||
2772 | nv_icmd(dev, 0x00001000, 0x00000014); | ||
2773 | nv_icmd(dev, 0x00000351, 0x00000100); | ||
2774 | nv_icmd(dev, 0x00000957, 0x00000003); | ||
2775 | nv_icmd(dev, 0x0000095d, 0x00000001); | ||
2776 | nv_icmd(dev, 0x0000082b, 0x00000004); | ||
2777 | nv_icmd(dev, 0x00000942, 0x00010001); | ||
2778 | nv_icmd(dev, 0x00000943, 0x00000001); | ||
2779 | nv_icmd(dev, 0x000007c5, 0x00010001); | ||
2780 | nv_icmd(dev, 0x00000834, 0x00000001); | ||
2781 | nv_icmd(dev, 0x000007c7, 0x00000001); | ||
2782 | nv_icmd(dev, 0x0001e100, 0x00000001); | ||
2783 | nv_icmd(dev, 0x00001000, 0x00000001); | ||
2784 | nv_icmd(dev, 0x0000080c, 0x00000002); | ||
2785 | nv_icmd(dev, 0x0000080d, 0x00000100); | ||
2786 | nv_icmd(dev, 0x0000080e, 0x00000100); | ||
2787 | nv_icmd(dev, 0x0000080f, 0x00000001); | ||
2788 | nv_icmd(dev, 0x00000823, 0x00000002); | ||
2789 | nv_icmd(dev, 0x00000824, 0x00000100); | ||
2790 | nv_icmd(dev, 0x00000825, 0x00000100); | ||
2791 | nv_icmd(dev, 0x00000826, 0x00000001); | ||
2792 | nv_icmd(dev, 0x0001e100, 0x00000001); | ||
2793 | nv_wr32(dev, 0x400208, 0x00000000); | ||
2794 | nv_wr32(dev, 0x404154, 0x00000400); | ||
2795 | |||
2796 | nvc0_grctx_generate_9097(dev); | ||
2797 | if (fermi >= 0x9197) | ||
2798 | nvc0_grctx_generate_9197(dev); | ||
2799 | if (fermi >= 0x9297) | ||
2800 | nvc0_grctx_generate_9297(dev); | ||
2801 | nvc0_grctx_generate_902d(dev); | ||
2802 | nvc0_grctx_generate_9039(dev); | ||
2803 | nvc0_grctx_generate_90c0(dev); | ||
2804 | |||
2805 | nv_wr32(dev, 0x000260, r000260); | ||
2806 | return 0; | ||
2807 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc new file mode 100644 index 00000000000..0ec2add72a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc | |||
@@ -0,0 +1,474 @@ | |||
1 | /* fuc microcode for nvc0 PGRAPH/GPC | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
26 | /* To build: | ||
27 | * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h | ||
28 | */ | ||
29 | |||
30 | /* TODO | ||
31 | * - bracket certain functions with scratch writes, useful for debugging | ||
32 | * - watchdog timer around ctx operations | ||
33 | */ | ||
34 | |||
35 | .section nvc0_grgpc_data | ||
36 | include(`nvc0_graph.fuc') | ||
37 | gpc_id: .b32 0 | ||
38 | gpc_mmio_list_head: .b32 0 | ||
39 | gpc_mmio_list_tail: .b32 0 | ||
40 | |||
41 | tpc_count: .b32 0 | ||
42 | tpc_mask: .b32 0 | ||
43 | tpc_mmio_list_head: .b32 0 | ||
44 | tpc_mmio_list_tail: .b32 0 | ||
45 | |||
46 | cmd_queue: queue_init | ||
47 | |||
48 | // chipset descriptions | ||
49 | chipsets: | ||
50 | .b8 0xc0 0 0 0 | ||
51 | .b16 nvc0_gpc_mmio_head | ||
52 | .b16 nvc0_gpc_mmio_tail | ||
53 | .b16 nvc0_tpc_mmio_head | ||
54 | .b16 nvc0_tpc_mmio_tail | ||
55 | .b8 0xc1 0 0 0 | ||
56 | .b16 nvc0_gpc_mmio_head | ||
57 | .b16 nvc1_gpc_mmio_tail | ||
58 | .b16 nvc0_tpc_mmio_head | ||
59 | .b16 nvc1_tpc_mmio_tail | ||
60 | .b8 0xc3 0 0 0 | ||
61 | .b16 nvc0_gpc_mmio_head | ||
62 | .b16 nvc0_gpc_mmio_tail | ||
63 | .b16 nvc0_tpc_mmio_head | ||
64 | .b16 nvc3_tpc_mmio_tail | ||
65 | .b8 0xc4 0 0 0 | ||
66 | .b16 nvc0_gpc_mmio_head | ||
67 | .b16 nvc0_gpc_mmio_tail | ||
68 | .b16 nvc0_tpc_mmio_head | ||
69 | .b16 nvc3_tpc_mmio_tail | ||
70 | .b8 0xc8 0 0 0 | ||
71 | .b16 nvc0_gpc_mmio_head | ||
72 | .b16 nvc0_gpc_mmio_tail | ||
73 | .b16 nvc0_tpc_mmio_head | ||
74 | .b16 nvc0_tpc_mmio_tail | ||
75 | .b8 0xce 0 0 0 | ||
76 | .b16 nvc0_gpc_mmio_head | ||
77 | .b16 nvc0_gpc_mmio_tail | ||
78 | .b16 nvc0_tpc_mmio_head | ||
79 | .b16 nvc3_tpc_mmio_tail | ||
80 | .b8 0 0 0 0 | ||
81 | |||
82 | // GPC mmio lists | ||
83 | nvc0_gpc_mmio_head: | ||
84 | mmctx_data(0x000380, 1) | ||
85 | mmctx_data(0x000400, 6) | ||
86 | mmctx_data(0x000450, 9) | ||
87 | mmctx_data(0x000600, 1) | ||
88 | mmctx_data(0x000684, 1) | ||
89 | mmctx_data(0x000700, 5) | ||
90 | mmctx_data(0x000800, 1) | ||
91 | mmctx_data(0x000808, 3) | ||
92 | mmctx_data(0x000828, 1) | ||
93 | mmctx_data(0x000830, 1) | ||
94 | mmctx_data(0x0008d8, 1) | ||
95 | mmctx_data(0x0008e0, 1) | ||
96 | mmctx_data(0x0008e8, 6) | ||
97 | mmctx_data(0x00091c, 1) | ||
98 | mmctx_data(0x000924, 3) | ||
99 | mmctx_data(0x000b00, 1) | ||
100 | mmctx_data(0x000b08, 6) | ||
101 | mmctx_data(0x000bb8, 1) | ||
102 | mmctx_data(0x000c08, 1) | ||
103 | mmctx_data(0x000c10, 8) | ||
104 | mmctx_data(0x000c80, 1) | ||
105 | mmctx_data(0x000c8c, 1) | ||
106 | mmctx_data(0x001000, 3) | ||
107 | mmctx_data(0x001014, 1) | ||
108 | nvc0_gpc_mmio_tail: | ||
109 | mmctx_data(0x000c6c, 1); | ||
110 | nvc1_gpc_mmio_tail: | ||
111 | |||
112 | // TPC mmio lists | ||
113 | nvc0_tpc_mmio_head: | ||
114 | mmctx_data(0x000018, 1) | ||
115 | mmctx_data(0x00003c, 1) | ||
116 | mmctx_data(0x000048, 1) | ||
117 | mmctx_data(0x000064, 1) | ||
118 | mmctx_data(0x000088, 1) | ||
119 | mmctx_data(0x000200, 6) | ||
120 | mmctx_data(0x00021c, 2) | ||
121 | mmctx_data(0x000300, 6) | ||
122 | mmctx_data(0x0003d0, 1) | ||
123 | mmctx_data(0x0003e0, 2) | ||
124 | mmctx_data(0x000400, 3) | ||
125 | mmctx_data(0x000420, 1) | ||
126 | mmctx_data(0x0004b0, 1) | ||
127 | mmctx_data(0x0004e8, 1) | ||
128 | mmctx_data(0x0004f4, 1) | ||
129 | mmctx_data(0x000520, 2) | ||
130 | mmctx_data(0x000604, 4) | ||
131 | mmctx_data(0x000644, 20) | ||
132 | mmctx_data(0x000698, 1) | ||
133 | mmctx_data(0x000750, 2) | ||
134 | nvc0_tpc_mmio_tail: | ||
135 | mmctx_data(0x000758, 1) | ||
136 | mmctx_data(0x0002c4, 1) | ||
137 | mmctx_data(0x0004bc, 1) | ||
138 | mmctx_data(0x0006e0, 1) | ||
139 | nvc3_tpc_mmio_tail: | ||
140 | mmctx_data(0x000544, 1) | ||
141 | nvc1_tpc_mmio_tail: | ||
142 | |||
143 | |||
144 | .section nvc0_grgpc_code | ||
145 | bra init | ||
146 | define(`include_code') | ||
147 | include(`nvc0_graph.fuc') | ||
148 | |||
149 | // reports an exception to the host | ||
150 | // | ||
151 | // In: $r15 error code (see nvc0_graph.fuc) | ||
152 | // | ||
153 | error: | ||
154 | push $r14 | ||
155 | mov $r14 -0x67ec // 0x9814 | ||
156 | sethi $r14 0x400000 | ||
157 | call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code | ||
158 | add b32 $r14 0x41c | ||
159 | mov $r15 1 | ||
160 | call nv_wr32 // HUB_CTXCTL_INTR_UP_SET | ||
161 | pop $r14 | ||
162 | ret | ||
163 | |||
164 | // GPC fuc initialisation, executed by triggering ucode start, will | ||
165 | // fall through to main loop after completion. | ||
166 | // | ||
167 | // Input: | ||
168 | // CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) | ||
169 | // CC_SCRATCH[1]: context base | ||
170 | // | ||
171 | // Output: | ||
172 | // CC_SCRATCH[0]: | ||
173 | // 31:31: set to signal completion | ||
174 | // CC_SCRATCH[1]: | ||
175 | // 31:0: GPC context size | ||
176 | // | ||
177 | init: | ||
178 | clear b32 $r0 | ||
179 | mov $sp $r0 | ||
180 | |||
181 | // enable fifo access | ||
182 | mov $r1 0x1200 | ||
183 | mov $r2 2 | ||
184 | iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE | ||
185 | |||
186 | // setup i0 handler, and route all interrupts to it | ||
187 | mov $r1 ih | ||
188 | mov $iv0 $r1 | ||
189 | mov $r1 0x400 | ||
190 | iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH | ||
191 | |||
192 | // enable fifo interrupt | ||
193 | mov $r2 4 | ||
194 | iowr I[$r1 + 0x000] $r2 // INTR_EN_SET | ||
195 | |||
196 | // enable interrupts | ||
197 | bset $flags ie0 | ||
198 | |||
199 | // figure out which GPC we are, and how many TPCs we have | ||
200 | mov $r1 0x608 | ||
201 | shl b32 $r1 6 | ||
202 | iord $r2 I[$r1 + 0x000] // UNITS | ||
203 | mov $r3 1 | ||
204 | and $r2 0x1f | ||
205 | shl b32 $r3 $r2 | ||
206 | sub b32 $r3 1 | ||
207 | st b32 D[$r0 + tpc_count] $r2 | ||
208 | st b32 D[$r0 + tpc_mask] $r3 | ||
209 | add b32 $r1 0x400 | ||
210 | iord $r2 I[$r1 + 0x000] // MYINDEX | ||
211 | st b32 D[$r0 + gpc_id] $r2 | ||
212 | |||
213 | // find context data for this chipset | ||
214 | mov $r2 0x800 | ||
215 | shl b32 $r2 6 | ||
216 | iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] | ||
217 | mov $r1 chipsets - 12 | ||
218 | init_find_chipset: | ||
219 | add b32 $r1 12 | ||
220 | ld b32 $r3 D[$r1 + 0x00] | ||
221 | cmpu b32 $r3 $r2 | ||
222 | bra e init_context | ||
223 | cmpu b32 $r3 0 | ||
224 | bra ne init_find_chipset | ||
225 | // unknown chipset | ||
226 | ret | ||
227 | |||
228 | // initialise context base, and size tracking | ||
229 | init_context: | ||
230 | mov $r2 0x800 | ||
231 | shl b32 $r2 6 | ||
232 | iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base | ||
233 | clear b32 $r3 // track GPC context size here | ||
234 | |||
235 | // set mmctx base addresses now so we don't have to do it later, | ||
236 | // they don't currently ever change | ||
237 | mov $r4 0x700 | ||
238 | shl b32 $r4 6 | ||
239 | shr b32 $r5 $r2 8 | ||
240 | iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE | ||
241 | iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE | ||
242 | |||
243 | // calculate GPC mmio context size, store the chipset-specific | ||
244 | // mmio list pointers somewhere we can get at them later without | ||
245 | // re-parsing the chipset list | ||
246 | clear b32 $r14 | ||
247 | clear b32 $r15 | ||
248 | ld b16 $r14 D[$r1 + 4] | ||
249 | ld b16 $r15 D[$r1 + 6] | ||
250 | st b16 D[$r0 + gpc_mmio_list_head] $r14 | ||
251 | st b16 D[$r0 + gpc_mmio_list_tail] $r15 | ||
252 | call mmctx_size | ||
253 | add b32 $r2 $r15 | ||
254 | add b32 $r3 $r15 | ||
255 | |||
256 | // calculate per-TPC mmio context size, store the list pointers | ||
257 | ld b16 $r14 D[$r1 + 8] | ||
258 | ld b16 $r15 D[$r1 + 10] | ||
259 | st b16 D[$r0 + tpc_mmio_list_head] $r14 | ||
260 | st b16 D[$r0 + tpc_mmio_list_tail] $r15 | ||
261 | call mmctx_size | ||
262 | ld b32 $r14 D[$r0 + tpc_count] | ||
263 | mulu $r14 $r15 | ||
264 | add b32 $r2 $r14 | ||
265 | add b32 $r3 $r14 | ||
266 | |||
267 | // round up base/size to 256 byte boundary (for strand SWBASE) | ||
268 | add b32 $r4 0x1300 | ||
269 | shr b32 $r3 2 | ||
270 | iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!? | ||
271 | shr b32 $r2 8 | ||
272 | shr b32 $r3 6 | ||
273 | add b32 $r2 1 | ||
274 | add b32 $r3 1 | ||
275 | shl b32 $r2 8 | ||
276 | shl b32 $r3 8 | ||
277 | |||
278 | // calculate size of strand context data | ||
279 | mov b32 $r15 $r2 | ||
280 | call strand_ctx_init | ||
281 | add b32 $r3 $r15 | ||
282 | |||
283 | // save context size, and tell HUB we're done | ||
284 | mov $r1 0x800 | ||
285 | shl b32 $r1 6 | ||
286 | iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size | ||
287 | add b32 $r1 0x800 | ||
288 | clear b32 $r2 | ||
289 | bset $r2 31 | ||
290 | iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 | ||
291 | |||
292 | // Main program loop, very simple, sleeps until woken up by the interrupt | ||
293 | // handler, pulls a command from the queue and executes its handler | ||
294 | // | ||
295 | main: | ||
296 | bset $flags $p0 | ||
297 | sleep $p0 | ||
298 | mov $r13 cmd_queue | ||
299 | call queue_get | ||
300 | bra $p1 main | ||
301 | |||
302 | // 0x0000-0x0003 are all context transfers | ||
303 | cmpu b32 $r14 0x04 | ||
304 | bra nc main_not_ctx_xfer | ||
305 | // fetch $flags and mask off $p1/$p2 | ||
306 | mov $r1 $flags | ||
307 | mov $r2 0x0006 | ||
308 | not b32 $r2 | ||
309 | and $r1 $r2 | ||
310 | // set $p1/$p2 according to transfer type | ||
311 | shl b32 $r14 1 | ||
312 | or $r1 $r14 | ||
313 | mov $flags $r1 | ||
314 | // transfer context data | ||
315 | call ctx_xfer | ||
316 | bra main | ||
317 | |||
318 | main_not_ctx_xfer: | ||
319 | shl b32 $r15 $r14 16 | ||
320 | or $r15 E_BAD_COMMAND | ||
321 | call error | ||
322 | bra main | ||
323 | |||
324 | // interrupt handler | ||
325 | ih: | ||
326 | push $r8 | ||
327 | mov $r8 $flags | ||
328 | push $r8 | ||
329 | push $r9 | ||
330 | push $r10 | ||
331 | push $r11 | ||
332 | push $r13 | ||
333 | push $r14 | ||
334 | push $r15 | ||
335 | |||
336 | // incoming fifo command? | ||
337 | iord $r10 I[$r0 + 0x200] // INTR | ||
338 | and $r11 $r10 0x00000004 | ||
339 | bra e ih_no_fifo | ||
340 | // queue incoming fifo command for later processing | ||
341 | mov $r11 0x1900 | ||
342 | mov $r13 cmd_queue | ||
343 | iord $r14 I[$r11 + 0x100] // FIFO_CMD | ||
344 | iord $r15 I[$r11 + 0x000] // FIFO_DATA | ||
345 | call queue_put | ||
346 | add b32 $r11 0x400 | ||
347 | mov $r14 1 | ||
348 | iowr I[$r11 + 0x000] $r14 // FIFO_ACK | ||
349 | |||
350 | // ack, and wake up main() | ||
351 | ih_no_fifo: | ||
352 | iowr I[$r0 + 0x100] $r10 // INTR_ACK | ||
353 | |||
354 | pop $r15 | ||
355 | pop $r14 | ||
356 | pop $r13 | ||
357 | pop $r11 | ||
358 | pop $r10 | ||
359 | pop $r9 | ||
360 | pop $r8 | ||
361 | mov $flags $r8 | ||
362 | pop $r8 | ||
363 | bclr $flags $p0 | ||
364 | iret | ||
365 | |||
366 | // Set this GPC's bit in HUB_BAR, used to signal completion of various | ||
367 | // activities to the HUB fuc | ||
368 | // | ||
369 | hub_barrier_done: | ||
370 | mov $r15 1 | ||
371 | ld b32 $r14 D[$r0 + gpc_id] | ||
372 | shl b32 $r15 $r14 | ||
373 | mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET | ||
374 | sethi $r14 0x400000 | ||
375 | call nv_wr32 | ||
376 | ret | ||
377 | |||
378 | // Disables various things, waits a bit, and re-enables them.. | ||
379 | // | ||
380 | // Not sure how exactly this helps, perhaps "ENABLE" is not such a | ||
381 | // good description for the bits we turn off? Anyways, without this, | ||
382 | // funny things happen. | ||
383 | // | ||
384 | ctx_redswitch: | ||
385 | mov $r14 0x614 | ||
386 | shl b32 $r14 6 | ||
387 | mov $r15 0x020 | ||
388 | iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER | ||
389 | mov $r15 8 | ||
390 | ctx_redswitch_delay: | ||
391 | sub b32 $r15 1 | ||
392 | bra ne ctx_redswitch_delay | ||
393 | mov $r15 0xa20 | ||
394 | iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER | ||
395 | ret | ||
396 | |||
397 | // Transfer GPC context data between GPU and storage area | ||
398 | // | ||
399 | // In: $r15 context base address | ||
400 | // $p1 clear on save, set on load | ||
401 | // $p2 set if opposite direction done/will be done, so: | ||
402 | // on save it means: "a load will follow this save" | ||
403 | // on load it means: "a save preceeded this load" | ||
404 | // | ||
405 | ctx_xfer: | ||
406 | // set context base address | ||
407 | mov $r1 0xa04 | ||
408 | shl b32 $r1 6 | ||
409 | iowr I[$r1 + 0x000] $r15// MEM_BASE | ||
410 | bra not $p1 ctx_xfer_not_load | ||
411 | call ctx_redswitch | ||
412 | ctx_xfer_not_load: | ||
413 | |||
414 | // strands | ||
415 | mov $r1 0x4afc | ||
416 | sethi $r1 0x20000 | ||
417 | mov $r2 0xc | ||
418 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c | ||
419 | call strand_wait | ||
420 | mov $r2 0x47fc | ||
421 | sethi $r2 0x20000 | ||
422 | iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 | ||
423 | xbit $r2 $flags $p1 | ||
424 | add b32 $r2 3 | ||
425 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) | ||
426 | |||
427 | // mmio context | ||
428 | xbit $r10 $flags $p1 // direction | ||
429 | or $r10 2 // first | ||
430 | mov $r11 0x0000 | ||
431 | sethi $r11 0x500000 | ||
432 | ld b32 $r12 D[$r0 + gpc_id] | ||
433 | shl b32 $r12 15 | ||
434 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn | ||
435 | ld b32 $r12 D[$r0 + gpc_mmio_list_head] | ||
436 | ld b32 $r13 D[$r0 + gpc_mmio_list_tail] | ||
437 | mov $r14 0 // not multi | ||
438 | call mmctx_xfer | ||
439 | |||
440 | // per-TPC mmio context | ||
441 | xbit $r10 $flags $p1 // direction | ||
442 | or $r10 4 // last | ||
443 | mov $r11 0x4000 | ||
444 | sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 | ||
445 | ld b32 $r12 D[$r0 + gpc_id] | ||
446 | shl b32 $r12 15 | ||
447 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 | ||
448 | ld b32 $r12 D[$r0 + tpc_mmio_list_head] | ||
449 | ld b32 $r13 D[$r0 + tpc_mmio_list_tail] | ||
450 | ld b32 $r15 D[$r0 + tpc_mask] | ||
451 | mov $r14 0x800 // stride = 0x800 | ||
452 | call mmctx_xfer | ||
453 | |||
454 | // wait for strands to finish | ||
455 | call strand_wait | ||
456 | |||
457 | // if load, or a save without a load following, do some | ||
458 | // unknown stuff that's done after finishing a block of | ||
459 | // strand commands | ||
460 | bra $p1 ctx_xfer_post | ||
461 | bra not $p2 ctx_xfer_done | ||
462 | ctx_xfer_post: | ||
463 | mov $r1 0x4afc | ||
464 | sethi $r1 0x20000 | ||
465 | mov $r2 0xd | ||
466 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d | ||
467 | call strand_wait | ||
468 | |||
469 | // mark completion in HUB's barrier | ||
470 | ctx_xfer_done: | ||
471 | call hub_barrier_done | ||
472 | ret | ||
473 | |||
474 | .align 256 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h new file mode 100644 index 00000000000..1896c898f5b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h | |||
@@ -0,0 +1,483 @@ | |||
1 | uint32_t nvc0_grgpc_data[] = { | ||
2 | 0x00000000, | ||
3 | 0x00000000, | ||
4 | 0x00000000, | ||
5 | 0x00000000, | ||
6 | 0x00000000, | ||
7 | 0x00000000, | ||
8 | 0x00000000, | ||
9 | 0x00000000, | ||
10 | 0x00000000, | ||
11 | 0x00000000, | ||
12 | 0x00000000, | ||
13 | 0x00000000, | ||
14 | 0x00000000, | ||
15 | 0x00000000, | ||
16 | 0x00000000, | ||
17 | 0x00000000, | ||
18 | 0x00000000, | ||
19 | 0x00000000, | ||
20 | 0x00000000, | ||
21 | 0x00000000, | ||
22 | 0x00000000, | ||
23 | 0x00000000, | ||
24 | 0x00000000, | ||
25 | 0x00000000, | ||
26 | 0x00000000, | ||
27 | 0x000000c0, | ||
28 | 0x011000b0, | ||
29 | 0x01640114, | ||
30 | 0x000000c1, | ||
31 | 0x011400b0, | ||
32 | 0x01780114, | ||
33 | 0x000000c3, | ||
34 | 0x011000b0, | ||
35 | 0x01740114, | ||
36 | 0x000000c4, | ||
37 | 0x011000b0, | ||
38 | 0x01740114, | ||
39 | 0x000000c8, | ||
40 | 0x011000b0, | ||
41 | 0x01640114, | ||
42 | 0x000000ce, | ||
43 | 0x011000b0, | ||
44 | 0x01740114, | ||
45 | 0x00000000, | ||
46 | 0x00000380, | ||
47 | 0x14000400, | ||
48 | 0x20000450, | ||
49 | 0x00000600, | ||
50 | 0x00000684, | ||
51 | 0x10000700, | ||
52 | 0x00000800, | ||
53 | 0x08000808, | ||
54 | 0x00000828, | ||
55 | 0x00000830, | ||
56 | 0x000008d8, | ||
57 | 0x000008e0, | ||
58 | 0x140008e8, | ||
59 | 0x0000091c, | ||
60 | 0x08000924, | ||
61 | 0x00000b00, | ||
62 | 0x14000b08, | ||
63 | 0x00000bb8, | ||
64 | 0x00000c08, | ||
65 | 0x1c000c10, | ||
66 | 0x00000c80, | ||
67 | 0x00000c8c, | ||
68 | 0x08001000, | ||
69 | 0x00001014, | ||
70 | 0x00000c6c, | ||
71 | 0x00000018, | ||
72 | 0x0000003c, | ||
73 | 0x00000048, | ||
74 | 0x00000064, | ||
75 | 0x00000088, | ||
76 | 0x14000200, | ||
77 | 0x0400021c, | ||
78 | 0x14000300, | ||
79 | 0x000003d0, | ||
80 | 0x040003e0, | ||
81 | 0x08000400, | ||
82 | 0x00000420, | ||
83 | 0x000004b0, | ||
84 | 0x000004e8, | ||
85 | 0x000004f4, | ||
86 | 0x04000520, | ||
87 | 0x0c000604, | ||
88 | 0x4c000644, | ||
89 | 0x00000698, | ||
90 | 0x04000750, | ||
91 | 0x00000758, | ||
92 | 0x000002c4, | ||
93 | 0x000004bc, | ||
94 | 0x000006e0, | ||
95 | 0x00000544, | ||
96 | }; | ||
97 | |||
98 | uint32_t nvc0_grgpc_code[] = { | ||
99 | 0x03060ef5, | ||
100 | 0x9800d898, | ||
101 | 0x86f001d9, | ||
102 | 0x0489b808, | ||
103 | 0xf00c1bf4, | ||
104 | 0x21f502f7, | ||
105 | 0x00f802ec, | ||
106 | 0xb60798c4, | ||
107 | 0x8dbb0384, | ||
108 | 0x0880b600, | ||
109 | 0x80008e80, | ||
110 | 0x90b6018f, | ||
111 | 0x0f94f001, | ||
112 | 0xf801d980, | ||
113 | 0x0131f400, | ||
114 | 0x9800d898, | ||
115 | 0x89b801d9, | ||
116 | 0x210bf404, | ||
117 | 0xb60789c4, | ||
118 | 0x9dbb0394, | ||
119 | 0x0890b600, | ||
120 | 0x98009e98, | ||
121 | 0x80b6019f, | ||
122 | 0x0f84f001, | ||
123 | 0xf400d880, | ||
124 | 0x00f80132, | ||
125 | 0x0728b7f1, | ||
126 | 0xb906b4b6, | ||
127 | 0xc9f002ec, | ||
128 | 0x00bcd01f, | ||
129 | 0xc800bccf, | ||
130 | 0x1bf41fcc, | ||
131 | 0x06a7f0fa, | ||
132 | 0x010321f5, | ||
133 | 0xf840bfcf, | ||
134 | 0x28b7f100, | ||
135 | 0x06b4b607, | ||
136 | 0xb980bfd0, | ||
137 | 0xc9f002ec, | ||
138 | 0x1ec9f01f, | ||
139 | 0xcf00bcd0, | ||
140 | 0xccc800bc, | ||
141 | 0xfa1bf41f, | ||
142 | 0x87f100f8, | ||
143 | 0x84b60430, | ||
144 | 0x1ff9f006, | ||
145 | 0xf8008fd0, | ||
146 | 0x3087f100, | ||
147 | 0x0684b604, | ||
148 | 0xf80080d0, | ||
149 | 0x3c87f100, | ||
150 | 0x0684b608, | ||
151 | 0x99f094bd, | ||
152 | 0x0089d000, | ||
153 | 0x081887f1, | ||
154 | 0xd00684b6, | ||
155 | 0x87f1008a, | ||
156 | 0x84b60400, | ||
157 | 0x0088cf06, | ||
158 | 0xf4888aff, | ||
159 | 0x87f1f31b, | ||
160 | 0x84b6085c, | ||
161 | 0xf094bd06, | ||
162 | 0x89d00099, | ||
163 | 0xf100f800, | ||
164 | 0xb6083c87, | ||
165 | 0x94bd0684, | ||
166 | 0xd00099f0, | ||
167 | 0x87f10089, | ||
168 | 0x84b60818, | ||
169 | 0x008ad006, | ||
170 | 0x040087f1, | ||
171 | 0xcf0684b6, | ||
172 | 0x8aff0088, | ||
173 | 0xf30bf488, | ||
174 | 0x085c87f1, | ||
175 | 0xbd0684b6, | ||
176 | 0x0099f094, | ||
177 | 0xf80089d0, | ||
178 | 0x9894bd00, | ||
179 | 0x85b600e8, | ||
180 | 0x0180b61a, | ||
181 | 0xbb0284b6, | ||
182 | 0xe0b60098, | ||
183 | 0x04efb804, | ||
184 | 0xb9eb1bf4, | ||
185 | 0x00f8029f, | ||
186 | 0x083c87f1, | ||
187 | 0xbd0684b6, | ||
188 | 0x0199f094, | ||
189 | 0xf10089d0, | ||
190 | 0xb6071087, | ||
191 | 0x94bd0684, | ||
192 | 0xf405bbfd, | ||
193 | 0x8bd0090b, | ||
194 | 0x0099f000, | ||
195 | 0xf405eefd, | ||
196 | 0x8ed00c0b, | ||
197 | 0xc08fd080, | ||
198 | 0xb70199f0, | ||
199 | 0xc8010080, | ||
200 | 0xb4b600ab, | ||
201 | 0x0cb9f010, | ||
202 | 0xb601aec8, | ||
203 | 0xbefd11e4, | ||
204 | 0x008bd005, | ||
205 | 0xf0008ecf, | ||
206 | 0x0bf41fe4, | ||
207 | 0x00ce98fa, | ||
208 | 0xd005e9fd, | ||
209 | 0xc0b6c08e, | ||
210 | 0x04cdb804, | ||
211 | 0xc8e81bf4, | ||
212 | 0x1bf402ab, | ||
213 | 0x008bcf18, | ||
214 | 0xb01fb4f0, | ||
215 | 0x1bf410b4, | ||
216 | 0x02a7f0f7, | ||
217 | 0xf4c921f4, | ||
218 | 0xabc81b0e, | ||
219 | 0x10b4b600, | ||
220 | 0xf00cb9f0, | ||
221 | 0x8bd012b9, | ||
222 | 0x008bcf00, | ||
223 | 0xf412bbc8, | ||
224 | 0x87f1fa1b, | ||
225 | 0x84b6085c, | ||
226 | 0xf094bd06, | ||
227 | 0x89d00199, | ||
228 | 0xf900f800, | ||
229 | 0x02a7f0a0, | ||
230 | 0xfcc921f4, | ||
231 | 0xf100f8a0, | ||
232 | 0xf04afc87, | ||
233 | 0x97f00283, | ||
234 | 0x0089d00c, | ||
235 | 0x020721f5, | ||
236 | 0x87f100f8, | ||
237 | 0x83f04afc, | ||
238 | 0x0d97f002, | ||
239 | 0xf50089d0, | ||
240 | 0xf8020721, | ||
241 | 0xfca7f100, | ||
242 | 0x02a3f04f, | ||
243 | 0x0500aba2, | ||
244 | 0xd00fc7f0, | ||
245 | 0xc7f000ac, | ||
246 | 0x00bcd00b, | ||
247 | 0x020721f5, | ||
248 | 0xf000aed0, | ||
249 | 0xbcd00ac7, | ||
250 | 0x0721f500, | ||
251 | 0xf100f802, | ||
252 | 0xb6083c87, | ||
253 | 0x94bd0684, | ||
254 | 0xd00399f0, | ||
255 | 0x21f50089, | ||
256 | 0xe7f00213, | ||
257 | 0x3921f503, | ||
258 | 0xfca7f102, | ||
259 | 0x02a3f046, | ||
260 | 0x0400aba0, | ||
261 | 0xf040a0d0, | ||
262 | 0xbcd001c7, | ||
263 | 0x0721f500, | ||
264 | 0x010c9202, | ||
265 | 0xf000acd0, | ||
266 | 0xbcd002c7, | ||
267 | 0x0721f500, | ||
268 | 0x2621f502, | ||
269 | 0x8087f102, | ||
270 | 0x0684b608, | ||
271 | 0xb70089cf, | ||
272 | 0x95220080, | ||
273 | 0x8ed008fe, | ||
274 | 0x408ed000, | ||
275 | 0xb6808acf, | ||
276 | 0xa0b606a5, | ||
277 | 0x00eabb01, | ||
278 | 0xb60480b6, | ||
279 | 0x1bf40192, | ||
280 | 0x08e4b6e8, | ||
281 | 0xf1f2efbc, | ||
282 | 0xb6085c87, | ||
283 | 0x94bd0684, | ||
284 | 0xd00399f0, | ||
285 | 0x00f80089, | ||
286 | 0xe7f1e0f9, | ||
287 | 0xe3f09814, | ||
288 | 0x8d21f440, | ||
289 | 0x041ce0b7, | ||
290 | 0xf401f7f0, | ||
291 | 0xe0fc8d21, | ||
292 | 0x04bd00f8, | ||
293 | 0xf10004fe, | ||
294 | 0xf0120017, | ||
295 | 0x12d00227, | ||
296 | 0x3e17f100, | ||
297 | 0x0010fe04, | ||
298 | 0x040017f1, | ||
299 | 0xf0c010d0, | ||
300 | 0x12d00427, | ||
301 | 0x1031f400, | ||
302 | 0x060817f1, | ||
303 | 0xcf0614b6, | ||
304 | 0x37f00012, | ||
305 | 0x1f24f001, | ||
306 | 0xb60432bb, | ||
307 | 0x02800132, | ||
308 | 0x04038003, | ||
309 | 0x040010b7, | ||
310 | 0x800012cf, | ||
311 | 0x27f10002, | ||
312 | 0x24b60800, | ||
313 | 0x0022cf06, | ||
314 | 0xb65817f0, | ||
315 | 0x13980c10, | ||
316 | 0x0432b800, | ||
317 | 0xb00b0bf4, | ||
318 | 0x1bf40034, | ||
319 | 0xf100f8f1, | ||
320 | 0xb6080027, | ||
321 | 0x22cf0624, | ||
322 | 0xf134bd40, | ||
323 | 0xb6070047, | ||
324 | 0x25950644, | ||
325 | 0x0045d008, | ||
326 | 0xbd4045d0, | ||
327 | 0x58f4bde4, | ||
328 | 0x1f58021e, | ||
329 | 0x020e4003, | ||
330 | 0xf5040f40, | ||
331 | 0xbb013d21, | ||
332 | 0x3fbb002f, | ||
333 | 0x041e5800, | ||
334 | 0x40051f58, | ||
335 | 0x0f400a0e, | ||
336 | 0x3d21f50c, | ||
337 | 0x030e9801, | ||
338 | 0xbb00effd, | ||
339 | 0x3ebb002e, | ||
340 | 0x0040b700, | ||
341 | 0x0235b613, | ||
342 | 0xb60043d0, | ||
343 | 0x35b60825, | ||
344 | 0x0120b606, | ||
345 | 0xb60130b6, | ||
346 | 0x34b60824, | ||
347 | 0x022fb908, | ||
348 | 0x026321f5, | ||
349 | 0xf1003fbb, | ||
350 | 0xb6080017, | ||
351 | 0x13d00614, | ||
352 | 0x0010b740, | ||
353 | 0xf024bd08, | ||
354 | 0x12d01f29, | ||
355 | 0x0031f400, | ||
356 | 0xf00028f4, | ||
357 | 0x21f41cd7, | ||
358 | 0xf401f439, | ||
359 | 0xf404e4b0, | ||
360 | 0x81fe1e18, | ||
361 | 0x0627f001, | ||
362 | 0x12fd20bd, | ||
363 | 0x01e4b604, | ||
364 | 0xfe051efd, | ||
365 | 0x21f50018, | ||
366 | 0x0ef404c3, | ||
367 | 0x10ef94d3, | ||
368 | 0xf501f5f0, | ||
369 | 0xf402ec21, | ||
370 | 0x80f9c60e, | ||
371 | 0xf90188fe, | ||
372 | 0xf990f980, | ||
373 | 0xf9b0f9a0, | ||
374 | 0xf9e0f9d0, | ||
375 | 0x800acff0, | ||
376 | 0xf404abc4, | ||
377 | 0xb7f11d0b, | ||
378 | 0xd7f01900, | ||
379 | 0x40becf1c, | ||
380 | 0xf400bfcf, | ||
381 | 0xb0b70421, | ||
382 | 0xe7f00400, | ||
383 | 0x00bed001, | ||
384 | 0xfc400ad0, | ||
385 | 0xfce0fcf0, | ||
386 | 0xfcb0fcd0, | ||
387 | 0xfc90fca0, | ||
388 | 0x0088fe80, | ||
389 | 0x32f480fc, | ||
390 | 0xf001f800, | ||
391 | 0x0e9801f7, | ||
392 | 0x04febb00, | ||
393 | 0x9418e7f1, | ||
394 | 0xf440e3f0, | ||
395 | 0x00f88d21, | ||
396 | 0x0614e7f1, | ||
397 | 0xf006e4b6, | ||
398 | 0xefd020f7, | ||
399 | 0x08f7f000, | ||
400 | 0xf401f2b6, | ||
401 | 0xf7f1fd1b, | ||
402 | 0xefd00a20, | ||
403 | 0xf100f800, | ||
404 | 0xb60a0417, | ||
405 | 0x1fd00614, | ||
406 | 0x0711f400, | ||
407 | 0x04a421f5, | ||
408 | 0x4afc17f1, | ||
409 | 0xf00213f0, | ||
410 | 0x12d00c27, | ||
411 | 0x0721f500, | ||
412 | 0xfc27f102, | ||
413 | 0x0223f047, | ||
414 | 0xf00020d0, | ||
415 | 0x20b6012c, | ||
416 | 0x0012d003, | ||
417 | 0xf001acf0, | ||
418 | 0xb7f002a5, | ||
419 | 0x50b3f000, | ||
420 | 0xb6000c98, | ||
421 | 0xbcbb0fc4, | ||
422 | 0x010c9800, | ||
423 | 0xf0020d98, | ||
424 | 0x21f500e7, | ||
425 | 0xacf0015c, | ||
426 | 0x04a5f001, | ||
427 | 0x4000b7f1, | ||
428 | 0x9850b3f0, | ||
429 | 0xc4b6000c, | ||
430 | 0x00bcbb0f, | ||
431 | 0x98050c98, | ||
432 | 0x0f98060d, | ||
433 | 0x00e7f104, | ||
434 | 0x5c21f508, | ||
435 | 0x0721f501, | ||
436 | 0x0601f402, | ||
437 | 0xf11412f4, | ||
438 | 0xf04afc17, | ||
439 | 0x27f00213, | ||
440 | 0x0012d00d, | ||
441 | 0x020721f5, | ||
442 | 0x048f21f5, | ||
443 | 0x000000f8, | ||
444 | 0x00000000, | ||
445 | 0x00000000, | ||
446 | 0x00000000, | ||
447 | 0x00000000, | ||
448 | 0x00000000, | ||
449 | 0x00000000, | ||
450 | 0x00000000, | ||
451 | 0x00000000, | ||
452 | 0x00000000, | ||
453 | 0x00000000, | ||
454 | 0x00000000, | ||
455 | 0x00000000, | ||
456 | 0x00000000, | ||
457 | 0x00000000, | ||
458 | 0x00000000, | ||
459 | 0x00000000, | ||
460 | 0x00000000, | ||
461 | 0x00000000, | ||
462 | 0x00000000, | ||
463 | 0x00000000, | ||
464 | 0x00000000, | ||
465 | 0x00000000, | ||
466 | 0x00000000, | ||
467 | 0x00000000, | ||
468 | 0x00000000, | ||
469 | 0x00000000, | ||
470 | 0x00000000, | ||
471 | 0x00000000, | ||
472 | 0x00000000, | ||
473 | 0x00000000, | ||
474 | 0x00000000, | ||
475 | 0x00000000, | ||
476 | 0x00000000, | ||
477 | 0x00000000, | ||
478 | 0x00000000, | ||
479 | 0x00000000, | ||
480 | 0x00000000, | ||
481 | 0x00000000, | ||
482 | 0x00000000, | ||
483 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc new file mode 100644 index 00000000000..a1a599124cf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc | |||
@@ -0,0 +1,808 @@ | |||
1 | /* fuc microcode for nvc0 PGRAPH/HUB | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
26 | /* To build: | ||
27 | * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h | ||
28 | */ | ||
29 | |||
30 | .section nvc0_grhub_data | ||
31 | include(`nvc0_graph.fuc') | ||
32 | gpc_count: .b32 0 | ||
33 | rop_count: .b32 0 | ||
34 | cmd_queue: queue_init | ||
35 | hub_mmio_list_head: .b32 0 | ||
36 | hub_mmio_list_tail: .b32 0 | ||
37 | |||
38 | ctx_current: .b32 0 | ||
39 | |||
40 | chipsets: | ||
41 | .b8 0xc0 0 0 0 | ||
42 | .b16 nvc0_hub_mmio_head | ||
43 | .b16 nvc0_hub_mmio_tail | ||
44 | .b8 0xc1 0 0 0 | ||
45 | .b16 nvc0_hub_mmio_head | ||
46 | .b16 nvc1_hub_mmio_tail | ||
47 | .b8 0xc3 0 0 0 | ||
48 | .b16 nvc0_hub_mmio_head | ||
49 | .b16 nvc0_hub_mmio_tail | ||
50 | .b8 0xc4 0 0 0 | ||
51 | .b16 nvc0_hub_mmio_head | ||
52 | .b16 nvc0_hub_mmio_tail | ||
53 | .b8 0xc8 0 0 0 | ||
54 | .b16 nvc0_hub_mmio_head | ||
55 | .b16 nvc0_hub_mmio_tail | ||
56 | .b8 0xce 0 0 0 | ||
57 | .b16 nvc0_hub_mmio_head | ||
58 | .b16 nvc0_hub_mmio_tail | ||
59 | .b8 0 0 0 0 | ||
60 | |||
61 | nvc0_hub_mmio_head: | ||
62 | mmctx_data(0x17e91c, 2) | ||
63 | mmctx_data(0x400204, 2) | ||
64 | mmctx_data(0x404004, 11) | ||
65 | mmctx_data(0x404044, 1) | ||
66 | mmctx_data(0x404094, 14) | ||
67 | mmctx_data(0x4040d0, 7) | ||
68 | mmctx_data(0x4040f8, 1) | ||
69 | mmctx_data(0x404130, 3) | ||
70 | mmctx_data(0x404150, 3) | ||
71 | mmctx_data(0x404164, 2) | ||
72 | mmctx_data(0x404174, 3) | ||
73 | mmctx_data(0x404200, 8) | ||
74 | mmctx_data(0x404404, 14) | ||
75 | mmctx_data(0x404460, 4) | ||
76 | mmctx_data(0x404480, 1) | ||
77 | mmctx_data(0x404498, 1) | ||
78 | mmctx_data(0x404604, 4) | ||
79 | mmctx_data(0x404618, 32) | ||
80 | mmctx_data(0x404698, 21) | ||
81 | mmctx_data(0x4046f0, 2) | ||
82 | mmctx_data(0x404700, 22) | ||
83 | mmctx_data(0x405800, 1) | ||
84 | mmctx_data(0x405830, 3) | ||
85 | mmctx_data(0x405854, 1) | ||
86 | mmctx_data(0x405870, 4) | ||
87 | mmctx_data(0x405a00, 2) | ||
88 | mmctx_data(0x405a18, 1) | ||
89 | mmctx_data(0x406020, 1) | ||
90 | mmctx_data(0x406028, 4) | ||
91 | mmctx_data(0x4064a8, 2) | ||
92 | mmctx_data(0x4064b4, 2) | ||
93 | mmctx_data(0x407804, 1) | ||
94 | mmctx_data(0x40780c, 6) | ||
95 | mmctx_data(0x4078bc, 1) | ||
96 | mmctx_data(0x408000, 7) | ||
97 | mmctx_data(0x408064, 1) | ||
98 | mmctx_data(0x408800, 3) | ||
99 | mmctx_data(0x408900, 4) | ||
100 | mmctx_data(0x408980, 1) | ||
101 | nvc0_hub_mmio_tail: | ||
102 | mmctx_data(0x4064c0, 2) | ||
103 | nvc1_hub_mmio_tail: | ||
104 | |||
105 | .align 256 | ||
106 | chan_data: | ||
107 | chan_mmio_count: .b32 0 | ||
108 | chan_mmio_address: .b32 0 | ||
109 | |||
110 | .align 256 | ||
111 | xfer_data: .b32 0 | ||
112 | |||
113 | .section nvc0_grhub_code | ||
114 | bra init | ||
115 | define(`include_code') | ||
116 | include(`nvc0_graph.fuc') | ||
117 | |||
118 | // reports an exception to the host | ||
119 | // | ||
120 | // In: $r15 error code (see nvc0_graph.fuc) | ||
121 | // | ||
122 | error: | ||
123 | push $r14 | ||
124 | mov $r14 0x814 | ||
125 | shl b32 $r14 6 | ||
126 | iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code | ||
127 | mov $r14 0xc1c | ||
128 | shl b32 $r14 6 | ||
129 | mov $r15 1 | ||
130 | iowr I[$r14 + 0x000] $r15 // INTR_UP_SET | ||
131 | pop $r14 | ||
132 | ret | ||
133 | |||
134 | // HUB fuc initialisation, executed by triggering ucode start, will | ||
135 | // fall through to main loop after completion. | ||
136 | // | ||
137 | // Input: | ||
138 | // CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) | ||
139 | // | ||
140 | // Output: | ||
141 | // CC_SCRATCH[0]: | ||
142 | // 31:31: set to signal completion | ||
143 | // CC_SCRATCH[1]: | ||
144 | // 31:0: total PGRAPH context size | ||
145 | // | ||
146 | init: | ||
147 | clear b32 $r0 | ||
148 | mov $sp $r0 | ||
149 | mov $xdbase $r0 | ||
150 | |||
151 | // enable fifo access | ||
152 | mov $r1 0x1200 | ||
153 | mov $r2 2 | ||
154 | iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE | ||
155 | |||
156 | // setup i0 handler, and route all interrupts to it | ||
157 | mov $r1 ih | ||
158 | mov $iv0 $r1 | ||
159 | mov $r1 0x400 | ||
160 | iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH | ||
161 | |||
162 | // route HUB_CHANNEL_SWITCH to fuc interrupt 8 | ||
163 | mov $r3 0x404 | ||
164 | shl b32 $r3 6 | ||
165 | mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8 | ||
166 | iowr I[$r3 + 0x000] $r2 | ||
167 | |||
168 | // not sure what these are, route them because NVIDIA does, and | ||
169 | // the IRQ handler will signal the host if we ever get one.. we | ||
170 | // may find out if/why we need to handle these if so.. | ||
171 | // | ||
172 | mov $r2 0x2004 | ||
173 | iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9 | ||
174 | mov $r2 0x200b | ||
175 | iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10 | ||
176 | mov $r2 0x200c | ||
177 | iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15 | ||
178 | |||
179 | // enable all INTR_UP interrupts | ||
180 | mov $r2 0xc24 | ||
181 | shl b32 $r2 6 | ||
182 | not b32 $r3 $r0 | ||
183 | iowr I[$r2] $r3 | ||
184 | |||
185 | // enable fifo, ctxsw, 9, 10, 15 interrupts | ||
186 | mov $r2 -0x78fc // 0x8704 | ||
187 | sethi $r2 0 | ||
188 | iowr I[$r1 + 0x000] $r2 // INTR_EN_SET | ||
189 | |||
190 | // fifo level triggered, rest edge | ||
191 | sub b32 $r1 0x100 | ||
192 | mov $r2 4 | ||
193 | iowr I[$r1] $r2 | ||
194 | |||
195 | // enable interrupts | ||
196 | bset $flags ie0 | ||
197 | |||
198 | // fetch enabled GPC/ROP counts | ||
199 | mov $r14 -0x69fc // 0x409604 | ||
200 | sethi $r14 0x400000 | ||
201 | call nv_rd32 | ||
202 | extr $r1 $r15 16:20 | ||
203 | st b32 D[$r0 + rop_count] $r1 | ||
204 | and $r15 0x1f | ||
205 | st b32 D[$r0 + gpc_count] $r15 | ||
206 | |||
207 | // set BAR_REQMASK to GPC mask | ||
208 | mov $r1 1 | ||
209 | shl b32 $r1 $r15 | ||
210 | sub b32 $r1 1 | ||
211 | mov $r2 0x40c | ||
212 | shl b32 $r2 6 | ||
213 | iowr I[$r2 + 0x000] $r1 | ||
214 | iowr I[$r2 + 0x100] $r1 | ||
215 | |||
216 | // find context data for this chipset | ||
217 | mov $r2 0x800 | ||
218 | shl b32 $r2 6 | ||
219 | iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] | ||
220 | mov $r15 chipsets - 8 | ||
221 | init_find_chipset: | ||
222 | add b32 $r15 8 | ||
223 | ld b32 $r3 D[$r15 + 0x00] | ||
224 | cmpu b32 $r3 $r2 | ||
225 | bra e init_context | ||
226 | cmpu b32 $r3 0 | ||
227 | bra ne init_find_chipset | ||
228 | // unknown chipset | ||
229 | ret | ||
230 | |||
231 | // context size calculation, reserve first 256 bytes for use by fuc | ||
232 | init_context: | ||
233 | mov $r1 256 | ||
234 | |||
235 | // calculate size of mmio context data | ||
236 | ld b16 $r14 D[$r15 + 4] | ||
237 | ld b16 $r15 D[$r15 + 6] | ||
238 | sethi $r14 0 | ||
239 | st b32 D[$r0 + hub_mmio_list_head] $r14 | ||
240 | st b32 D[$r0 + hub_mmio_list_tail] $r15 | ||
241 | call mmctx_size | ||
242 | |||
243 | // set mmctx base addresses now so we don't have to do it later, | ||
244 | // they don't (currently) ever change | ||
245 | mov $r3 0x700 | ||
246 | shl b32 $r3 6 | ||
247 | shr b32 $r4 $r1 8 | ||
248 | iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE | ||
249 | iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE | ||
250 | add b32 $r3 0x1300 | ||
251 | add b32 $r1 $r15 | ||
252 | shr b32 $r15 2 | ||
253 | iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!? | ||
254 | |||
255 | // strands, base offset needs to be aligned to 256 bytes | ||
256 | shr b32 $r1 8 | ||
257 | add b32 $r1 1 | ||
258 | shl b32 $r1 8 | ||
259 | mov b32 $r15 $r1 | ||
260 | call strand_ctx_init | ||
261 | add b32 $r1 $r15 | ||
262 | |||
263 | // initialise each GPC in sequence by passing in the offset of its | ||
264 | // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which | ||
265 | // has previously been uploaded by the host) running. | ||
266 | // | ||
267 | // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31 | ||
268 | // when it has completed, and return the size of its context data | ||
269 | // in GPCn_CC_SCRATCH[1] | ||
270 | // | ||
271 | ld b32 $r3 D[$r0 + gpc_count] | ||
272 | mov $r4 0x2000 | ||
273 | sethi $r4 0x500000 | ||
274 | init_gpc: | ||
275 | // setup, and start GPC ucode running | ||
276 | add b32 $r14 $r4 0x804 | ||
277 | mov b32 $r15 $r1 | ||
278 | call nv_wr32 // CC_SCRATCH[1] = ctx offset | ||
279 | add b32 $r14 $r4 0x800 | ||
280 | mov b32 $r15 $r2 | ||
281 | call nv_wr32 // CC_SCRATCH[0] = chipset | ||
282 | add b32 $r14 $r4 0x10c | ||
283 | clear b32 $r15 | ||
284 | call nv_wr32 | ||
285 | add b32 $r14 $r4 0x104 | ||
286 | call nv_wr32 // ENTRY | ||
287 | add b32 $r14 $r4 0x100 | ||
288 | mov $r15 2 // CTRL_START_TRIGGER | ||
289 | call nv_wr32 // CTRL | ||
290 | |||
291 | // wait for it to complete, and adjust context size | ||
292 | add b32 $r14 $r4 0x800 | ||
293 | init_gpc_wait: | ||
294 | call nv_rd32 | ||
295 | xbit $r15 $r15 31 | ||
296 | bra e init_gpc_wait | ||
297 | add b32 $r14 $r4 0x804 | ||
298 | call nv_rd32 | ||
299 | add b32 $r1 $r15 | ||
300 | |||
301 | // next! | ||
302 | add b32 $r4 0x8000 | ||
303 | sub b32 $r3 1 | ||
304 | bra ne init_gpc | ||
305 | |||
306 | // save context size, and tell host we're ready | ||
307 | mov $r2 0x800 | ||
308 | shl b32 $r2 6 | ||
309 | iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size | ||
310 | add b32 $r2 0x800 | ||
311 | clear b32 $r1 | ||
312 | bset $r1 31 | ||
313 | iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000 | ||
314 | |||
315 | // Main program loop, very simple, sleeps until woken up by the interrupt | ||
316 | // handler, pulls a command from the queue and executes its handler | ||
317 | // | ||
318 | main: | ||
319 | // sleep until we have something to do | ||
320 | bset $flags $p0 | ||
321 | sleep $p0 | ||
322 | mov $r13 cmd_queue | ||
323 | call queue_get | ||
324 | bra $p1 main | ||
325 | |||
326 | // context switch, requested by GPU? | ||
327 | cmpu b32 $r14 0x4001 | ||
328 | bra ne main_not_ctx_switch | ||
329 | trace_set(T_AUTO) | ||
330 | mov $r1 0xb00 | ||
331 | shl b32 $r1 6 | ||
332 | iord $r2 I[$r1 + 0x100] // CHAN_NEXT | ||
333 | iord $r1 I[$r1 + 0x000] // CHAN_CUR | ||
334 | |||
335 | xbit $r3 $r1 31 | ||
336 | bra e chsw_no_prev | ||
337 | xbit $r3 $r2 31 | ||
338 | bra e chsw_prev_no_next | ||
339 | push $r2 | ||
340 | mov b32 $r2 $r1 | ||
341 | trace_set(T_SAVE) | ||
342 | bclr $flags $p1 | ||
343 | bset $flags $p2 | ||
344 | call ctx_xfer | ||
345 | trace_clr(T_SAVE); | ||
346 | pop $r2 | ||
347 | trace_set(T_LOAD); | ||
348 | bset $flags $p1 | ||
349 | call ctx_xfer | ||
350 | trace_clr(T_LOAD); | ||
351 | bra chsw_done | ||
352 | chsw_prev_no_next: | ||
353 | push $r2 | ||
354 | mov b32 $r2 $r1 | ||
355 | bclr $flags $p1 | ||
356 | bclr $flags $p2 | ||
357 | call ctx_xfer | ||
358 | pop $r2 | ||
359 | mov $r1 0xb00 | ||
360 | shl b32 $r1 6 | ||
361 | iowr I[$r1] $r2 | ||
362 | bra chsw_done | ||
363 | chsw_no_prev: | ||
364 | xbit $r3 $r2 31 | ||
365 | bra e chsw_done | ||
366 | bset $flags $p1 | ||
367 | bclr $flags $p2 | ||
368 | call ctx_xfer | ||
369 | |||
370 | // ack the context switch request | ||
371 | chsw_done: | ||
372 | mov $r1 0xb0c | ||
373 | shl b32 $r1 6 | ||
374 | mov $r2 1 | ||
375 | iowr I[$r1 + 0x000] $r2 // 0x409b0c | ||
376 | trace_clr(T_AUTO) | ||
377 | bra main | ||
378 | |||
379 | // request to set current channel? (*not* a context switch) | ||
380 | main_not_ctx_switch: | ||
381 | cmpu b32 $r14 0x0001 | ||
382 | bra ne main_not_ctx_chan | ||
383 | mov b32 $r2 $r15 | ||
384 | call ctx_chan | ||
385 | bra main_done | ||
386 | |||
387 | // request to store current channel context? | ||
388 | main_not_ctx_chan: | ||
389 | cmpu b32 $r14 0x0002 | ||
390 | bra ne main_not_ctx_save | ||
391 | trace_set(T_SAVE) | ||
392 | bclr $flags $p1 | ||
393 | bclr $flags $p2 | ||
394 | call ctx_xfer | ||
395 | trace_clr(T_SAVE) | ||
396 | bra main_done | ||
397 | |||
398 | main_not_ctx_save: | ||
399 | shl b32 $r15 $r14 16 | ||
400 | or $r15 E_BAD_COMMAND | ||
401 | call error | ||
402 | bra main | ||
403 | |||
404 | main_done: | ||
405 | mov $r1 0x820 | ||
406 | shl b32 $r1 6 | ||
407 | clear b32 $r2 | ||
408 | bset $r2 31 | ||
409 | iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 | ||
410 | bra main | ||
411 | |||
412 | // interrupt handler | ||
413 | ih: | ||
414 | push $r8 | ||
415 | mov $r8 $flags | ||
416 | push $r8 | ||
417 | push $r9 | ||
418 | push $r10 | ||
419 | push $r11 | ||
420 | push $r13 | ||
421 | push $r14 | ||
422 | push $r15 | ||
423 | |||
424 | // incoming fifo command? | ||
425 | iord $r10 I[$r0 + 0x200] // INTR | ||
426 | and $r11 $r10 0x00000004 | ||
427 | bra e ih_no_fifo | ||
428 | // queue incoming fifo command for later processing | ||
429 | mov $r11 0x1900 | ||
430 | mov $r13 cmd_queue | ||
431 | iord $r14 I[$r11 + 0x100] // FIFO_CMD | ||
432 | iord $r15 I[$r11 + 0x000] // FIFO_DATA | ||
433 | call queue_put | ||
434 | add b32 $r11 0x400 | ||
435 | mov $r14 1 | ||
436 | iowr I[$r11 + 0x000] $r14 // FIFO_ACK | ||
437 | |||
438 | // context switch request? | ||
439 | ih_no_fifo: | ||
440 | and $r11 $r10 0x00000100 | ||
441 | bra e ih_no_ctxsw | ||
442 | // enqueue a context switch for later processing | ||
443 | mov $r13 cmd_queue | ||
444 | mov $r14 0x4001 | ||
445 | call queue_put | ||
446 | |||
447 | // anything we didn't handle, bring it to the host's attention | ||
448 | ih_no_ctxsw: | ||
449 | mov $r11 0x104 | ||
450 | not b32 $r11 | ||
451 | and $r11 $r10 $r11 | ||
452 | bra e ih_no_other | ||
453 | mov $r10 0xc1c | ||
454 | shl b32 $r10 6 | ||
455 | iowr I[$r10] $r11 // INTR_UP_SET | ||
456 | |||
457 | // ack, and wake up main() | ||
458 | ih_no_other: | ||
459 | iowr I[$r0 + 0x100] $r10 // INTR_ACK | ||
460 | |||
461 | pop $r15 | ||
462 | pop $r14 | ||
463 | pop $r13 | ||
464 | pop $r11 | ||
465 | pop $r10 | ||
466 | pop $r9 | ||
467 | pop $r8 | ||
468 | mov $flags $r8 | ||
469 | pop $r8 | ||
470 | bclr $flags $p0 | ||
471 | iret | ||
472 | |||
473 | // Not real sure, but, MEM_CMD 7 will hang forever if this isn't done | ||
474 | ctx_4160s: | ||
475 | mov $r14 0x4160 | ||
476 | sethi $r14 0x400000 | ||
477 | mov $r15 1 | ||
478 | call nv_wr32 | ||
479 | ctx_4160s_wait: | ||
480 | call nv_rd32 | ||
481 | xbit $r15 $r15 4 | ||
482 | bra e ctx_4160s_wait | ||
483 | ret | ||
484 | |||
485 | // Without clearing again at end of xfer, some things cause PGRAPH | ||
486 | // to hang with STATUS=0x00000007 until it's cleared.. fbcon can | ||
487 | // still function with it set however... | ||
488 | ctx_4160c: | ||
489 | mov $r14 0x4160 | ||
490 | sethi $r14 0x400000 | ||
491 | clear b32 $r15 | ||
492 | call nv_wr32 | ||
493 | ret | ||
494 | |||
495 | // Again, not real sure | ||
496 | // | ||
497 | // In: $r15 value to set 0x404170 to | ||
498 | // | ||
499 | ctx_4170s: | ||
500 | mov $r14 0x4170 | ||
501 | sethi $r14 0x400000 | ||
502 | or $r15 0x10 | ||
503 | call nv_wr32 | ||
504 | ret | ||
505 | |||
506 | // Waits for a ctx_4170s() call to complete | ||
507 | // | ||
508 | ctx_4170w: | ||
509 | mov $r14 0x4170 | ||
510 | sethi $r14 0x400000 | ||
511 | call nv_rd32 | ||
512 | and $r15 0x10 | ||
513 | bra ne ctx_4170w | ||
514 | ret | ||
515 | |||
516 | // Disables various things, waits a bit, and re-enables them.. | ||
517 | // | ||
518 | // Not sure how exactly this helps, perhaps "ENABLE" is not such a | ||
519 | // good description for the bits we turn off? Anyways, without this, | ||
520 | // funny things happen. | ||
521 | // | ||
522 | ctx_redswitch: | ||
523 | mov $r14 0x614 | ||
524 | shl b32 $r14 6 | ||
525 | mov $r15 0x270 | ||
526 | iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL | ||
527 | mov $r15 8 | ||
528 | ctx_redswitch_delay: | ||
529 | sub b32 $r15 1 | ||
530 | bra ne ctx_redswitch_delay | ||
531 | mov $r15 0x770 | ||
532 | iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL | ||
533 | ret | ||
534 | |||
535 | // Not a clue what this is for, except that unless the value is 0x10, the | ||
536 | // strand context is saved (and presumably restored) incorrectly.. | ||
537 | // | ||
538 | // In: $r15 value to set to (0x00/0x10 are used) | ||
539 | // | ||
540 | ctx_86c: | ||
541 | mov $r14 0x86c | ||
542 | shl b32 $r14 6 | ||
543 | iowr I[$r14] $r15 // HUB(0x86c) = val | ||
544 | mov $r14 -0x75ec | ||
545 | sethi $r14 0x400000 | ||
546 | call nv_wr32 // ROP(0xa14) = val | ||
547 | mov $r14 -0x5794 | ||
548 | sethi $r14 0x410000 | ||
549 | call nv_wr32 // GPC(0x86c) = val | ||
550 | ret | ||
551 | |||
552 | // ctx_load - load's a channel's ctxctl data, and selects its vm | ||
553 | // | ||
554 | // In: $r2 channel address | ||
555 | // | ||
556 | ctx_load: | ||
557 | trace_set(T_CHAN) | ||
558 | |||
559 | // switch to channel, somewhat magic in parts.. | ||
560 | mov $r10 12 // DONE_UNK12 | ||
561 | call wait_donez | ||
562 | mov $r1 0xa24 | ||
563 | shl b32 $r1 6 | ||
564 | iowr I[$r1 + 0x000] $r0 // 0x409a24 | ||
565 | mov $r3 0xb00 | ||
566 | shl b32 $r3 6 | ||
567 | iowr I[$r3 + 0x100] $r2 // CHAN_NEXT | ||
568 | mov $r1 0xa0c | ||
569 | shl b32 $r1 6 | ||
570 | mov $r4 7 | ||
571 | iowr I[$r1 + 0x000] $r2 // MEM_CHAN | ||
572 | iowr I[$r1 + 0x100] $r4 // MEM_CMD | ||
573 | ctx_chan_wait_0: | ||
574 | iord $r4 I[$r1 + 0x100] | ||
575 | and $r4 0x1f | ||
576 | bra ne ctx_chan_wait_0 | ||
577 | iowr I[$r3 + 0x000] $r2 // CHAN_CUR | ||
578 | |||
579 | // load channel header, fetch PGRAPH context pointer | ||
580 | mov $xtargets $r0 | ||
581 | bclr $r2 31 | ||
582 | shl b32 $r2 4 | ||
583 | add b32 $r2 2 | ||
584 | |||
585 | trace_set(T_LCHAN) | ||
586 | mov $r1 0xa04 | ||
587 | shl b32 $r1 6 | ||
588 | iowr I[$r1 + 0x000] $r2 // MEM_BASE | ||
589 | mov $r1 0xa20 | ||
590 | shl b32 $r1 6 | ||
591 | mov $r2 0x0002 | ||
592 | sethi $r2 0x80000000 | ||
593 | iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram | ||
594 | mov $r1 0x10 // chan + 0x0210 | ||
595 | mov $r2 xfer_data | ||
596 | sethi $r2 0x00020000 // 16 bytes | ||
597 | xdld $r1 $r2 | ||
598 | xdwait | ||
599 | trace_clr(T_LCHAN) | ||
600 | |||
601 | // update current context | ||
602 | ld b32 $r1 D[$r0 + xfer_data + 4] | ||
603 | shl b32 $r1 24 | ||
604 | ld b32 $r2 D[$r0 + xfer_data + 0] | ||
605 | shr b32 $r2 8 | ||
606 | or $r1 $r2 | ||
607 | st b32 D[$r0 + ctx_current] $r1 | ||
608 | |||
609 | // set transfer base to start of context, and fetch context header | ||
610 | trace_set(T_LCTXH) | ||
611 | mov $r2 0xa04 | ||
612 | shl b32 $r2 6 | ||
613 | iowr I[$r2 + 0x000] $r1 // MEM_BASE | ||
614 | mov $r2 1 | ||
615 | mov $r1 0xa20 | ||
616 | shl b32 $r1 6 | ||
617 | iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm | ||
618 | mov $r1 chan_data | ||
619 | sethi $r1 0x00060000 // 256 bytes | ||
620 | xdld $r0 $r1 | ||
621 | xdwait | ||
622 | trace_clr(T_LCTXH) | ||
623 | |||
624 | trace_clr(T_CHAN) | ||
625 | ret | ||
626 | |||
627 | // ctx_chan - handler for HUB_SET_CHAN command, will set a channel as | ||
628 | // the active channel for ctxctl, but not actually transfer | ||
629 | // any context data. intended for use only during initial | ||
630 | // context construction. | ||
631 | // | ||
632 | // In: $r2 channel address | ||
633 | // | ||
634 | ctx_chan: | ||
635 | call ctx_4160s | ||
636 | call ctx_load | ||
637 | mov $r10 12 // DONE_UNK12 | ||
638 | call wait_donez | ||
639 | mov $r1 0xa10 | ||
640 | shl b32 $r1 6 | ||
641 | mov $r2 5 | ||
642 | iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???) | ||
643 | ctx_chan_wait: | ||
644 | iord $r2 I[$r1 + 0x000] | ||
645 | or $r2 $r2 | ||
646 | bra ne ctx_chan_wait | ||
647 | call ctx_4160c | ||
648 | ret | ||
649 | |||
650 | // Execute per-context state overrides list | ||
651 | // | ||
652 | // Only executed on the first load of a channel. Might want to look into | ||
653 | // removing this and having the host directly modify the channel's context | ||
654 | // to change this state... The nouveau DRM already builds this list as | ||
655 | // it's definitely needed for NVIDIA's, so we may as well use it for now | ||
656 | // | ||
657 | // Input: $r1 mmio list length | ||
658 | // | ||
659 | ctx_mmio_exec: | ||
660 | // set transfer base to be the mmio list | ||
661 | ld b32 $r3 D[$r0 + chan_mmio_address] | ||
662 | mov $r2 0xa04 | ||
663 | shl b32 $r2 6 | ||
664 | iowr I[$r2 + 0x000] $r3 // MEM_BASE | ||
665 | |||
666 | clear b32 $r3 | ||
667 | ctx_mmio_loop: | ||
668 | // fetch next 256 bytes of mmio list if necessary | ||
669 | and $r4 $r3 0xff | ||
670 | bra ne ctx_mmio_pull | ||
671 | mov $r5 xfer_data | ||
672 | sethi $r5 0x00060000 // 256 bytes | ||
673 | xdld $r3 $r5 | ||
674 | xdwait | ||
675 | |||
676 | // execute a single list entry | ||
677 | ctx_mmio_pull: | ||
678 | ld b32 $r14 D[$r4 + xfer_data + 0x00] | ||
679 | ld b32 $r15 D[$r4 + xfer_data + 0x04] | ||
680 | call nv_wr32 | ||
681 | |||
682 | // next! | ||
683 | add b32 $r3 8 | ||
684 | sub b32 $r1 1 | ||
685 | bra ne ctx_mmio_loop | ||
686 | |||
687 | // set transfer base back to the current context | ||
688 | ctx_mmio_done: | ||
689 | ld b32 $r3 D[$r0 + ctx_current] | ||
690 | iowr I[$r2 + 0x000] $r3 // MEM_BASE | ||
691 | |||
692 | // disable the mmio list now, we don't need/want to execute it again | ||
693 | st b32 D[$r0 + chan_mmio_count] $r0 | ||
694 | mov $r1 chan_data | ||
695 | sethi $r1 0x00060000 // 256 bytes | ||
696 | xdst $r0 $r1 | ||
697 | xdwait | ||
698 | ret | ||
699 | |||
700 | // Transfer HUB context data between GPU and storage area | ||
701 | // | ||
702 | // In: $r2 channel address | ||
703 | // $p1 clear on save, set on load | ||
704 | // $p2 set if opposite direction done/will be done, so: | ||
705 | // on save it means: "a load will follow this save" | ||
// on load it means: "a save preceded this load"
707 | // | ||
708 | ctx_xfer: | ||
709 | bra not $p1 ctx_xfer_pre | ||
710 | bra $p2 ctx_xfer_pre_load | ||
711 | ctx_xfer_pre: | ||
712 | mov $r15 0x10 | ||
713 | call ctx_86c | ||
714 | call ctx_4160s | ||
715 | bra not $p1 ctx_xfer_exec | ||
716 | |||
717 | ctx_xfer_pre_load: | ||
718 | mov $r15 2 | ||
719 | call ctx_4170s | ||
720 | call ctx_4170w | ||
721 | call ctx_redswitch | ||
722 | clear b32 $r15 | ||
723 | call ctx_4170s | ||
724 | call ctx_load | ||
725 | |||
726 | // fetch context pointer, and initiate xfer on all GPCs | ||
727 | ctx_xfer_exec: | ||
728 | ld b32 $r1 D[$r0 + ctx_current] | ||
729 | mov $r2 0x414 | ||
730 | shl b32 $r2 6 | ||
731 | iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset | ||
732 | mov $r14 -0x5b00 | ||
733 | sethi $r14 0x410000 | ||
734 | mov b32 $r15 $r1 | ||
735 | call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer | ||
736 | add b32 $r14 4 | ||
737 | xbit $r15 $flags $p1 | ||
738 | xbit $r2 $flags $p2 | ||
739 | shl b32 $r2 1 | ||
740 | or $r15 $r2 | ||
741 | call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) | ||
742 | |||
743 | // strands | ||
744 | mov $r1 0x4afc | ||
745 | sethi $r1 0x20000 | ||
746 | mov $r2 0xc | ||
747 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c | ||
748 | call strand_wait | ||
749 | mov $r2 0x47fc | ||
750 | sethi $r2 0x20000 | ||
751 | iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 | ||
752 | xbit $r2 $flags $p1 | ||
753 | add b32 $r2 3 | ||
754 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) | ||
755 | |||
756 | // mmio context | ||
757 | xbit $r10 $flags $p1 // direction | ||
758 | or $r10 6 // first, last | ||
759 | mov $r11 0 // base = 0 | ||
760 | ld b32 $r12 D[$r0 + hub_mmio_list_head] | ||
761 | ld b32 $r13 D[$r0 + hub_mmio_list_tail] | ||
762 | mov $r14 0 // not multi | ||
763 | call mmctx_xfer | ||
764 | |||
765 | // wait for GPCs to all complete | ||
766 | mov $r10 8 // DONE_BAR | ||
767 | call wait_doneo | ||
768 | |||
769 | // wait for strand xfer to complete | ||
770 | call strand_wait | ||
771 | |||
772 | // post-op | ||
773 | bra $p1 ctx_xfer_post | ||
774 | mov $r10 12 // DONE_UNK12 | ||
775 | call wait_donez | ||
776 | mov $r1 0xa10 | ||
777 | shl b32 $r1 6 | ||
778 | mov $r2 5 | ||
779 | iowr I[$r1] $r2 // MEM_CMD | ||
780 | ctx_xfer_post_save_wait: | ||
781 | iord $r2 I[$r1] | ||
782 | or $r2 $r2 | ||
783 | bra ne ctx_xfer_post_save_wait | ||
784 | |||
785 | bra $p2 ctx_xfer_done | ||
786 | ctx_xfer_post: | ||
787 | mov $r15 2 | ||
788 | call ctx_4170s | ||
789 | clear b32 $r15 | ||
790 | call ctx_86c | ||
791 | call strand_post | ||
792 | call ctx_4170w | ||
793 | clear b32 $r15 | ||
794 | call ctx_4170s | ||
795 | |||
796 | bra not $p1 ctx_xfer_no_post_mmio | ||
797 | ld b32 $r1 D[$r0 + chan_mmio_count] | ||
798 | or $r1 $r1 | ||
799 | bra e ctx_xfer_no_post_mmio | ||
800 | call ctx_mmio_exec | ||
801 | |||
802 | ctx_xfer_no_post_mmio: | ||
803 | call ctx_4160c | ||
804 | |||
805 | ctx_xfer_done: | ||
806 | ret | ||
807 | |||
808 | .align 256 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h new file mode 100644 index 00000000000..b3b541b6d04 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h | |||
@@ -0,0 +1,838 @@ | |||
1 | uint32_t nvc0_grhub_data[] = { | ||
2 | 0x00000000, | ||
3 | 0x00000000, | ||
4 | 0x00000000, | ||
5 | 0x00000000, | ||
6 | 0x00000000, | ||
7 | 0x00000000, | ||
8 | 0x00000000, | ||
9 | 0x00000000, | ||
10 | 0x00000000, | ||
11 | 0x00000000, | ||
12 | 0x00000000, | ||
13 | 0x00000000, | ||
14 | 0x00000000, | ||
15 | 0x00000000, | ||
16 | 0x00000000, | ||
17 | 0x00000000, | ||
18 | 0x00000000, | ||
19 | 0x00000000, | ||
20 | 0x00000000, | ||
21 | 0x00000000, | ||
22 | 0x00000000, | ||
23 | 0x00000000, | ||
24 | 0x00000000, | ||
25 | 0x000000c0, | ||
26 | 0x012c0090, | ||
27 | 0x000000c1, | ||
28 | 0x01300090, | ||
29 | 0x000000c3, | ||
30 | 0x012c0090, | ||
31 | 0x000000c4, | ||
32 | 0x012c0090, | ||
33 | 0x000000c8, | ||
34 | 0x012c0090, | ||
35 | 0x000000ce, | ||
36 | 0x012c0090, | ||
37 | 0x00000000, | ||
38 | 0x0417e91c, | ||
39 | 0x04400204, | ||
40 | 0x28404004, | ||
41 | 0x00404044, | ||
42 | 0x34404094, | ||
43 | 0x184040d0, | ||
44 | 0x004040f8, | ||
45 | 0x08404130, | ||
46 | 0x08404150, | ||
47 | 0x04404164, | ||
48 | 0x08404174, | ||
49 | 0x1c404200, | ||
50 | 0x34404404, | ||
51 | 0x0c404460, | ||
52 | 0x00404480, | ||
53 | 0x00404498, | ||
54 | 0x0c404604, | ||
55 | 0x7c404618, | ||
56 | 0x50404698, | ||
57 | 0x044046f0, | ||
58 | 0x54404700, | ||
59 | 0x00405800, | ||
60 | 0x08405830, | ||
61 | 0x00405854, | ||
62 | 0x0c405870, | ||
63 | 0x04405a00, | ||
64 | 0x00405a18, | ||
65 | 0x00406020, | ||
66 | 0x0c406028, | ||
67 | 0x044064a8, | ||
68 | 0x044064b4, | ||
69 | 0x00407804, | ||
70 | 0x1440780c, | ||
71 | 0x004078bc, | ||
72 | 0x18408000, | ||
73 | 0x00408064, | ||
74 | 0x08408800, | ||
75 | 0x0c408900, | ||
76 | 0x00408980, | ||
77 | 0x044064c0, | ||
78 | 0x00000000, | ||
79 | 0x00000000, | ||
80 | 0x00000000, | ||
81 | 0x00000000, | ||
82 | 0x00000000, | ||
83 | 0x00000000, | ||
84 | 0x00000000, | ||
85 | 0x00000000, | ||
86 | 0x00000000, | ||
87 | 0x00000000, | ||
88 | 0x00000000, | ||
89 | 0x00000000, | ||
90 | 0x00000000, | ||
91 | 0x00000000, | ||
92 | 0x00000000, | ||
93 | 0x00000000, | ||
94 | 0x00000000, | ||
95 | 0x00000000, | ||
96 | 0x00000000, | ||
97 | 0x00000000, | ||
98 | 0x00000000, | ||
99 | 0x00000000, | ||
100 | 0x00000000, | ||
101 | 0x00000000, | ||
102 | 0x00000000, | ||
103 | 0x00000000, | ||
104 | 0x00000000, | ||
105 | 0x00000000, | ||
106 | 0x00000000, | ||
107 | 0x00000000, | ||
108 | 0x00000000, | ||
109 | 0x00000000, | ||
110 | 0x00000000, | ||
111 | 0x00000000, | ||
112 | 0x00000000, | ||
113 | 0x00000000, | ||
114 | 0x00000000, | ||
115 | 0x00000000, | ||
116 | 0x00000000, | ||
117 | 0x00000000, | ||
118 | 0x00000000, | ||
119 | 0x00000000, | ||
120 | 0x00000000, | ||
121 | 0x00000000, | ||
122 | 0x00000000, | ||
123 | 0x00000000, | ||
124 | 0x00000000, | ||
125 | 0x00000000, | ||
126 | 0x00000000, | ||
127 | 0x00000000, | ||
128 | 0x00000000, | ||
129 | 0x00000000, | ||
130 | 0x00000000, | ||
131 | 0x00000000, | ||
132 | 0x00000000, | ||
133 | 0x00000000, | ||
134 | 0x00000000, | ||
135 | 0x00000000, | ||
136 | 0x00000000, | ||
137 | 0x00000000, | ||
138 | 0x00000000, | ||
139 | 0x00000000, | ||
140 | 0x00000000, | ||
141 | 0x00000000, | ||
142 | 0x00000000, | ||
143 | 0x00000000, | ||
144 | 0x00000000, | ||
145 | 0x00000000, | ||
146 | 0x00000000, | ||
147 | 0x00000000, | ||
148 | 0x00000000, | ||
149 | 0x00000000, | ||
150 | 0x00000000, | ||
151 | 0x00000000, | ||
152 | 0x00000000, | ||
153 | 0x00000000, | ||
154 | 0x00000000, | ||
155 | 0x00000000, | ||
156 | 0x00000000, | ||
157 | 0x00000000, | ||
158 | 0x00000000, | ||
159 | 0x00000000, | ||
160 | 0x00000000, | ||
161 | 0x00000000, | ||
162 | 0x00000000, | ||
163 | 0x00000000, | ||
164 | 0x00000000, | ||
165 | 0x00000000, | ||
166 | 0x00000000, | ||
167 | 0x00000000, | ||
168 | 0x00000000, | ||
169 | 0x00000000, | ||
170 | 0x00000000, | ||
171 | 0x00000000, | ||
172 | 0x00000000, | ||
173 | 0x00000000, | ||
174 | 0x00000000, | ||
175 | 0x00000000, | ||
176 | 0x00000000, | ||
177 | 0x00000000, | ||
178 | 0x00000000, | ||
179 | 0x00000000, | ||
180 | 0x00000000, | ||
181 | 0x00000000, | ||
182 | 0x00000000, | ||
183 | 0x00000000, | ||
184 | 0x00000000, | ||
185 | 0x00000000, | ||
186 | 0x00000000, | ||
187 | 0x00000000, | ||
188 | 0x00000000, | ||
189 | 0x00000000, | ||
190 | 0x00000000, | ||
191 | 0x00000000, | ||
192 | 0x00000000, | ||
193 | 0x00000000, | ||
194 | 0x00000000, | ||
195 | }; | ||
196 | |||
197 | uint32_t nvc0_grhub_code[] = { | ||
198 | 0x03090ef5, | ||
199 | 0x9800d898, | ||
200 | 0x86f001d9, | ||
201 | 0x0489b808, | ||
202 | 0xf00c1bf4, | ||
203 | 0x21f502f7, | ||
204 | 0x00f802ec, | ||
205 | 0xb60798c4, | ||
206 | 0x8dbb0384, | ||
207 | 0x0880b600, | ||
208 | 0x80008e80, | ||
209 | 0x90b6018f, | ||
210 | 0x0f94f001, | ||
211 | 0xf801d980, | ||
212 | 0x0131f400, | ||
213 | 0x9800d898, | ||
214 | 0x89b801d9, | ||
215 | 0x210bf404, | ||
216 | 0xb60789c4, | ||
217 | 0x9dbb0394, | ||
218 | 0x0890b600, | ||
219 | 0x98009e98, | ||
220 | 0x80b6019f, | ||
221 | 0x0f84f001, | ||
222 | 0xf400d880, | ||
223 | 0x00f80132, | ||
224 | 0x0728b7f1, | ||
225 | 0xb906b4b6, | ||
226 | 0xc9f002ec, | ||
227 | 0x00bcd01f, | ||
228 | 0xc800bccf, | ||
229 | 0x1bf41fcc, | ||
230 | 0x06a7f0fa, | ||
231 | 0x010321f5, | ||
232 | 0xf840bfcf, | ||
233 | 0x28b7f100, | ||
234 | 0x06b4b607, | ||
235 | 0xb980bfd0, | ||
236 | 0xc9f002ec, | ||
237 | 0x1ec9f01f, | ||
238 | 0xcf00bcd0, | ||
239 | 0xccc800bc, | ||
240 | 0xfa1bf41f, | ||
241 | 0x87f100f8, | ||
242 | 0x84b60430, | ||
243 | 0x1ff9f006, | ||
244 | 0xf8008fd0, | ||
245 | 0x3087f100, | ||
246 | 0x0684b604, | ||
247 | 0xf80080d0, | ||
248 | 0x3c87f100, | ||
249 | 0x0684b608, | ||
250 | 0x99f094bd, | ||
251 | 0x0089d000, | ||
252 | 0x081887f1, | ||
253 | 0xd00684b6, | ||
254 | 0x87f1008a, | ||
255 | 0x84b60400, | ||
256 | 0x0088cf06, | ||
257 | 0xf4888aff, | ||
258 | 0x87f1f31b, | ||
259 | 0x84b6085c, | ||
260 | 0xf094bd06, | ||
261 | 0x89d00099, | ||
262 | 0xf100f800, | ||
263 | 0xb6083c87, | ||
264 | 0x94bd0684, | ||
265 | 0xd00099f0, | ||
266 | 0x87f10089, | ||
267 | 0x84b60818, | ||
268 | 0x008ad006, | ||
269 | 0x040087f1, | ||
270 | 0xcf0684b6, | ||
271 | 0x8aff0088, | ||
272 | 0xf30bf488, | ||
273 | 0x085c87f1, | ||
274 | 0xbd0684b6, | ||
275 | 0x0099f094, | ||
276 | 0xf80089d0, | ||
277 | 0x9894bd00, | ||
278 | 0x85b600e8, | ||
279 | 0x0180b61a, | ||
280 | 0xbb0284b6, | ||
281 | 0xe0b60098, | ||
282 | 0x04efb804, | ||
283 | 0xb9eb1bf4, | ||
284 | 0x00f8029f, | ||
285 | 0x083c87f1, | ||
286 | 0xbd0684b6, | ||
287 | 0x0199f094, | ||
288 | 0xf10089d0, | ||
289 | 0xb6071087, | ||
290 | 0x94bd0684, | ||
291 | 0xf405bbfd, | ||
292 | 0x8bd0090b, | ||
293 | 0x0099f000, | ||
294 | 0xf405eefd, | ||
295 | 0x8ed00c0b, | ||
296 | 0xc08fd080, | ||
297 | 0xb70199f0, | ||
298 | 0xc8010080, | ||
299 | 0xb4b600ab, | ||
300 | 0x0cb9f010, | ||
301 | 0xb601aec8, | ||
302 | 0xbefd11e4, | ||
303 | 0x008bd005, | ||
304 | 0xf0008ecf, | ||
305 | 0x0bf41fe4, | ||
306 | 0x00ce98fa, | ||
307 | 0xd005e9fd, | ||
308 | 0xc0b6c08e, | ||
309 | 0x04cdb804, | ||
310 | 0xc8e81bf4, | ||
311 | 0x1bf402ab, | ||
312 | 0x008bcf18, | ||
313 | 0xb01fb4f0, | ||
314 | 0x1bf410b4, | ||
315 | 0x02a7f0f7, | ||
316 | 0xf4c921f4, | ||
317 | 0xabc81b0e, | ||
318 | 0x10b4b600, | ||
319 | 0xf00cb9f0, | ||
320 | 0x8bd012b9, | ||
321 | 0x008bcf00, | ||
322 | 0xf412bbc8, | ||
323 | 0x87f1fa1b, | ||
324 | 0x84b6085c, | ||
325 | 0xf094bd06, | ||
326 | 0x89d00199, | ||
327 | 0xf900f800, | ||
328 | 0x02a7f0a0, | ||
329 | 0xfcc921f4, | ||
330 | 0xf100f8a0, | ||
331 | 0xf04afc87, | ||
332 | 0x97f00283, | ||
333 | 0x0089d00c, | ||
334 | 0x020721f5, | ||
335 | 0x87f100f8, | ||
336 | 0x83f04afc, | ||
337 | 0x0d97f002, | ||
338 | 0xf50089d0, | ||
339 | 0xf8020721, | ||
340 | 0xfca7f100, | ||
341 | 0x02a3f04f, | ||
342 | 0x0500aba2, | ||
343 | 0xd00fc7f0, | ||
344 | 0xc7f000ac, | ||
345 | 0x00bcd00b, | ||
346 | 0x020721f5, | ||
347 | 0xf000aed0, | ||
348 | 0xbcd00ac7, | ||
349 | 0x0721f500, | ||
350 | 0xf100f802, | ||
351 | 0xb6083c87, | ||
352 | 0x94bd0684, | ||
353 | 0xd00399f0, | ||
354 | 0x21f50089, | ||
355 | 0xe7f00213, | ||
356 | 0x3921f503, | ||
357 | 0xfca7f102, | ||
358 | 0x02a3f046, | ||
359 | 0x0400aba0, | ||
360 | 0xf040a0d0, | ||
361 | 0xbcd001c7, | ||
362 | 0x0721f500, | ||
363 | 0x010c9202, | ||
364 | 0xf000acd0, | ||
365 | 0xbcd002c7, | ||
366 | 0x0721f500, | ||
367 | 0x2621f502, | ||
368 | 0x8087f102, | ||
369 | 0x0684b608, | ||
370 | 0xb70089cf, | ||
371 | 0x95220080, | ||
372 | 0x8ed008fe, | ||
373 | 0x408ed000, | ||
374 | 0xb6808acf, | ||
375 | 0xa0b606a5, | ||
376 | 0x00eabb01, | ||
377 | 0xb60480b6, | ||
378 | 0x1bf40192, | ||
379 | 0x08e4b6e8, | ||
380 | 0xf1f2efbc, | ||
381 | 0xb6085c87, | ||
382 | 0x94bd0684, | ||
383 | 0xd00399f0, | ||
384 | 0x00f80089, | ||
385 | 0xe7f1e0f9, | ||
386 | 0xe4b60814, | ||
387 | 0x00efd006, | ||
388 | 0x0c1ce7f1, | ||
389 | 0xf006e4b6, | ||
390 | 0xefd001f7, | ||
391 | 0xf8e0fc00, | ||
392 | 0xfe04bd00, | ||
393 | 0x07fe0004, | ||
394 | 0x0017f100, | ||
395 | 0x0227f012, | ||
396 | 0xf10012d0, | ||
397 | 0xfe05b917, | ||
398 | 0x17f10010, | ||
399 | 0x10d00400, | ||
400 | 0x0437f1c0, | ||
401 | 0x0634b604, | ||
402 | 0x200327f1, | ||
403 | 0xf10032d0, | ||
404 | 0xd0200427, | ||
405 | 0x27f10132, | ||
406 | 0x32d0200b, | ||
407 | 0x0c27f102, | ||
408 | 0x0732d020, | ||
409 | 0x0c2427f1, | ||
410 | 0xb90624b6, | ||
411 | 0x23d00003, | ||
412 | 0x0427f100, | ||
413 | 0x0023f087, | ||
414 | 0xb70012d0, | ||
415 | 0xf0010012, | ||
416 | 0x12d00427, | ||
417 | 0x1031f400, | ||
418 | 0x9604e7f1, | ||
419 | 0xf440e3f0, | ||
420 | 0xf1c76821, | ||
421 | 0x01018090, | ||
422 | 0x801ff4f0, | ||
423 | 0x17f0000f, | ||
424 | 0x041fbb01, | ||
425 | 0xf10112b6, | ||
426 | 0xb6040c27, | ||
427 | 0x21d00624, | ||
428 | 0x4021d000, | ||
429 | 0x080027f1, | ||
430 | 0xcf0624b6, | ||
431 | 0xf7f00022, | ||
432 | 0x08f0b654, | ||
433 | 0xb800f398, | ||
434 | 0x0bf40432, | ||
435 | 0x0034b00b, | ||
436 | 0xf8f11bf4, | ||
437 | 0x0017f100, | ||
438 | 0x02fe5801, | ||
439 | 0xf003ff58, | ||
440 | 0x0e8000e3, | ||
441 | 0x150f8014, | ||
442 | 0x013d21f5, | ||
443 | 0x070037f1, | ||
444 | 0x950634b6, | ||
445 | 0x34d00814, | ||
446 | 0x4034d000, | ||
447 | 0x130030b7, | ||
448 | 0xb6001fbb, | ||
449 | 0x3fd002f5, | ||
450 | 0x0815b600, | ||
451 | 0xb60110b6, | ||
452 | 0x1fb90814, | ||
453 | 0x6321f502, | ||
454 | 0x001fbb02, | ||
455 | 0xf1000398, | ||
456 | 0xf0200047, | ||
457 | 0x4ea05043, | ||
458 | 0x1fb90804, | ||
459 | 0x8d21f402, | ||
460 | 0x08004ea0, | ||
461 | 0xf4022fb9, | ||
462 | 0x4ea08d21, | ||
463 | 0xf4bd010c, | ||
464 | 0xa08d21f4, | ||
465 | 0xf401044e, | ||
466 | 0x4ea08d21, | ||
467 | 0xf7f00100, | ||
468 | 0x8d21f402, | ||
469 | 0x08004ea0, | ||
470 | 0xc86821f4, | ||
471 | 0x0bf41fff, | ||
472 | 0x044ea0fa, | ||
473 | 0x6821f408, | ||
474 | 0xb7001fbb, | ||
475 | 0xb6800040, | ||
476 | 0x1bf40132, | ||
477 | 0x0027f1b4, | ||
478 | 0x0624b608, | ||
479 | 0xb74021d0, | ||
480 | 0xbd080020, | ||
481 | 0x1f19f014, | ||
482 | 0xf40021d0, | ||
483 | 0x28f40031, | ||
484 | 0x08d7f000, | ||
485 | 0xf43921f4, | ||
486 | 0xe4b1f401, | ||
487 | 0x1bf54001, | ||
488 | 0x87f100d1, | ||
489 | 0x84b6083c, | ||
490 | 0xf094bd06, | ||
491 | 0x89d00499, | ||
492 | 0x0017f100, | ||
493 | 0x0614b60b, | ||
494 | 0xcf4012cf, | ||
495 | 0x13c80011, | ||
496 | 0x7e0bf41f, | ||
497 | 0xf41f23c8, | ||
498 | 0x20f95a0b, | ||
499 | 0xf10212b9, | ||
500 | 0xb6083c87, | ||
501 | 0x94bd0684, | ||
502 | 0xd00799f0, | ||
503 | 0x32f40089, | ||
504 | 0x0231f401, | ||
505 | 0x082921f5, | ||
506 | 0x085c87f1, | ||
507 | 0xbd0684b6, | ||
508 | 0x0799f094, | ||
509 | 0xfc0089d0, | ||
510 | 0x3c87f120, | ||
511 | 0x0684b608, | ||
512 | 0x99f094bd, | ||
513 | 0x0089d006, | ||
514 | 0xf50131f4, | ||
515 | 0xf1082921, | ||
516 | 0xb6085c87, | ||
517 | 0x94bd0684, | ||
518 | 0xd00699f0, | ||
519 | 0x0ef40089, | ||
520 | 0xb920f931, | ||
521 | 0x32f40212, | ||
522 | 0x0232f401, | ||
523 | 0x082921f5, | ||
524 | 0x17f120fc, | ||
525 | 0x14b60b00, | ||
526 | 0x0012d006, | ||
527 | 0xc8130ef4, | ||
528 | 0x0bf41f23, | ||
529 | 0x0131f40d, | ||
530 | 0xf50232f4, | ||
531 | 0xf1082921, | ||
532 | 0xb60b0c17, | ||
533 | 0x27f00614, | ||
534 | 0x0012d001, | ||
535 | 0x085c87f1, | ||
536 | 0xbd0684b6, | ||
537 | 0x0499f094, | ||
538 | 0xf50089d0, | ||
539 | 0xb0ff200e, | ||
540 | 0x1bf401e4, | ||
541 | 0x02f2b90d, | ||
542 | 0x07b521f5, | ||
543 | 0xb0420ef4, | ||
544 | 0x1bf402e4, | ||
545 | 0x3c87f12e, | ||
546 | 0x0684b608, | ||
547 | 0x99f094bd, | ||
548 | 0x0089d007, | ||
549 | 0xf40132f4, | ||
550 | 0x21f50232, | ||
551 | 0x87f10829, | ||
552 | 0x84b6085c, | ||
553 | 0xf094bd06, | ||
554 | 0x89d00799, | ||
555 | 0x110ef400, | ||
556 | 0xf010ef94, | ||
557 | 0x21f501f5, | ||
558 | 0x0ef502ec, | ||
559 | 0x17f1fed1, | ||
560 | 0x14b60820, | ||
561 | 0xf024bd06, | ||
562 | 0x12d01f29, | ||
563 | 0xbe0ef500, | ||
564 | 0xfe80f9fe, | ||
565 | 0x80f90188, | ||
566 | 0xa0f990f9, | ||
567 | 0xd0f9b0f9, | ||
568 | 0xf0f9e0f9, | ||
569 | 0xc4800acf, | ||
570 | 0x0bf404ab, | ||
571 | 0x00b7f11d, | ||
572 | 0x08d7f019, | ||
573 | 0xcf40becf, | ||
574 | 0x21f400bf, | ||
575 | 0x00b0b704, | ||
576 | 0x01e7f004, | ||
577 | 0xe400bed0, | ||
578 | 0xf40100ab, | ||
579 | 0xd7f00d0b, | ||
580 | 0x01e7f108, | ||
581 | 0x0421f440, | ||
582 | 0x0104b7f1, | ||
583 | 0xabffb0bd, | ||
584 | 0x0d0bf4b4, | ||
585 | 0x0c1ca7f1, | ||
586 | 0xd006a4b6, | ||
587 | 0x0ad000ab, | ||
588 | 0xfcf0fc40, | ||
589 | 0xfcd0fce0, | ||
590 | 0xfca0fcb0, | ||
591 | 0xfe80fc90, | ||
592 | 0x80fc0088, | ||
593 | 0xf80032f4, | ||
594 | 0x60e7f101, | ||
595 | 0x40e3f041, | ||
596 | 0xf401f7f0, | ||
597 | 0x21f48d21, | ||
598 | 0x04ffc868, | ||
599 | 0xf8fa0bf4, | ||
600 | 0x60e7f100, | ||
601 | 0x40e3f041, | ||
602 | 0x21f4f4bd, | ||
603 | 0xf100f88d, | ||
604 | 0xf04170e7, | ||
605 | 0xf5f040e3, | ||
606 | 0x8d21f410, | ||
607 | 0xe7f100f8, | ||
608 | 0xe3f04170, | ||
609 | 0x6821f440, | ||
610 | 0xf410f4f0, | ||
611 | 0x00f8f31b, | ||
612 | 0x0614e7f1, | ||
613 | 0xf106e4b6, | ||
614 | 0xd00270f7, | ||
615 | 0xf7f000ef, | ||
616 | 0x01f2b608, | ||
617 | 0xf1fd1bf4, | ||
618 | 0xd00770f7, | ||
619 | 0x00f800ef, | ||
620 | 0x086ce7f1, | ||
621 | 0xd006e4b6, | ||
622 | 0xe7f100ef, | ||
623 | 0xe3f08a14, | ||
624 | 0x8d21f440, | ||
625 | 0xa86ce7f1, | ||
626 | 0xf441e3f0, | ||
627 | 0x00f88d21, | ||
628 | 0x083c87f1, | ||
629 | 0xbd0684b6, | ||
630 | 0x0599f094, | ||
631 | 0xf00089d0, | ||
632 | 0x21f40ca7, | ||
633 | 0x2417f1c9, | ||
634 | 0x0614b60a, | ||
635 | 0xf10010d0, | ||
636 | 0xb60b0037, | ||
637 | 0x32d00634, | ||
638 | 0x0c17f140, | ||
639 | 0x0614b60a, | ||
640 | 0xd00747f0, | ||
641 | 0x14d00012, | ||
642 | 0x4014cf40, | ||
643 | 0xf41f44f0, | ||
644 | 0x32d0fa1b, | ||
645 | 0x000bfe00, | ||
646 | 0xb61f2af0, | ||
647 | 0x20b60424, | ||
648 | 0x3c87f102, | ||
649 | 0x0684b608, | ||
650 | 0x99f094bd, | ||
651 | 0x0089d008, | ||
652 | 0x0a0417f1, | ||
653 | 0xd00614b6, | ||
654 | 0x17f10012, | ||
655 | 0x14b60a20, | ||
656 | 0x0227f006, | ||
657 | 0x800023f1, | ||
658 | 0xf00012d0, | ||
659 | 0x27f11017, | ||
660 | 0x23f00300, | ||
661 | 0x0512fa02, | ||
662 | 0x87f103f8, | ||
663 | 0x84b6085c, | ||
664 | 0xf094bd06, | ||
665 | 0x89d00899, | ||
666 | 0xc1019800, | ||
667 | 0x981814b6, | ||
668 | 0x25b6c002, | ||
669 | 0x0512fd08, | ||
670 | 0xf1160180, | ||
671 | 0xb6083c87, | ||
672 | 0x94bd0684, | ||
673 | 0xd00999f0, | ||
674 | 0x27f10089, | ||
675 | 0x24b60a04, | ||
676 | 0x0021d006, | ||
677 | 0xf10127f0, | ||
678 | 0xb60a2017, | ||
679 | 0x12d00614, | ||
680 | 0x0017f100, | ||
681 | 0x0613f002, | ||
682 | 0xf80501fa, | ||
683 | 0x5c87f103, | ||
684 | 0x0684b608, | ||
685 | 0x99f094bd, | ||
686 | 0x0089d009, | ||
687 | 0x085c87f1, | ||
688 | 0xbd0684b6, | ||
689 | 0x0599f094, | ||
690 | 0xf80089d0, | ||
691 | 0x3121f500, | ||
692 | 0xb821f506, | ||
693 | 0x0ca7f006, | ||
694 | 0xf1c921f4, | ||
695 | 0xb60a1017, | ||
696 | 0x27f00614, | ||
697 | 0x0012d005, | ||
698 | 0xfd0012cf, | ||
699 | 0x1bf40522, | ||
700 | 0x4921f5fa, | ||
701 | 0x9800f806, | ||
702 | 0x27f18103, | ||
703 | 0x24b60a04, | ||
704 | 0x0023d006, | ||
705 | 0x34c434bd, | ||
706 | 0x0f1bf4ff, | ||
707 | 0x030057f1, | ||
708 | 0xfa0653f0, | ||
709 | 0x03f80535, | ||
710 | 0x98c04e98, | ||
711 | 0x21f4c14f, | ||
712 | 0x0830b68d, | ||
713 | 0xf40112b6, | ||
714 | 0x0398df1b, | ||
715 | 0x0023d016, | ||
716 | 0xf1800080, | ||
717 | 0xf0020017, | ||
718 | 0x01fa0613, | ||
719 | 0xf803f806, | ||
720 | 0x0611f400, | ||
721 | 0xf01102f4, | ||
722 | 0x21f510f7, | ||
723 | 0x21f50698, | ||
724 | 0x11f40631, | ||
725 | 0x02f7f01c, | ||
726 | 0x065721f5, | ||
727 | 0x066621f5, | ||
728 | 0x067821f5, | ||
729 | 0x21f5f4bd, | ||
730 | 0x21f50657, | ||
731 | 0x019806b8, | ||
732 | 0x1427f116, | ||
733 | 0x0624b604, | ||
734 | 0xf10020d0, | ||
735 | 0xf0a500e7, | ||
736 | 0x1fb941e3, | ||
737 | 0x8d21f402, | ||
738 | 0xf004e0b6, | ||
739 | 0x2cf001fc, | ||
740 | 0x0124b602, | ||
741 | 0xf405f2fd, | ||
742 | 0x17f18d21, | ||
743 | 0x13f04afc, | ||
744 | 0x0c27f002, | ||
745 | 0xf50012d0, | ||
746 | 0xf1020721, | ||
747 | 0xf047fc27, | ||
748 | 0x20d00223, | ||
749 | 0x012cf000, | ||
750 | 0xd00320b6, | ||
751 | 0xacf00012, | ||
752 | 0x06a5f001, | ||
753 | 0x9800b7f0, | ||
754 | 0x0d98140c, | ||
755 | 0x00e7f015, | ||
756 | 0x015c21f5, | ||
757 | 0xf508a7f0, | ||
758 | 0xf5010321, | ||
759 | 0xf4020721, | ||
760 | 0xa7f02201, | ||
761 | 0xc921f40c, | ||
762 | 0x0a1017f1, | ||
763 | 0xf00614b6, | ||
764 | 0x12d00527, | ||
765 | 0x0012cf00, | ||
766 | 0xf40522fd, | ||
767 | 0x02f4fa1b, | ||
768 | 0x02f7f032, | ||
769 | 0x065721f5, | ||
770 | 0x21f5f4bd, | ||
771 | 0x21f50698, | ||
772 | 0x21f50226, | ||
773 | 0xf4bd0666, | ||
774 | 0x065721f5, | ||
775 | 0x981011f4, | ||
776 | 0x11fd8001, | ||
777 | 0x070bf405, | ||
778 | 0x07df21f5, | ||
779 | 0x064921f5, | ||
780 | 0x000000f8, | ||
781 | 0x00000000, | ||
782 | 0x00000000, | ||
783 | 0x00000000, | ||
784 | 0x00000000, | ||
785 | 0x00000000, | ||
786 | 0x00000000, | ||
787 | 0x00000000, | ||
788 | 0x00000000, | ||
789 | 0x00000000, | ||
790 | 0x00000000, | ||
791 | 0x00000000, | ||
792 | 0x00000000, | ||
793 | 0x00000000, | ||
794 | 0x00000000, | ||
795 | 0x00000000, | ||
796 | 0x00000000, | ||
797 | 0x00000000, | ||
798 | 0x00000000, | ||
799 | 0x00000000, | ||
800 | 0x00000000, | ||
801 | 0x00000000, | ||
802 | 0x00000000, | ||
803 | 0x00000000, | ||
804 | 0x00000000, | ||
805 | 0x00000000, | ||
806 | 0x00000000, | ||
807 | 0x00000000, | ||
808 | 0x00000000, | ||
809 | 0x00000000, | ||
810 | 0x00000000, | ||
811 | 0x00000000, | ||
812 | 0x00000000, | ||
813 | 0x00000000, | ||
814 | 0x00000000, | ||
815 | 0x00000000, | ||
816 | 0x00000000, | ||
817 | 0x00000000, | ||
818 | 0x00000000, | ||
819 | 0x00000000, | ||
820 | 0x00000000, | ||
821 | 0x00000000, | ||
822 | 0x00000000, | ||
823 | 0x00000000, | ||
824 | 0x00000000, | ||
825 | 0x00000000, | ||
826 | 0x00000000, | ||
827 | 0x00000000, | ||
828 | 0x00000000, | ||
829 | 0x00000000, | ||
830 | 0x00000000, | ||
831 | 0x00000000, | ||
832 | 0x00000000, | ||
833 | 0x00000000, | ||
834 | 0x00000000, | ||
835 | 0x00000000, | ||
836 | 0x00000000, | ||
837 | 0x00000000, | ||
838 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c new file mode 100644 index 00000000000..b701c439c92 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c | |||
@@ -0,0 +1,223 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
30 | struct nvc0_instmem_priv { | ||
31 | struct nouveau_gpuobj *bar1_pgd; | ||
32 | struct nouveau_channel *bar1; | ||
33 | struct nouveau_gpuobj *bar3_pgd; | ||
34 | struct nouveau_channel *bar3; | ||
35 | }; | ||
36 | |||
37 | int | ||
38 | nvc0_instmem_suspend(struct drm_device *dev) | ||
39 | { | ||
40 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
41 | |||
42 | dev_priv->ramin_available = false; | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | void | ||
47 | nvc0_instmem_resume(struct drm_device *dev) | ||
48 | { | ||
49 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
50 | struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv; | ||
51 | |||
52 | nv_mask(dev, 0x100c80, 0x00000001, 0x00000000); | ||
53 | nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12); | ||
54 | nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12); | ||
55 | dev_priv->ramin_available = true; | ||
56 | } | ||
57 | |||
58 | static void | ||
59 | nvc0_channel_del(struct nouveau_channel **pchan) | ||
60 | { | ||
61 | struct nouveau_channel *chan; | ||
62 | |||
63 | chan = *pchan; | ||
64 | *pchan = NULL; | ||
65 | if (!chan) | ||
66 | return; | ||
67 | |||
68 | nouveau_vm_ref(NULL, &chan->vm, NULL); | ||
69 | if (drm_mm_initialized(&chan->ramin_heap)) | ||
70 | drm_mm_takedown(&chan->ramin_heap); | ||
71 | nouveau_gpuobj_ref(NULL, &chan->ramin); | ||
72 | kfree(chan); | ||
73 | } | ||
74 | |||
75 | static int | ||
76 | nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, | ||
77 | struct nouveau_channel **pchan, | ||
78 | struct nouveau_gpuobj *pgd, u64 vm_size) | ||
79 | { | ||
80 | struct nouveau_channel *chan; | ||
81 | int ret; | ||
82 | |||
83 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | ||
84 | if (!chan) | ||
85 | return -ENOMEM; | ||
86 | chan->dev = dev; | ||
87 | |||
88 | ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); | ||
89 | if (ret) { | ||
90 | nvc0_channel_del(&chan); | ||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000); | ||
95 | if (ret) { | ||
96 | nvc0_channel_del(&chan); | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | ret = nouveau_vm_ref(vm, &chan->vm, NULL); | ||
101 | if (ret) { | ||
102 | nvc0_channel_del(&chan); | ||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst)); | ||
107 | nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst)); | ||
108 | nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1)); | ||
109 | nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1)); | ||
110 | |||
111 | *pchan = chan; | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | int | ||
116 | nvc0_instmem_init(struct drm_device *dev) | ||
117 | { | ||
118 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
119 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | ||
120 | struct pci_dev *pdev = dev->pdev; | ||
121 | struct nvc0_instmem_priv *priv; | ||
122 | struct nouveau_vm *vm = NULL; | ||
123 | int ret; | ||
124 | |||
125 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
126 | if (!priv) | ||
127 | return -ENOMEM; | ||
128 | pinstmem->priv = priv; | ||
129 | |||
130 | /* BAR3 VM */ | ||
131 | ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0, | ||
132 | &dev_priv->bar3_vm); | ||
133 | if (ret) | ||
134 | goto error; | ||
135 | |||
136 | ret = nouveau_gpuobj_new(dev, NULL, | ||
137 | (pci_resource_len(pdev, 3) >> 12) * 8, 0, | ||
138 | NVOBJ_FLAG_DONT_MAP | | ||
139 | NVOBJ_FLAG_ZERO_ALLOC, | ||
140 | &dev_priv->bar3_vm->pgt[0].obj[0]); | ||
141 | if (ret) | ||
142 | goto error; | ||
143 | dev_priv->bar3_vm->pgt[0].refcount[0] = 1; | ||
144 | |||
145 | nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]); | ||
146 | |||
147 | ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, | ||
148 | NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd); | ||
149 | if (ret) | ||
150 | goto error; | ||
151 | |||
152 | ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd); | ||
153 | if (ret) | ||
154 | goto error; | ||
155 | nouveau_vm_ref(NULL, &vm, NULL); | ||
156 | |||
157 | ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3, | ||
158 | priv->bar3_pgd, pci_resource_len(dev->pdev, 3)); | ||
159 | if (ret) | ||
160 | goto error; | ||
161 | |||
162 | /* BAR1 VM */ | ||
163 | ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm); | ||
164 | if (ret) | ||
165 | goto error; | ||
166 | |||
167 | ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, | ||
168 | NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd); | ||
169 | if (ret) | ||
170 | goto error; | ||
171 | |||
172 | ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd); | ||
173 | if (ret) | ||
174 | goto error; | ||
175 | nouveau_vm_ref(NULL, &vm, NULL); | ||
176 | |||
177 | ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1, | ||
178 | priv->bar1_pgd, pci_resource_len(dev->pdev, 1)); | ||
179 | if (ret) | ||
180 | goto error; | ||
181 | |||
182 | /* channel vm */ | ||
183 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, | ||
184 | &dev_priv->chan_vm); | ||
185 | if (ret) | ||
186 | goto error; | ||
187 | |||
188 | nvc0_instmem_resume(dev); | ||
189 | return 0; | ||
190 | error: | ||
191 | nvc0_instmem_takedown(dev); | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | void | ||
196 | nvc0_instmem_takedown(struct drm_device *dev) | ||
197 | { | ||
198 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
199 | struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv; | ||
200 | struct nouveau_vm *vm = NULL; | ||
201 | |||
202 | nvc0_instmem_suspend(dev); | ||
203 | |||
204 | nv_wr32(dev, 0x1704, 0x00000000); | ||
205 | nv_wr32(dev, 0x1714, 0x00000000); | ||
206 | |||
207 | nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); | ||
208 | |||
209 | nvc0_channel_del(&priv->bar1); | ||
210 | nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); | ||
211 | nouveau_gpuobj_ref(NULL, &priv->bar1_pgd); | ||
212 | |||
213 | nvc0_channel_del(&priv->bar3); | ||
214 | nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL); | ||
215 | nouveau_vm_ref(NULL, &vm, priv->bar3_pgd); | ||
216 | nouveau_gpuobj_ref(NULL, &priv->bar3_pgd); | ||
217 | nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); | ||
218 | nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); | ||
219 | |||
220 | dev_priv->engine.instmem.priv = NULL; | ||
221 | kfree(priv); | ||
222 | } | ||
223 | |||
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c new file mode 100644 index 00000000000..9e352944a35 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | |||
27 | #include "nouveau_drv.h" | ||
28 | #include "nouveau_vm.h" | ||
29 | |||
void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
		struct nouveau_gpuobj *pgt[2])
{
	u32 pde[2] = { 0, 0 };

	/* The two halves of a PDE are deliberately crossed: pgt[0] fills
	 * the second 32-bit word and pgt[1] the first.  Bit 0 is the
	 * present bit; the address is stored in units of 256 bytes.
	 * NOTE(review): presumably pgt[0]/pgt[1] are the large/small page
	 * tables — confirm against the callers in nouveau_vm.c. */
	if (pgt[0])
		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
	if (pgt[1])
		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);

	/* each PDE is 8 bytes: low word at +0, high word at +4 */
	nv_wo32(pgd, (index * 8) + 0, pde[0]);
	nv_wo32(pgd, (index * 8) + 4, pde[1]);
}
44 | |||
45 | static inline u64 | ||
46 | nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) | ||
47 | { | ||
48 | phys >>= 8; | ||
49 | |||
50 | phys |= 0x00000001; /* present */ | ||
51 | if (vma->access & NV_MEM_ACCESS_SYS) | ||
52 | phys |= 0x00000002; | ||
53 | |||
54 | phys |= ((u64)target << 32); | ||
55 | phys |= ((u64)memtype << 36); | ||
56 | |||
57 | return phys; | ||
58 | } | ||
59 | |||
60 | void | ||
61 | nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | ||
62 | struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) | ||
63 | { | ||
64 | u32 next = 1 << (vma->node->type - 8); | ||
65 | |||
66 | phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); | ||
67 | pte <<= 3; | ||
68 | while (cnt--) { | ||
69 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); | ||
70 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); | ||
71 | phys += next; | ||
72 | pte += 8; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | void | ||
77 | nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | ||
78 | struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) | ||
79 | { | ||
80 | pte <<= 3; | ||
81 | while (cnt--) { | ||
82 | u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5); | ||
83 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); | ||
84 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); | ||
85 | pte += 8; | ||
86 | } | ||
87 | } | ||
88 | |||
89 | void | ||
90 | nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) | ||
91 | { | ||
92 | pte <<= 3; | ||
93 | while (cnt--) { | ||
94 | nv_wo32(pgt, pte + 0, 0x00000000); | ||
95 | nv_wo32(pgt, pte + 4, 0x00000000); | ||
96 | pte += 8; | ||
97 | } | ||
98 | } | ||
99 | |||
void
nvc0_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct drm_device *dev = vm->dev;
	struct nouveau_vm_pgd *vpgd;
	unsigned long flags;
	u32 engine;

	/* engine bitmask written to 0x100cbc: bit 0 always set, bit 2
	 * additionally set when flushing a BAR VM.  NOTE(review): exact
	 * per-bit meaning inferred from this usage — confirm. */
	engine = 1;
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
		engine |= 4;

	/* make sure pending instmem writes are visible before the flush */
	pinstmem->flush(vm->dev);

	/* vm_lock serialises access to the shared TLB-flush registers;
	 * one flush is issued per page directory attached to this VM */
	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc to more it decreases
		 */
		if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
			NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
				 nv_rd32(dev, 0x100c80), engine);
		}
		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
		/* wait for flush to be queued? */
		if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
			NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
				 nv_rd32(dev, 0x100c80), engine);
		}
	}
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c new file mode 100644 index 00000000000..e45a24d84e9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "nouveau_drv.h" | ||
27 | #include "nouveau_mm.h" | ||
28 | |||
/* Per-memtype capability table, indexed by the 8-bit memtype extracted
 * from a GEM object's tile flags (see nvc0_vram_flags_valid):
 * 0 = unsupported
 * 1 = non-compressed
 * 3 = compressed
 */
static const u8 types[256] = {
	1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
	0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
	3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
	3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
};
51 | |||
52 | bool | ||
53 | nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags) | ||
54 | { | ||
55 | u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8; | ||
56 | return likely((types[memtype] == 1)); | ||
57 | } | ||
58 | |||
/* Allocate VRAM for an object of @size bytes (alignment @align, minimum
 * contiguous chunk @ncmin), possibly spread across several mm regions.
 * On success *pmem points at a new nouveau_mem whose offset is that of
 * the first region.  Returns 0 or a negative errno. */
int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
	      u32 type, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int ret;

	/* the mm allocator works in 4KiB pages */
	size >>= 12;
	align >>= 12;
	ncmin >>= 12;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->dev = dev_priv->dev;
	mem->memtype = (type & 0xff);
	mem->size = size; /* total page count; the loop below consumes the local */

	mutex_lock(&mm->mutex);
	do {
		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
		if (ret) {
			/* drop the lock first: the generic teardown helper
			 * (shared with nv50) takes mm->mutex itself */
			mutex_unlock(&mm->mutex);
			nv50_vram_del(dev, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&mm->mutex);

	/* the object's base address is that of its first region */
	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
101 | |||
/* Detect the amount of VRAM and initialise the mm allocator over it,
 * excluding the reserved head (VGA memory) and tail (VBIOS etc). */
int
nvc0_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	u32 length;

	/* NOTE(review): presumably 0x10f20c is per-partition size in MiB
	 * and 0x121c74 the partition count — confirm against hw docs */
	dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
	dev_priv->vram_size *= nv_rd32(dev, 0x121c74);

	length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;

	return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
}
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig new file mode 100644 index 00000000000..9a8cbdd9836 --- /dev/null +++ b/drivers/gpu/ion/Kconfig | |||
@@ -0,0 +1,17 @@ | |||
1 | menuconfig ION | ||
2 | tristate "Ion Memory Manager" | ||
3 | select GENERIC_ALLOCATOR | ||
4 | help | ||
	  Choose this option to enable the ION Memory Manager.
6 | |||
7 | config ION_IOMMU | ||
8 | bool | ||
9 | |||
10 | config ION_TEGRA | ||
11 | tristate "Ion for Tegra" | ||
12 | depends on ARCH_TEGRA && ION | ||
13 | select TEGRA_IOMMU_SMMU if !ARCH_TEGRA_2x_SOC | ||
14 | select ION_IOMMU if TEGRA_IOMMU_SMMU | ||
15 | help | ||
16 | Choose this option if you wish to use ion on an nVidia Tegra. | ||
17 | |||
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile new file mode 100644 index 00000000000..4ddc78e9d41 --- /dev/null +++ b/drivers/gpu/ion/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o | ||
2 | obj-$(CONFIG_ION_IOMMU) += ion_iommu_heap.o | ||
3 | obj-$(CONFIG_ION_TEGRA) += tegra/ | ||
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c new file mode 100644 index 00000000000..512ebc5cc8e --- /dev/null +++ b/drivers/gpu/ion/ion.c | |||
@@ -0,0 +1,1152 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ | ||
18 | |||
19 | #include <linux/device.h> | ||
20 | #include <linux/file.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/anon_inodes.h> | ||
23 | #include <linux/ion.h> | ||
24 | #include <linux/list.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/mm_types.h> | ||
28 | #include <linux/rbtree.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | #include <linux/debugfs.h> | ||
34 | |||
35 | #include "ion_priv.h" | ||
36 | #define DEBUG | ||
37 | |||
38 | /* this function should only be called while dev->lock is held */ | ||
39 | static void ion_buffer_add(struct ion_device *dev, | ||
40 | struct ion_buffer *buffer) | ||
41 | { | ||
42 | struct rb_node **p = &dev->buffers.rb_node; | ||
43 | struct rb_node *parent = NULL; | ||
44 | struct ion_buffer *entry; | ||
45 | |||
46 | while (*p) { | ||
47 | parent = *p; | ||
48 | entry = rb_entry(parent, struct ion_buffer, node); | ||
49 | |||
50 | if (buffer < entry) { | ||
51 | p = &(*p)->rb_left; | ||
52 | } else if (buffer > entry) { | ||
53 | p = &(*p)->rb_right; | ||
54 | } else { | ||
55 | pr_err("buffer already found."); | ||
56 | BUG(); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | rb_link_node(&buffer->node, parent, p); | ||
61 | rb_insert_color(&buffer->node, &dev->buffers); | ||
62 | } | ||
63 | |||
64 | /* this function should only be called while dev->lock is held */ | ||
65 | static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | ||
66 | struct ion_device *dev, | ||
67 | unsigned long len, | ||
68 | unsigned long align, | ||
69 | unsigned long flags) | ||
70 | { | ||
71 | struct ion_buffer *buffer; | ||
72 | int ret; | ||
73 | |||
74 | buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); | ||
75 | if (!buffer) | ||
76 | return ERR_PTR(-ENOMEM); | ||
77 | |||
78 | buffer->heap = heap; | ||
79 | kref_init(&buffer->ref); | ||
80 | |||
81 | ret = heap->ops->allocate(heap, buffer, len, align, flags); | ||
82 | if (ret) { | ||
83 | kfree(buffer); | ||
84 | return ERR_PTR(ret); | ||
85 | } | ||
86 | buffer->dev = dev; | ||
87 | buffer->size = len; | ||
88 | mutex_init(&buffer->lock); | ||
89 | ion_buffer_add(dev, buffer); | ||
90 | return buffer; | ||
91 | } | ||
92 | |||
/* kref release callback: runs when the final reference to a buffer is
 * dropped (see ion_buffer_put).  Frees the heap memory, then unlinks
 * the buffer from the device tree under dev->lock. */
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}
104 | |||
/* Take an additional reference on @buffer. */
void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

/* Drop a reference on @buffer; the last put frees it via
 * ion_buffer_destroy.  Returns nonzero if the buffer was released. */
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}
114 | |||
115 | struct ion_handle *ion_handle_create(struct ion_client *client, | ||
116 | struct ion_buffer *buffer) | ||
117 | { | ||
118 | struct ion_handle *handle; | ||
119 | |||
120 | handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); | ||
121 | if (!handle) | ||
122 | return ERR_PTR(-ENOMEM); | ||
123 | kref_init(&handle->ref); | ||
124 | rb_init_node(&handle->node); | ||
125 | handle->client = client; | ||
126 | ion_buffer_get(buffer); | ||
127 | handle->buffer = buffer; | ||
128 | |||
129 | return handle; | ||
130 | } | ||
131 | |||
/* kref release callback for ion_handle_put: drops the buffer reference
 * taken at creation and unlinks the handle from the client's tree. */
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while it's map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	/* the node stays empty until ion_handle_add links it */
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}
145 | |||
/* Return the buffer a handle refers to (no reference is taken). */
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

/* Take an additional reference on @handle. */
void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Drop a reference on @handle; the last put destroys it via
 * ion_handle_destroy.  Returns nonzero if the handle was released. */
int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
160 | |||
161 | static struct ion_handle *ion_handle_lookup(struct ion_client *client, | ||
162 | struct ion_buffer *buffer) | ||
163 | { | ||
164 | struct rb_node *n; | ||
165 | |||
166 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
167 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
168 | node); | ||
169 | if (handle->buffer == buffer) | ||
170 | return handle; | ||
171 | } | ||
172 | return NULL; | ||
173 | } | ||
174 | |||
175 | bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) | ||
176 | { | ||
177 | struct rb_node *n = client->handles.rb_node; | ||
178 | |||
179 | while (n) { | ||
180 | struct ion_handle *handle_node = rb_entry(n, struct ion_handle, | ||
181 | node); | ||
182 | if (handle < handle_node) | ||
183 | n = n->rb_left; | ||
184 | else if (handle > handle_node) | ||
185 | n = n->rb_right; | ||
186 | else | ||
187 | return true; | ||
188 | } | ||
189 | WARN(1, "invalid handle passed h=0x%x,comm=%d\n", handle, | ||
190 | current->group_leader->comm); | ||
191 | return false; | ||
192 | } | ||
193 | |||
194 | void ion_handle_add(struct ion_client *client, struct ion_handle *handle) | ||
195 | { | ||
196 | struct rb_node **p = &client->handles.rb_node; | ||
197 | struct rb_node *parent = NULL; | ||
198 | struct ion_handle *entry; | ||
199 | |||
200 | while (*p) { | ||
201 | parent = *p; | ||
202 | entry = rb_entry(parent, struct ion_handle, node); | ||
203 | |||
204 | if (handle < entry) | ||
205 | p = &(*p)->rb_left; | ||
206 | else if (handle > entry) | ||
207 | p = &(*p)->rb_right; | ||
208 | else | ||
209 | WARN(1, "%s: buffer already found.", __func__); | ||
210 | } | ||
211 | |||
212 | rb_link_node(&handle->node, parent, p); | ||
213 | rb_insert_color(&handle->node, &client->handles); | ||
214 | } | ||
215 | |||
216 | struct ion_handle *ion_alloc(struct ion_client *client, size_t len, | ||
217 | size_t align, unsigned int flags) | ||
218 | { | ||
219 | struct rb_node *n; | ||
220 | struct ion_handle *handle; | ||
221 | struct ion_device *dev = client->dev; | ||
222 | struct ion_buffer *buffer = NULL; | ||
223 | |||
224 | /* | ||
225 | * traverse the list of heaps available in this system in priority | ||
226 | * order. If the heap type is supported by the client, and matches the | ||
227 | * request of the caller allocate from it. Repeat until allocate has | ||
228 | * succeeded or all heaps have been tried | ||
229 | */ | ||
230 | mutex_lock(&dev->lock); | ||
231 | for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { | ||
232 | struct ion_heap *heap = rb_entry(n, struct ion_heap, node); | ||
233 | /* if the client doesn't support this heap type */ | ||
234 | if (!((1 << heap->type) & client->heap_mask)) | ||
235 | continue; | ||
236 | /* if the caller didn't specify this heap type */ | ||
237 | if (!((1 << heap->id) & flags)) | ||
238 | continue; | ||
239 | buffer = ion_buffer_create(heap, dev, len, align, flags); | ||
240 | if (!IS_ERR_OR_NULL(buffer)) | ||
241 | break; | ||
242 | } | ||
243 | mutex_unlock(&dev->lock); | ||
244 | |||
245 | if (IS_ERR_OR_NULL(buffer)) | ||
246 | return ERR_PTR(PTR_ERR(buffer)); | ||
247 | |||
248 | handle = ion_handle_create(client, buffer); | ||
249 | |||
250 | if (IS_ERR_OR_NULL(handle)) | ||
251 | goto end; | ||
252 | |||
253 | /* | ||
254 | * ion_buffer_create will create a buffer with a ref_cnt of 1, | ||
255 | * and ion_handle_create will take a second reference, drop one here | ||
256 | */ | ||
257 | ion_buffer_put(buffer); | ||
258 | |||
259 | mutex_lock(&client->lock); | ||
260 | ion_handle_add(client, handle); | ||
261 | mutex_unlock(&client->lock); | ||
262 | return handle; | ||
263 | |||
264 | end: | ||
265 | ion_buffer_put(buffer); | ||
266 | return handle; | ||
267 | } | ||
268 | |||
269 | void ion_free(struct ion_client *client, struct ion_handle *handle) | ||
270 | { | ||
271 | bool valid_handle; | ||
272 | |||
273 | BUG_ON(client != handle->client); | ||
274 | |||
275 | mutex_lock(&client->lock); | ||
276 | valid_handle = ion_handle_validate(client, handle); | ||
277 | mutex_unlock(&client->lock); | ||
278 | |||
279 | if (!valid_handle) { | ||
280 | WARN(1, "%s: invalid handle passed to free.\n", __func__); | ||
281 | return; | ||
282 | } | ||
283 | ion_handle_put(handle); | ||
284 | } | ||
285 | |||
286 | static bool _ion_map(int *buffer_cnt, int *handle_cnt) | ||
287 | { | ||
288 | bool map; | ||
289 | |||
290 | BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); | ||
291 | |||
292 | if (*buffer_cnt) | ||
293 | map = false; | ||
294 | else | ||
295 | map = true; | ||
296 | if (*handle_cnt == 0) | ||
297 | (*buffer_cnt)++; | ||
298 | (*handle_cnt)++; | ||
299 | return map; | ||
300 | } | ||
301 | |||
302 | static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) | ||
303 | { | ||
304 | BUG_ON(*handle_cnt == 0); | ||
305 | (*handle_cnt)--; | ||
306 | if (*handle_cnt != 0) | ||
307 | return false; | ||
308 | BUG_ON(*buffer_cnt == 0); | ||
309 | (*buffer_cnt)--; | ||
310 | if (*buffer_cnt == 0) | ||
311 | return true; | ||
312 | return false; | ||
313 | } | ||
314 | |||
/* Query the physical address and length of @handle's buffer via the
 * heap's phys op.  Returns 0 on success, -EINVAL for a bad handle, or
 * -ENODEV when the heap does not support physical addresses. */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("ion_phys is not implemented by this heap.\n");
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	/* NOTE(review): the client lock is dropped before calling into the
	 * heap — the buffer could be freed concurrently here; confirm the
	 * callers guarantee the handle stays alive. */
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
338 | |||
/* Map @handle's buffer into kernel space, reusing the existing mapping
 * when one is already active.  Returns the kernel address or an
 * ERR_PTR.  Mapping counts are tracked per-handle and per-buffer via
 * _ion_map/_ion_unmap. */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "invalid handle passed to map_kernel.\n");
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("map_kernel is not implemented by this heap.\n");
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	/* first mapping of the buffer does the real work; later callers
	 * just reuse the cached address */
	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		/* NOTE(review): on failure this stores the ERR_PTR/NULL in
		 * buffer->vaddr; harmless only because kmap_cnt is back to
		 * zero, so the stale value is never handed out */
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
373 | |||
374 | struct scatterlist *ion_map_dma(struct ion_client *client, | ||
375 | struct ion_handle *handle) | ||
376 | { | ||
377 | struct ion_buffer *buffer; | ||
378 | struct scatterlist *sglist; | ||
379 | |||
380 | mutex_lock(&client->lock); | ||
381 | if (!ion_handle_validate(client, handle)) { | ||
382 | WARN(1, "invalid handle passed to map_dma.\n"); | ||
383 | mutex_unlock(&client->lock); | ||
384 | return ERR_PTR(-EINVAL); | ||
385 | } | ||
386 | buffer = handle->buffer; | ||
387 | mutex_lock(&buffer->lock); | ||
388 | |||
389 | if (!handle->buffer->heap->ops->map_dma) { | ||
390 | pr_err("map_kernel is not implemented by this heap.\n"); | ||
391 | mutex_unlock(&buffer->lock); | ||
392 | mutex_unlock(&client->lock); | ||
393 | return ERR_PTR(-ENODEV); | ||
394 | } | ||
395 | if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { | ||
396 | sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); | ||
397 | if (IS_ERR_OR_NULL(sglist)) | ||
398 | _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); | ||
399 | buffer->sglist = sglist; | ||
400 | } else { | ||
401 | sglist = buffer->sglist; | ||
402 | } | ||
403 | mutex_unlock(&buffer->lock); | ||
404 | mutex_unlock(&client->lock); | ||
405 | return sglist; | ||
406 | } | ||
407 | |||
/* implemented by the iommu heap (see ion_iommu_heap.c) */
struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap,
					 struct ion_buffer *buf,
					 unsigned long addr);
/* Remap @handle's buffer to IOVA @addr via the iommu heap helper.
 * NOTE(review): the error message below says "map_dma" although this
 * is remap_dma; also the buffer's heap is not checked to actually be
 * the iommu heap before calling its helper — confirm callers. */
int ion_remap_dma(struct ion_client *client,
		  struct ion_handle *handle,
		  unsigned long addr)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("invalid handle passed to map_dma.\n");
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	ret = iommu_heap_remap_dma(buffer->heap, buffer, addr);

	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
433 | |||
/* Drop one kernel-mapping reference; the heap unmap runs when the
 * buffer's last kernel mapping goes away.
 * NOTE(review): unlike ion_map_kernel, the handle is not validated
 * here; an unbalanced unmap hits the BUG_ON inside _ion_unmap. */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
448 | |||
449 | void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) | ||
450 | { | ||
451 | struct ion_buffer *buffer; | ||
452 | |||
453 | mutex_lock(&client->lock); | ||
454 | buffer = handle->buffer; | ||
455 | mutex_lock(&buffer->lock); | ||
456 | if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { | ||
457 | buffer->heap->ops->unmap_dma(buffer->heap, buffer); | ||
458 | buffer->sglist = NULL; | ||
459 | } | ||
460 | mutex_unlock(&buffer->lock); | ||
461 | mutex_unlock(&client->lock); | ||
462 | } | ||
463 | |||
464 | |||
465 | struct ion_buffer *ion_share(struct ion_client *client, | ||
466 | struct ion_handle *handle) | ||
467 | { | ||
468 | bool valid_handle; | ||
469 | |||
470 | mutex_lock(&client->lock); | ||
471 | valid_handle = ion_handle_validate(client, handle); | ||
472 | mutex_unlock(&client->lock); | ||
473 | if (!valid_handle) { | ||
474 | WARN(1, "%s: invalid handle passed to share.\n", __func__); | ||
475 | return ERR_PTR(-EINVAL); | ||
476 | } | ||
477 | |||
478 | /* do not take an extra reference here, the burden is on the caller | ||
479 | * to make sure the buffer doesn't go away while it's passing it | ||
480 | * to another client -- ion_free should not be called on this handle | ||
481 | * until the buffer has been imported into the other client | ||
482 | */ | ||
483 | return handle->buffer; | ||
484 | } | ||
485 | |||
486 | struct ion_handle *ion_import(struct ion_client *client, | ||
487 | struct ion_buffer *buffer) | ||
488 | { | ||
489 | struct ion_handle *handle = NULL; | ||
490 | |||
491 | mutex_lock(&client->lock); | ||
492 | /* if a handle exists for this buffer just take a reference to it */ | ||
493 | handle = ion_handle_lookup(client, buffer); | ||
494 | if (!IS_ERR_OR_NULL(handle)) { | ||
495 | ion_handle_get(handle); | ||
496 | goto end; | ||
497 | } | ||
498 | handle = ion_handle_create(client, buffer); | ||
499 | if (IS_ERR_OR_NULL(handle)) { | ||
500 | pr_err("error during handle create\n"); | ||
501 | goto end; | ||
502 | } | ||
503 | ion_handle_add(client, handle); | ||
504 | end: | ||
505 | mutex_unlock(&client->lock); | ||
506 | return handle; | ||
507 | } | ||
508 | |||
509 | static const struct file_operations ion_share_fops; | ||
510 | |||
511 | struct ion_handle *ion_import_fd(struct ion_client *client, int fd) | ||
512 | { | ||
513 | struct file *file = fget(fd); | ||
514 | struct ion_handle *handle; | ||
515 | |||
516 | if (!file) { | ||
517 | pr_err("imported fd not found in file table.\n"); | ||
518 | return ERR_PTR(-EINVAL); | ||
519 | } | ||
520 | if (file->f_op != &ion_share_fops) { | ||
521 | pr_err("imported file is not a shared ion file.\n"); | ||
522 | handle = ERR_PTR(-EINVAL); | ||
523 | goto end; | ||
524 | } | ||
525 | handle = ion_import(client, file->private_data); | ||
526 | end: | ||
527 | fput(file); | ||
528 | return handle; | ||
529 | } | ||
530 | |||
531 | static int ion_debug_client_show(struct seq_file *s, void *unused) | ||
532 | { | ||
533 | struct ion_client *client = s->private; | ||
534 | struct rb_node *n; | ||
535 | size_t sizes[ION_NUM_HEAPS] = {0}; | ||
536 | const char *names[ION_NUM_HEAPS] = {0}; | ||
537 | int i; | ||
538 | |||
539 | mutex_lock(&client->lock); | ||
540 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
541 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
542 | node); | ||
543 | enum ion_heap_type type = handle->buffer->heap->type; | ||
544 | |||
545 | if (!names[type]) | ||
546 | names[type] = handle->buffer->heap->name; | ||
547 | sizes[type] += handle->buffer->size; | ||
548 | } | ||
549 | mutex_unlock(&client->lock); | ||
550 | |||
551 | seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); | ||
552 | for (i = 0; i < ION_NUM_HEAPS; i++) { | ||
553 | if (!names[i]) | ||
554 | continue; | ||
555 | seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i], | ||
556 | atomic_read(&client->ref.refcount)); | ||
557 | } | ||
558 | return 0; | ||
559 | } | ||
560 | |||
/* debugfs open: i_private carries the ion_client stored at file
 * creation time. */
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

/* seq_file operations for the per-client debugfs entry */
static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
572 | |||
573 | static struct ion_client *ion_client_lookup(struct ion_device *dev, | ||
574 | struct task_struct *task) | ||
575 | { | ||
576 | struct rb_node *n = dev->user_clients.rb_node; | ||
577 | struct ion_client *client; | ||
578 | |||
579 | mutex_lock(&dev->lock); | ||
580 | while (n) { | ||
581 | client = rb_entry(n, struct ion_client, node); | ||
582 | if (task == client->task) { | ||
583 | ion_client_get(client); | ||
584 | mutex_unlock(&dev->lock); | ||
585 | return client; | ||
586 | } else if (task < client->task) { | ||
587 | n = n->rb_left; | ||
588 | } else if (task > client->task) { | ||
589 | n = n->rb_right; | ||
590 | } | ||
591 | } | ||
592 | mutex_unlock(&dev->lock); | ||
593 | return NULL; | ||
594 | } | ||
595 | |||
/*
 * ion_client_create() - create (or reuse) an ion client for the caller
 * @dev:       ion device the client is opened against
 * @heap_mask: bitmask of heaps this client may allocate from
 * @name:      debug name (shown for kernel clients; user clients show pid)
 *
 * User-space clients are keyed on current->group_leader: if one already
 * exists its refcount is bumped (by ion_client_lookup) and it is
 * returned.  Kernel threads (PF_KTHREAD) never store a task pointer and
 * always get a fresh client in dev->kernel_clients.
 *
 * Returns the client or ERR_PTR(-ENOMEM).
 */
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			/* reusing: lookup took a client ref, so drop the
			   extra task ref taken above */
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;	/* keeps the task reference taken above */
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		/* user clients: tree keyed by task pointer value.
		 * NOTE(review): equal keys advance neither branch, so a
		 * duplicate would spin here; the lookup above should
		 * prevent that, but a racing creator between the lookup
		 * and taking dev->lock is not obviously excluded --
		 * confirm. */
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		/* kernel clients: keyed by the (unique) client pointer
		   itself, so this insertion always terminates */
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	/* per-client debugfs file named after the pid */
	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}
683 | |||
/*
 * kref release callback for an ion client: tear down every remaining
 * handle, unlink the client from the device tree it lives in (dropping
 * the task reference held for user clients), remove its debugfs entry
 * and free it.  Only ever invoked via ion_client_put().
 */
static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("\n");
	/* rb_first() again each pass: ion_handle_destroy erases the node */
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
708 | |||
/* take a reference on @client; paired with ion_client_put() */
void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}
713 | |||
/* drop a reference on @client; returns 1 if this put destroyed it */
int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}
718 | |||
/* public teardown entry point: just drops the caller's reference */
void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}
723 | |||
/* release for a shared-buffer fd: drop the buffer ref taken at share time */
static int ion_share_release(struct inode *inode, struct file* file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("\n");
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}
735 | |||
/*
 * VMA open callback (runs when a mapping of an ion buffer is duplicated,
 * e.g. on fork or split).  Looks the caller's client back up and takes
 * buffer and handle references so they outlive this vma.  If the client
 * is already gone, vm_private_data is cleared so ion_vma_close() knows
 * to skip the matching puts.
 */
static void ion_vma_open(struct vm_area_struct *vma)
{

	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("\n");
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	ion_buffer_get(buffer);
	ion_handle_get(handle);
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}
758 | |||
/*
 * VMA close callback: drop the handle/client/buffer references taken
 * when this vma was created (in ion_share_mmap or ion_vma_open).  A
 * NULL vm_private_data means ion_vma_open found the client already gone
 * and took no references, so there is nothing to drop.
 */
static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("\n");
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	ion_buffer_put(buffer);
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}
782 | |||
783 | static struct vm_operations_struct ion_vm_ops = { | ||
784 | .open = ion_vma_open, | ||
785 | .close = ion_vma_close, | ||
786 | }; | ||
787 | |||
788 | static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) | ||
789 | { | ||
790 | struct ion_buffer *buffer = file->private_data; | ||
791 | unsigned long size = vma->vm_end - vma->vm_start; | ||
792 | struct ion_client *client; | ||
793 | struct ion_handle *handle; | ||
794 | int ret; | ||
795 | |||
796 | pr_debug("\n"); | ||
797 | /* make sure the client still exists, it's possible for the client to | ||
798 | have gone away but the map/share fd still to be around, take | ||
799 | a reference to it so it can't go away while this mapping exists */ | ||
800 | client = ion_client_lookup(buffer->dev, current->group_leader); | ||
801 | if (IS_ERR_OR_NULL(client)) { | ||
802 | WARN(1, "trying to mmap an ion handle in a process with no " | ||
803 | "ion client\n"); | ||
804 | return -EINVAL; | ||
805 | } | ||
806 | |||
807 | if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > | ||
808 | buffer->size)) { | ||
809 | WARN(1, "trying to map larger area than handle has available" | ||
810 | "\n"); | ||
811 | ret = -EINVAL; | ||
812 | goto err; | ||
813 | } | ||
814 | |||
815 | /* find the handle and take a reference to it */ | ||
816 | handle = ion_import(client, buffer); | ||
817 | if (IS_ERR_OR_NULL(handle)) { | ||
818 | ret = -EINVAL; | ||
819 | goto err; | ||
820 | } | ||
821 | ion_buffer_get(buffer); | ||
822 | |||
823 | if (!handle->buffer->heap->ops->map_user) { | ||
824 | pr_err("this heap does not define a method for mapping " | ||
825 | "to userspace\n"); | ||
826 | ret = -EINVAL; | ||
827 | goto err1; | ||
828 | } | ||
829 | |||
830 | mutex_lock(&buffer->lock); | ||
831 | /* now map it to userspace */ | ||
832 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); | ||
833 | mutex_unlock(&buffer->lock); | ||
834 | if (ret) { | ||
835 | pr_err("failure mapping buffer to userspace\n"); | ||
836 | goto err1; | ||
837 | } | ||
838 | |||
839 | vma->vm_ops = &ion_vm_ops; | ||
840 | /* move the handle into the vm_private_data so we can access it from | ||
841 | vma_open/close */ | ||
842 | vma->vm_private_data = handle; | ||
843 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
844 | atomic_read(&client->ref.refcount), | ||
845 | atomic_read(&handle->ref.refcount), | ||
846 | atomic_read(&buffer->ref.refcount)); | ||
847 | return 0; | ||
848 | |||
849 | err1: | ||
850 | /* drop the reference to the handle */ | ||
851 | ion_handle_put(handle); | ||
852 | err: | ||
853 | /* drop the reference to the client */ | ||
854 | ion_client_put(client); | ||
855 | return ret; | ||
856 | } | ||
857 | |||
/* fops installed on the anon inode created by ION_IOC_SHARE/MAP */
static const struct file_operations ion_share_fops = {
	.owner		= THIS_MODULE,
	.release	= ion_share_release,
	.mmap		= ion_share_mmap,
};
863 | |||
864 | static int ion_ioctl_share(struct file *parent, struct ion_client *client, | ||
865 | struct ion_handle *handle) | ||
866 | { | ||
867 | int fd = get_unused_fd(); | ||
868 | struct file *file; | ||
869 | |||
870 | if (fd < 0) | ||
871 | return -ENFILE; | ||
872 | |||
873 | file = anon_inode_getfile("ion_share_fd", &ion_share_fops, | ||
874 | handle->buffer, O_RDWR); | ||
875 | if (IS_ERR_OR_NULL(file)) | ||
876 | goto err; | ||
877 | ion_buffer_get(handle->buffer); | ||
878 | fd_install(fd, file); | ||
879 | |||
880 | return fd; | ||
881 | |||
882 | err: | ||
883 | put_unused_fd(fd); | ||
884 | return -ENFILE; | ||
885 | } | ||
886 | |||
887 | static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
888 | { | ||
889 | struct ion_client *client = filp->private_data; | ||
890 | |||
891 | switch (cmd) { | ||
892 | case ION_IOC_ALLOC: | ||
893 | { | ||
894 | struct ion_allocation_data data; | ||
895 | |||
896 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
897 | return -EFAULT; | ||
898 | data.handle = ion_alloc(client, data.len, data.align, | ||
899 | data.flags); | ||
900 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) | ||
901 | return -EFAULT; | ||
902 | break; | ||
903 | } | ||
904 | case ION_IOC_FREE: | ||
905 | { | ||
906 | struct ion_handle_data data; | ||
907 | bool valid; | ||
908 | |||
909 | if (copy_from_user(&data, (void __user *)arg, | ||
910 | sizeof(struct ion_handle_data))) | ||
911 | return -EFAULT; | ||
912 | mutex_lock(&client->lock); | ||
913 | valid = ion_handle_validate(client, data.handle); | ||
914 | mutex_unlock(&client->lock); | ||
915 | if (!valid) | ||
916 | return -EINVAL; | ||
917 | ion_free(client, data.handle); | ||
918 | break; | ||
919 | } | ||
920 | case ION_IOC_MAP: | ||
921 | case ION_IOC_SHARE: | ||
922 | { | ||
923 | struct ion_fd_data data; | ||
924 | |||
925 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
926 | return -EFAULT; | ||
927 | mutex_lock(&client->lock); | ||
928 | if (!ion_handle_validate(client, data.handle)) { | ||
929 | WARN(1, "invalid handle passed to share ioctl.\n"); | ||
930 | mutex_unlock(&client->lock); | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | data.fd = ion_ioctl_share(filp, client, data.handle); | ||
934 | mutex_unlock(&client->lock); | ||
935 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) | ||
936 | return -EFAULT; | ||
937 | break; | ||
938 | } | ||
939 | case ION_IOC_IMPORT: | ||
940 | { | ||
941 | struct ion_fd_data data; | ||
942 | if (copy_from_user(&data, (void __user *)arg, | ||
943 | sizeof(struct ion_fd_data))) | ||
944 | return -EFAULT; | ||
945 | |||
946 | data.handle = ion_import_fd(client, data.fd); | ||
947 | if (IS_ERR(data.handle)) | ||
948 | data.handle = NULL; | ||
949 | if (copy_to_user((void __user *)arg, &data, | ||
950 | sizeof(struct ion_fd_data))) | ||
951 | return -EFAULT; | ||
952 | break; | ||
953 | } | ||
954 | case ION_IOC_CUSTOM: | ||
955 | { | ||
956 | struct ion_device *dev = client->dev; | ||
957 | struct ion_custom_data data; | ||
958 | |||
959 | if (!dev->custom_ioctl) | ||
960 | return -ENOTTY; | ||
961 | if (copy_from_user(&data, (void __user *)arg, | ||
962 | sizeof(struct ion_custom_data))) | ||
963 | return -EFAULT; | ||
964 | return dev->custom_ioctl(client, data.cmd, data.arg); | ||
965 | } | ||
966 | default: | ||
967 | return -ENOTTY; | ||
968 | } | ||
969 | return 0; | ||
970 | } | ||
971 | |||
/* /dev/ion release: drop the client reference taken in ion_open() */
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("\n");
	ion_client_put(client);
	return 0;
}
980 | |||
981 | static int ion_open(struct inode *inode, struct file *file) | ||
982 | { | ||
983 | struct miscdevice *miscdev = file->private_data; | ||
984 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); | ||
985 | struct ion_client *client; | ||
986 | |||
987 | pr_debug("\n"); | ||
988 | client = ion_client_create(dev, -1, "user"); | ||
989 | if (IS_ERR_OR_NULL(client)) | ||
990 | return PTR_ERR(client); | ||
991 | file->private_data = client; | ||
992 | |||
993 | return 0; | ||
994 | } | ||
995 | |||
/* character-device fops for /dev/ion */
static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
1002 | |||
/*
 * Sum the sizes of all of @client's buffers that live on a heap of
 * @type.  client->lock protects the handle tree during the walk.
 */
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
1020 | |||
1021 | static int ion_debug_heap_show(struct seq_file *s, void *unused) | ||
1022 | { | ||
1023 | struct ion_heap *heap = s->private; | ||
1024 | struct ion_device *dev = heap->dev; | ||
1025 | struct rb_node *n; | ||
1026 | |||
1027 | seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); | ||
1028 | for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { | ||
1029 | struct ion_client *client = rb_entry(n, struct ion_client, | ||
1030 | node); | ||
1031 | char task_comm[TASK_COMM_LEN]; | ||
1032 | size_t size = ion_debug_heap_total(client, heap->type); | ||
1033 | if (!size) | ||
1034 | continue; | ||
1035 | |||
1036 | get_task_comm(task_comm, client->task); | ||
1037 | seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, | ||
1038 | size); | ||
1039 | } | ||
1040 | |||
1041 | for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { | ||
1042 | struct ion_client *client = rb_entry(n, struct ion_client, | ||
1043 | node); | ||
1044 | size_t size = ion_debug_heap_total(client, heap->type); | ||
1045 | if (!size) | ||
1046 | continue; | ||
1047 | seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, | ||
1048 | size); | ||
1049 | } | ||
1050 | return 0; | ||
1051 | } | ||
1052 | |||
/* debugfs open: bind ion_debug_heap_show to the heap in i_private */
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}
1057 | |||
/* seq_file-backed fops for the per-heap debugfs usage file */
static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1064 | |||
/*
 * Register @heap with @dev.  Heaps live in an rb-tree keyed by heap id;
 * a duplicate id is rejected with an error message and the heap is not
 * inserted (the caller is not told -- the function returns void).  A
 * debugfs file named after the heap is created on success.
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id ) {
			p = &(*p)->rb_right;
		} else {
			pr_err("can not insert multiple heaps with "
			       "id %d\n", heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
1095 | |||
1096 | struct ion_device *ion_device_create(long (*custom_ioctl) | ||
1097 | (struct ion_client *client, | ||
1098 | unsigned int cmd, | ||
1099 | unsigned long arg)) | ||
1100 | { | ||
1101 | struct ion_device *idev; | ||
1102 | int ret; | ||
1103 | |||
1104 | idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); | ||
1105 | if (!idev) | ||
1106 | return ERR_PTR(-ENOMEM); | ||
1107 | |||
1108 | idev->dev.minor = MISC_DYNAMIC_MINOR; | ||
1109 | idev->dev.name = "ion"; | ||
1110 | idev->dev.fops = &ion_fops; | ||
1111 | idev->dev.parent = NULL; | ||
1112 | ret = misc_register(&idev->dev); | ||
1113 | if (ret) { | ||
1114 | pr_err("ion: failed to register misc device.\n"); | ||
1115 | return ERR_PTR(ret); | ||
1116 | } | ||
1117 | |||
1118 | idev->debug_root = debugfs_create_dir("ion", NULL); | ||
1119 | if (IS_ERR_OR_NULL(idev->debug_root)) | ||
1120 | pr_err("ion: failed to create debug files.\n"); | ||
1121 | |||
1122 | idev->custom_ioctl = custom_ioctl; | ||
1123 | idev->buffers = RB_ROOT; | ||
1124 | mutex_init(&idev->lock); | ||
1125 | idev->heaps = RB_ROOT; | ||
1126 | idev->user_clients = RB_ROOT; | ||
1127 | idev->kernel_clients = RB_ROOT; | ||
1128 | return idev; | ||
1129 | } | ||
1130 | |||
/* unregister and free the ion device (heaps/clients are NOT freed here) */
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
1137 | |||
/*
 * Resolve an /dev/ion file descriptor to its ion client, taking a
 * client reference the caller must drop with ion_client_put().
 * Returns ERR_PTR(-EINVAL) for a bad fd and ERR_PTR(-EFAULT) when the
 * fd is valid but does not belong to ion (f_op mismatch).
 */
struct ion_client *ion_client_get_file(int fd)
{
	struct ion_client *client = ERR_PTR(-EFAULT);
	struct file *f = fget(fd);
	if (!f)
		return ERR_PTR(-EINVAL);

	/* only trust private_data if the fops prove this is an ion fd */
	if (f->f_op == &ion_fops) {
		client = f->private_data;
		ion_client_get(client);
	}

	fput(f);
	return client;
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c new file mode 100644 index 00000000000..606adae13f4 --- /dev/null +++ b/drivers/gpu/ion/ion_carveout_heap.c | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_carveout_heap.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/spinlock.h> | ||
17 | |||
18 | #include <linux/err.h> | ||
19 | #include <linux/genalloc.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/ion.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/scatterlist.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | #include "ion_priv.h" | ||
27 | |||
28 | #include <asm/mach/map.h> | ||
29 | |||
/* carveout heap: a gen_pool allocator over a fixed physical region */
struct ion_carveout_heap {
	struct ion_heap heap;	/* embedded generic heap (container_of anchor) */
	struct gen_pool *pool;	/* allocator over the carveout region */
	ion_phys_addr_t base;	/* physical base address of the region */
};
35 | |||
/*
 * Allocate @size bytes from the carveout pool, returning the physical
 * address or ION_CARVEOUT_ALLOCATE_FAIL.
 *
 * NOTE(review): @align is accepted but never passed to the allocator;
 * alignment beyond the pool's minimum order is not honored -- confirm
 * whether callers rely on it.  Also, gen_pool_alloc() signals failure
 * with 0, which is ambiguous if the carveout base itself were 0.
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}
49 | |||
/* return @size bytes at @addr to the carveout pool; failure cookie is
 * a no-op so callers can free unconditionally */
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}
60 | |||
/* report the buffer's physical address and length (carveout buffers
 * are physically contiguous, so this is just the stored base) */
static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}
69 | |||
/* ion_heap_ops.allocate: stash the physical address in priv_phys;
 * @flags are unused by this heap */
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}
78 | |||
/* ion_heap_ops.free: return the region to the pool and poison
 * priv_phys with the failure cookie to catch double frees */
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
86 | |||
/* DMA mapping is not supported by the carveout heap */
struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	return ERR_PTR(-EINVAL);
}
92 | |||
/* map_dma always fails for this heap, so there is never anything
 * to undo here */
void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
}
98 | |||
/* kernel mapping: ioremap the physical region non-cached (ARM-specific
 * __arch_ioremap with MT_MEMORY_NONCACHED) */
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	return __arch_ioremap(buffer->priv_phys, buffer->size,
			      MT_MEMORY_NONCACHED);
}
105 | |||
/* undo ion_carveout_heap_map_kernel(): iounmap and clear vaddr */
void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	__arch_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;
	return;
}
113 | |||
/* map the (contiguous, non-cached) physical region into a user vma,
 * honoring the vma's page offset into the buffer */
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			       struct vm_area_struct *vma)
{
	return remap_pfn_range(vma, vma->vm_start,
			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			       buffer->size,
			       pgprot_noncached(vma->vm_page_prot));
}
122 | |||
/* heap ops table; map_dma/unmap_dma are deliberately absent since this
 * heap's map_dma only returns -EINVAL */
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_user = ion_carveout_heap_map_user,
	.map_kernel = ion_carveout_heap_map_kernel,
	.unmap_kernel = ion_carveout_heap_unmap_kernel,
};
131 | |||
/*
 * Create a carveout heap over the physical region described by
 * @heap_data (base/size).  The gen_pool uses a minimum allocation
 * order of 12 (4 KiB granules); -1 means no NUMA node affinity.
 * Returns the embedded generic heap or ERR_PTR(-ENOMEM).
 */
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;

	return &carveout_heap->heap;
}
153 | |||
154 | void ion_carveout_heap_destroy(struct ion_heap *heap) | ||
155 | { | ||
156 | struct ion_carveout_heap *carveout_heap = | ||
157 | container_of(heap, struct ion_carveout_heap, heap); | ||
158 | |||
159 | gen_pool_destroy(carveout_heap->pool); | ||
160 | kfree(carveout_heap); | ||
161 | carveout_heap = NULL; | ||
162 | } | ||
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c new file mode 100644 index 00000000000..6d09778745c --- /dev/null +++ b/drivers/gpu/ion/ion_heap.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_heap.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/ion.h> | ||
19 | #include "ion_priv.h" | ||
20 | |||
21 | struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) | ||
22 | { | ||
23 | struct ion_heap *heap = NULL; | ||
24 | |||
25 | switch (heap_data->type) { | ||
26 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | ||
27 | heap = ion_system_contig_heap_create(heap_data); | ||
28 | break; | ||
29 | case ION_HEAP_TYPE_SYSTEM: | ||
30 | heap = ion_system_heap_create(heap_data); | ||
31 | break; | ||
32 | case ION_HEAP_TYPE_CARVEOUT: | ||
33 | heap = ion_carveout_heap_create(heap_data); | ||
34 | break; | ||
35 | case ION_HEAP_TYPE_IOMMU: | ||
36 | heap = ion_iommu_heap_create(heap_data); | ||
37 | break; | ||
38 | default: | ||
39 | pr_err("%s: Invalid heap type %d\n", __func__, | ||
40 | heap_data->type); | ||
41 | return ERR_PTR(-EINVAL); | ||
42 | } | ||
43 | |||
44 | if (IS_ERR_OR_NULL(heap)) { | ||
45 | pr_err("%s: error creating heap %s type %d base %lu size %u\n", | ||
46 | __func__, heap_data->name, heap_data->type, | ||
47 | heap_data->base, heap_data->size); | ||
48 | return ERR_PTR(-EINVAL); | ||
49 | } | ||
50 | |||
51 | heap->name = heap_data->name; | ||
52 | heap->id = heap_data->id; | ||
53 | return heap; | ||
54 | } | ||
55 | |||
/*
 * Factory teardown: dispatch to the type-specific destructor.
 * NULL is tolerated so callers can destroy unconditionally; an
 * unknown type is logged but otherwise ignored (the heap leaks).
 */
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_IOMMU:
		ion_iommu_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c new file mode 100644 index 00000000000..a3d2d726bda --- /dev/null +++ b/drivers/gpu/ion/ion_iommu_heap.c | |||
@@ -0,0 +1,382 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_iommu_heap.c | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | */ | ||
19 | |||
20 | #define pr_fmt(fmt) "%s(): " fmt, __func__ | ||
21 | |||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/genalloc.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/ion.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/scatterlist.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/vmalloc.h> | ||
31 | #include <linux/iommu.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | |||
35 | #include <asm/cacheflush.h> | ||
36 | |||
37 | #include "ion_priv.h" | ||
38 | |||
39 | #define NUM_PAGES(buf) (PAGE_ALIGN((buf)->size) >> PAGE_SHIFT) | ||
40 | |||
41 | #define GFP_ION (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN) | ||
42 | |||
/*
 * Per-heap state for the IOMMU-backed heap: the device-address (IOVA)
 * allocator, the IOMMU domain buffers are mapped into, and the device
 * attached to that domain (supplied via platform data priv).
 */
struct ion_iommu_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	struct iommu_domain *domain;
	struct device *dev;
};
49 | |||
50 | static struct scatterlist *iommu_heap_map_dma(struct ion_heap *heap, | ||
51 | struct ion_buffer *buf) | ||
52 | { | ||
53 | struct ion_iommu_heap *h = | ||
54 | container_of(heap, struct ion_iommu_heap, heap); | ||
55 | int err, npages = NUM_PAGES(buf); | ||
56 | unsigned int i; | ||
57 | struct scatterlist *sg; | ||
58 | unsigned long da = (unsigned long)buf->priv_virt; | ||
59 | |||
60 | for_each_sg(buf->sglist, sg, npages, i) { | ||
61 | phys_addr_t pa; | ||
62 | |||
63 | pa = sg_phys(sg); | ||
64 | BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); | ||
65 | err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0); | ||
66 | if (err) | ||
67 | goto err_out; | ||
68 | |||
69 | sg->dma_address = da; | ||
70 | da += PAGE_SIZE; | ||
71 | } | ||
72 | |||
73 | pr_debug("da:%p pa:%08x va:%p\n", | ||
74 | buf->priv_virt, sg_phys(buf->sglist), buf->vaddr); | ||
75 | |||
76 | return buf->sglist; | ||
77 | |||
78 | err_out: | ||
79 | if (i-- > 0) { | ||
80 | unsigned int j; | ||
81 | for_each_sg(buf->sglist, sg, i, j) | ||
82 | iommu_unmap(h->domain, sg_dma_address(sg), 0); | ||
83 | } | ||
84 | return ERR_PTR(err); | ||
85 | } | ||
86 | |||
/*
 * Undo iommu_heap_map_dma(): remove the IOMMU mapping of every page,
 * using the device address recorded in each sg entry.
 * NOTE(review): the third iommu_unmap() argument is 0; whether that
 * means "order 0 == one page" or "zero bytes" depends on this tree's
 * IOMMU API revision -- verify against include/linux/iommu.h.
 */
static void iommu_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buf)
{
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	unsigned int i;
	struct scatterlist *sg;
	int npages = NUM_PAGES(buf);

	for_each_sg(buf->sglist, sg, npages, i)
		iommu_unmap(h->domain, sg_dma_address(sg), 0);

	pr_debug("da:%p\n", buf->priv_virt);
}
100 | |||
101 | struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap, | ||
102 | struct ion_buffer *buf, | ||
103 | unsigned long addr) | ||
104 | { | ||
105 | struct ion_iommu_heap *h = | ||
106 | container_of(heap, struct ion_iommu_heap, heap); | ||
107 | int err; | ||
108 | unsigned int i; | ||
109 | unsigned long da, da_to_free = (unsigned long)buf->priv_virt; | ||
110 | int npages = NUM_PAGES(buf); | ||
111 | |||
112 | BUG_ON(!buf->priv_virt); | ||
113 | |||
114 | da = gen_pool_alloc_addr(h->pool, buf->size, addr); | ||
115 | if (da == 0) { | ||
116 | pr_err("dma address alloc failed, addr=0x%lx", addr); | ||
117 | return ERR_PTR(-ENOMEM); | ||
118 | } else { | ||
119 | pr_err("iommu_heap_remap_dma passed, addr=0x%lx", | ||
120 | addr); | ||
121 | iommu_heap_unmap_dma(heap, buf); | ||
122 | gen_pool_free(h->pool, da_to_free, buf->size); | ||
123 | buf->priv_virt = (void *)da; | ||
124 | } | ||
125 | for (i = 0; i < npages; i++) { | ||
126 | phys_addr_t pa; | ||
127 | |||
128 | pa = page_to_phys(buf->pages[i]); | ||
129 | err = iommu_map(h->domain, da, pa, 0, 0); | ||
130 | if (err) | ||
131 | goto err_out; | ||
132 | da += PAGE_SIZE; | ||
133 | } | ||
134 | |||
135 | pr_debug("da:%p pa:%08x va:%p\n", | ||
136 | buf->priv_virt, page_to_phys(buf->pages[0]), buf->vaddr); | ||
137 | |||
138 | return (struct scatterlist *)buf->pages; | ||
139 | |||
140 | err_out: | ||
141 | if (i-- > 0) { | ||
142 | da = (unsigned long)buf->priv_virt; | ||
143 | iommu_unmap(h->domain, da + (i << PAGE_SHIFT), 0); | ||
144 | } | ||
145 | return ERR_PTR(err); | ||
146 | } | ||
147 | |||
148 | static int ion_buffer_allocate(struct ion_buffer *buf) | ||
149 | { | ||
150 | int i, npages = NUM_PAGES(buf); | ||
151 | |||
152 | buf->pages = kmalloc(npages * sizeof(*buf->pages), GFP_KERNEL); | ||
153 | if (!buf->pages) | ||
154 | goto err_pages; | ||
155 | |||
156 | buf->sglist = vzalloc(npages * sizeof(*buf->sglist)); | ||
157 | if (!buf->sglist) | ||
158 | goto err_sgl; | ||
159 | |||
160 | sg_init_table(buf->sglist, npages); | ||
161 | |||
162 | for (i = 0; i < npages; i++) { | ||
163 | struct page *page; | ||
164 | phys_addr_t pa; | ||
165 | |||
166 | page = alloc_page(GFP_ION); | ||
167 | if (!page) | ||
168 | goto err_pgalloc; | ||
169 | pa = page_to_phys(page); | ||
170 | |||
171 | sg_set_page(&buf->sglist[i], page, PAGE_SIZE, 0); | ||
172 | |||
173 | flush_dcache_page(page); | ||
174 | outer_flush_range(pa, pa + PAGE_SIZE); | ||
175 | |||
176 | buf->pages[i] = page; | ||
177 | |||
178 | pr_debug_once("pa:%08x\n", pa); | ||
179 | } | ||
180 | return 0; | ||
181 | |||
182 | err_pgalloc: | ||
183 | while (i-- > 0) | ||
184 | __free_page(buf->pages[i]); | ||
185 | vfree(buf->sglist); | ||
186 | err_sgl: | ||
187 | kfree(buf->pages); | ||
188 | err_pages: | ||
189 | return -ENOMEM; | ||
190 | } | ||
191 | |||
192 | static void ion_buffer_free(struct ion_buffer *buf) | ||
193 | { | ||
194 | int i, npages = NUM_PAGES(buf); | ||
195 | |||
196 | for (i = 0; i < npages; i++) | ||
197 | __free_page(buf->pages[i]); | ||
198 | vfree(buf->sglist); | ||
199 | kfree(buf->pages); | ||
200 | } | ||
201 | |||
202 | static int iommu_heap_allocate(struct ion_heap *heap, struct ion_buffer *buf, | ||
203 | unsigned long len, unsigned long align, | ||
204 | unsigned long flags) | ||
205 | { | ||
206 | int err; | ||
207 | struct ion_iommu_heap *h = | ||
208 | container_of(heap, struct ion_iommu_heap, heap); | ||
209 | unsigned long da; | ||
210 | struct scatterlist *sgl; | ||
211 | |||
212 | len = round_up(len, PAGE_SIZE); | ||
213 | |||
214 | da = gen_pool_alloc(h->pool, len); | ||
215 | if (!da) | ||
216 | return -ENOMEM; | ||
217 | |||
218 | buf->priv_virt = (void *)da; | ||
219 | buf->size = len; | ||
220 | |||
221 | WARN_ON(!IS_ALIGNED(da, PAGE_SIZE)); | ||
222 | |||
223 | err = ion_buffer_allocate(buf); | ||
224 | if (err) | ||
225 | goto err_alloc_buf; | ||
226 | |||
227 | sgl = iommu_heap_map_dma(heap, buf); | ||
228 | if (IS_ERR_OR_NULL(sgl)) | ||
229 | goto err_heap_map_dma; | ||
230 | buf->vaddr = 0; | ||
231 | return 0; | ||
232 | |||
233 | err_heap_map_dma: | ||
234 | ion_buffer_free(buf); | ||
235 | err_alloc_buf: | ||
236 | gen_pool_free(h->pool, da, len); | ||
237 | buf->size = 0; | ||
238 | buf->pages = NULL; | ||
239 | buf->priv_virt = NULL; | ||
240 | return err; | ||
241 | } | ||
242 | |||
/*
 * Undo iommu_heap_allocate(): drop the IOMMU mappings, free the backing
 * pages, then return the IOVA range to the pool.  buf->size is still
 * valid here because ion_buffer_free() does not touch it.
 */
static void iommu_heap_free(struct ion_buffer *buf)
{
	struct ion_heap *heap = buf->heap;
	struct ion_iommu_heap *h =
		container_of(heap, struct ion_iommu_heap, heap);
	void *da = buf->priv_virt;

	iommu_heap_unmap_dma(heap, buf);
	ion_buffer_free(buf);
	gen_pool_free(h->pool, (unsigned long)da, buf->size);

	buf->pages = NULL;
	buf->priv_virt = NULL;
	pr_debug("da:%p\n", da);
}
258 | |||
259 | static int iommu_heap_phys(struct ion_heap *heap, struct ion_buffer *buf, | ||
260 | ion_phys_addr_t *addr, size_t *len) | ||
261 | { | ||
262 | *addr = (unsigned long)buf->priv_virt; | ||
263 | *len = buf->size; | ||
264 | pr_debug("da:%08lx(%x)\n", *addr, *len); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
/*
 * Give the discontiguous backing pages a contiguous, uncached kernel
 * virtual mapping.  Returns the new vaddr, or NULL (after a WARN) if
 * vm_map_ram() fails.
 */
static void *iommu_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buf)
{
	int npages = NUM_PAGES(buf);

	BUG_ON(!buf->pages);
	buf->vaddr = vm_map_ram(buf->pages, npages, -1,
				pgprot_noncached(pgprot_kernel));
	pr_debug("va:%p\n", buf->vaddr);
	WARN_ON(!buf->vaddr);
	return buf->vaddr;
}
280 | |||
281 | static void iommu_heap_unmap_kernel(struct ion_heap *heap, | ||
282 | struct ion_buffer *buf) | ||
283 | { | ||
284 | int npages = NUM_PAGES(buf); | ||
285 | |||
286 | BUG_ON(!buf->pages); | ||
287 | WARN_ON(!buf->vaddr); | ||
288 | vm_unmap_ram(buf->vaddr, npages); | ||
289 | buf->vaddr = NULL; | ||
290 | pr_debug("va:%p\n", buf->vaddr); | ||
291 | } | ||
292 | |||
/*
 * Insert the buffer's backing pages into a userspace VMA, one page at a
 * time, with an uncached protection.
 *
 * NOTE(review): vm_pgoff is already a page index, so shifting it right
 * by PAGE_SHIFT again looks wrong unless callers encode a byte offset
 * there -- confirm against the mmap path in ion.c.  Also no check that
 * i stays below NUM_PAGES(buf) for oversized VMAs; verify callers clamp.
 */
static int iommu_heap_map_user(struct ion_heap *mapper,
			       struct ion_buffer *buf,
			       struct vm_area_struct *vma)
{
	int i = vma->vm_pgoff >> PAGE_SHIFT;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	pr_debug("vma:%08lx-%08lx\n", vma->vm_start, vma->vm_end);
	BUG_ON(!buf->pages);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	do {
		int ret;
		struct page *page = buf->pages[i++];

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}
319 | |||
/* Ops vector registered for ION_HEAP_TYPE_IOMMU heaps. */
static struct ion_heap_ops iommu_heap_ops = {
	.allocate	= iommu_heap_allocate,
	.free		= iommu_heap_free,
	.phys		= iommu_heap_phys,
	.map_dma	= iommu_heap_map_dma,
	.unmap_dma	= iommu_heap_unmap_dma,
	.map_kernel	= iommu_heap_map_kernel,
	.unmap_kernel	= iommu_heap_unmap_kernel,
	.map_user	= iommu_heap_map_user,
};
330 | |||
331 | struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data) | ||
332 | { | ||
333 | struct ion_iommu_heap *h; | ||
334 | int err; | ||
335 | |||
336 | h = kzalloc(sizeof(*h), GFP_KERNEL); | ||
337 | if (!h) { | ||
338 | err = -ENOMEM; | ||
339 | goto err_heap; | ||
340 | } | ||
341 | |||
342 | h->pool = gen_pool_create(12, -1); | ||
343 | if (!h->pool) { | ||
344 | err = -ENOMEM; | ||
345 | goto err_genpool; | ||
346 | } | ||
347 | gen_pool_add(h->pool, data->base, data->size, -1); | ||
348 | |||
349 | h->heap.ops = &iommu_heap_ops; | ||
350 | h->domain = iommu_domain_alloc(&platform_bus_type); | ||
351 | h->dev = data->priv; | ||
352 | if (!h->domain) { | ||
353 | err = -ENOMEM; | ||
354 | goto err_iommu_alloc; | ||
355 | } | ||
356 | |||
357 | err = iommu_attach_device(h->domain, h->dev); | ||
358 | if (err) | ||
359 | goto err_iommu_attach; | ||
360 | |||
361 | return &h->heap; | ||
362 | |||
363 | err_iommu_attach: | ||
364 | iommu_domain_free(h->domain); | ||
365 | err_iommu_alloc: | ||
366 | gen_pool_destroy(h->pool); | ||
367 | err_genpool: | ||
368 | kfree(h); | ||
369 | err_heap: | ||
370 | return ERR_PTR(err); | ||
371 | } | ||
372 | |||
373 | void ion_iommu_heap_destroy(struct ion_heap *heap) | ||
374 | { | ||
375 | struct ion_iommu_heap *h = | ||
376 | container_of(heap, struct ion_iommu_heap, heap); | ||
377 | |||
378 | iommu_detach_device(h->domain, h->dev); | ||
379 | gen_pool_destroy(h->pool); | ||
380 | iommu_domain_free(h->domain); | ||
381 | kfree(h); | ||
382 | } | ||
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h new file mode 100644 index 00000000000..bfe26da9c04 --- /dev/null +++ b/drivers/gpu/ion/ion_priv.h | |||
@@ -0,0 +1,293 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_priv.h | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef _ION_PRIV_H | ||
18 | #define _ION_PRIV_H | ||
19 | |||
20 | #include <linux/kref.h> | ||
21 | #include <linux/mm_types.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/rbtree.h> | ||
24 | #include <linux/ion.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | |||
/* Opaque mapper-specific mapping handle; defined by the mapper code. */
struct ion_mapping;

/* Refcounted DMA (scatterlist) mapping of a buffer. */
struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};

/* Refcounted kernel virtual mapping of a buffer. */
struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};
38 | |||
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	arch/platform-specific ioctl hook (may be NULL)
 * @user_clients:	list of all the clients created from userspace
 * @kernel_clients:	list of all the clients created by kernel drivers
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};
58 | |||
/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the owning task, used for debugging
 * @debug_root:		this client's debugfs entry
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves; hold it while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
86 | |||
/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node, the map counts, or the mapping should be
 * protected by the lock in the client.  Other fields are never changed
 * after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};
109 | |||
110 | bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle); | ||
111 | |||
112 | void ion_buffer_get(struct ion_buffer *buffer); | ||
113 | |||
114 | struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); | ||
115 | |||
116 | struct ion_client *ion_client_get_file(int fd); | ||
117 | |||
118 | void ion_client_get(struct ion_client *client); | ||
119 | |||
120 | int ion_client_put(struct ion_client *client); | ||
121 | |||
122 | void ion_handle_get(struct ion_handle *handle); | ||
123 | |||
124 | int ion_handle_put(struct ion_handle *handle); | ||
125 | |||
126 | struct ion_handle *ion_handle_create(struct ion_client *client, | ||
127 | struct ion_buffer *buffer); | ||
128 | |||
129 | void ion_handle_add(struct ion_client *client, struct ion_handle *handle); | ||
130 | |||
131 | int ion_remap_dma(struct ion_client *client, | ||
132 | struct ion_handle *handle, | ||
133 | unsigned long addr); | ||
/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void * (in a union with @priv_phys)
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffers cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
 * @pages:		list of allocated pages for the buffer
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	struct page **pages;
};
171 | |||
/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only define on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma);
};
199 | |||
/**
 * struct ion_heap - represents a heap in the system
 * @node:		rb node to put the heap on the device's tree of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};
224 | |||
225 | /** | ||
226 | * ion_device_create - allocates and returns an ion device | ||
227 | * @custom_ioctl: arch specific ioctl function if applicable | ||
228 | * | ||
229 | * returns a valid device or -PTR_ERR | ||
230 | */ | ||
231 | struct ion_device *ion_device_create(long (*custom_ioctl) | ||
232 | (struct ion_client *client, | ||
233 | unsigned int cmd, | ||
234 | unsigned long arg)); | ||
235 | |||
236 | /** | ||
237 | * ion_device_destroy - free and device and it's resource | ||
238 | * @dev: the device | ||
239 | */ | ||
240 | void ion_device_destroy(struct ion_device *dev); | ||
241 | |||
242 | /** | ||
243 | * ion_device_add_heap - adds a heap to the ion device | ||
244 | * @dev: the device | ||
245 | * @heap: the heap to add | ||
246 | */ | ||
247 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); | ||
248 | |||
249 | /** | ||
250 | * functions for creating and destroying the built in ion heaps. | ||
251 | * architectures can add their own custom architecture specific | ||
252 | * heaps as appropriate. | ||
253 | */ | ||
254 | |||
255 | struct ion_heap *ion_heap_create(struct ion_platform_heap *); | ||
256 | void ion_heap_destroy(struct ion_heap *); | ||
257 | |||
258 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); | ||
259 | void ion_system_heap_destroy(struct ion_heap *); | ||
260 | |||
261 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); | ||
262 | void ion_system_contig_heap_destroy(struct ion_heap *); | ||
263 | |||
264 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); | ||
265 | void ion_carveout_heap_destroy(struct ion_heap *); | ||
266 | /** | ||
267 | * kernel api to allocate/free from carveout -- used when carveout is | ||
268 | * used to back an architecture specific custom heap | ||
269 | */ | ||
270 | ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, | ||
271 | unsigned long align); | ||
272 | void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | ||
273 | unsigned long size); | ||
274 | |||
275 | #ifdef CONFIG_ION_IOMMU | ||
276 | struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *); | ||
277 | void ion_iommu_heap_destroy(struct ion_heap *); | ||
278 | #else | ||
279 | static inline struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *) | ||
280 | { | ||
281 | return NULL; | ||
282 | } | ||
283 | static inline void ion_iommu_heap_destroy(struct ion_heap *) | ||
284 | { | ||
285 | } | ||
286 | #endif | ||
287 | /** | ||
288 | * The carveout heap returns physical addresses, since 0 may be a valid | ||
289 | * physical address, this is used to indicate allocation failed | ||
290 | */ | ||
291 | #define ION_CARVEOUT_ALLOCATE_FAIL -1 | ||
292 | |||
293 | #endif /* _ION_PRIV_H */ | ||
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c new file mode 100644 index 00000000000..c046cf1a321 --- /dev/null +++ b/drivers/gpu/ion/ion_system_heap.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_system_heap.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/ion.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/scatterlist.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include "ion_priv.h" | ||
24 | |||
25 | static int ion_system_heap_allocate(struct ion_heap *heap, | ||
26 | struct ion_buffer *buffer, | ||
27 | unsigned long size, unsigned long align, | ||
28 | unsigned long flags) | ||
29 | { | ||
30 | buffer->priv_virt = vmalloc_user(size); | ||
31 | if (!buffer->priv_virt) | ||
32 | return -ENOMEM; | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | void ion_system_heap_free(struct ion_buffer *buffer) | ||
37 | { | ||
38 | vfree(buffer->priv_virt); | ||
39 | } | ||
40 | |||
41 | struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, | ||
42 | struct ion_buffer *buffer) | ||
43 | { | ||
44 | struct scatterlist *sglist; | ||
45 | struct page *page; | ||
46 | int i; | ||
47 | int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; | ||
48 | void *vaddr = buffer->priv_virt; | ||
49 | |||
50 | sglist = vmalloc(npages * sizeof(struct scatterlist)); | ||
51 | if (!sglist) | ||
52 | return ERR_PTR(-ENOMEM); | ||
53 | memset(sglist, 0, npages * sizeof(struct scatterlist)); | ||
54 | sg_init_table(sglist, npages); | ||
55 | for (i = 0; i < npages; i++) { | ||
56 | page = vmalloc_to_page(vaddr); | ||
57 | if (!page) | ||
58 | goto end; | ||
59 | sg_set_page(&sglist[i], page, PAGE_SIZE, 0); | ||
60 | vaddr += PAGE_SIZE; | ||
61 | } | ||
62 | /* XXX do cache maintenance for dma? */ | ||
63 | return sglist; | ||
64 | end: | ||
65 | vfree(sglist); | ||
66 | return NULL; | ||
67 | } | ||
68 | |||
69 | void ion_system_heap_unmap_dma(struct ion_heap *heap, | ||
70 | struct ion_buffer *buffer) | ||
71 | { | ||
72 | /* XXX undo cache maintenance for dma? */ | ||
73 | if (buffer->sglist) | ||
74 | vfree(buffer->sglist); | ||
75 | } | ||
76 | |||
/* vmalloc memory already has a kernel mapping; just hand it out. */
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}
82 | |||
/* Nothing to undo: map_kernel returned the permanent vmalloc mapping. */
void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
}
87 | |||
/* Map the vmalloc_user() buffer into userspace at the VMA's offset. */
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
}
93 | |||
/* Ops vector for ION_HEAP_TYPE_SYSTEM (vmalloc-backed) heaps. */
static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};
103 | |||
104 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) | ||
105 | { | ||
106 | struct ion_heap *heap; | ||
107 | |||
108 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); | ||
109 | if (!heap) | ||
110 | return ERR_PTR(-ENOMEM); | ||
111 | heap->ops = &vmalloc_ops; | ||
112 | heap->type = ION_HEAP_TYPE_SYSTEM; | ||
113 | return heap; | ||
114 | } | ||
115 | |||
/* Free the heap struct itself; buffers are released via the ops. */
void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
120 | |||
121 | static int ion_system_contig_heap_allocate(struct ion_heap *heap, | ||
122 | struct ion_buffer *buffer, | ||
123 | unsigned long len, | ||
124 | unsigned long align, | ||
125 | unsigned long flags) | ||
126 | { | ||
127 | buffer->priv_virt = kzalloc(len, GFP_KERNEL); | ||
128 | if (!buffer->priv_virt) | ||
129 | return -ENOMEM; | ||
130 | return 0; | ||
131 | } | ||
132 | |||
/* Release the kzalloc() allocation made at allocate time. */
void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}
137 | |||
/* kmalloc memory is physically contiguous, so phys is well-defined. */
static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}
146 | |||
/*
 * A physically contiguous buffer maps to a single-entry scatterlist.
 * NOTE(review): the sglist is vmalloc'd rather than kmalloc'd,
 * presumably because the shared ion_system_heap_unmap_dma() frees it
 * with vfree() (see kmalloc_ops below) -- keep allocator and free in
 * sync if either changes.
 */
struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						   struct ion_buffer *buffer)
{
	struct scatterlist *sglist;

	sglist = vmalloc(sizeof(struct scatterlist));
	if (!sglist)
		return ERR_PTR(-ENOMEM);
	sg_init_table(sglist, 1);
	sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
	return sglist;
}
159 | |||
/* Map the contiguous buffer into userspace by pfn-remapping the whole
 * VMA, honouring the VMA's page offset into the buffer. */
int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);

}
170 | |||
/* Ops vector for ION_HEAP_TYPE_SYSTEM_CONTIG (kmalloc-backed) heaps.
 * unmap_dma/map_kernel/unmap_kernel are shared with the vmalloc heap. */
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};
181 | |||
182 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) | ||
183 | { | ||
184 | struct ion_heap *heap; | ||
185 | |||
186 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); | ||
187 | if (!heap) | ||
188 | return ERR_PTR(-ENOMEM); | ||
189 | heap->ops = &kmalloc_ops; | ||
190 | heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; | ||
191 | return heap; | ||
192 | } | ||
193 | |||
/* Free the heap struct itself; buffers are released via the ops. */
void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
198 | |||
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c new file mode 100644 index 00000000000..692458e07b5 --- /dev/null +++ b/drivers/gpu/ion/ion_system_mapper.c | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_system_mapper.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/ion.h> | ||
19 | #include <linux/memory.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include "ion_priv.h" | ||
24 | /* | ||
25 | * This mapper is valid for any heap that allocates memory that already has | ||
26 | * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory, | ||
27 | * pages obtained via io_remap, etc. | ||
28 | */ | ||
29 | static void *ion_kernel_mapper_map(struct ion_mapper *mapper, | ||
30 | struct ion_buffer *buffer, | ||
31 | struct ion_mapping **mapping) | ||
32 | { | ||
33 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) { | ||
34 | pr_err("%s: attempting to map an unsupported heap\n", __func__); | ||
35 | return ERR_PTR(-EINVAL); | ||
36 | } | ||
37 | /* XXX REVISIT ME!!! */ | ||
38 | *((unsigned long *)mapping) = (unsigned long)buffer->priv; | ||
39 | return buffer->priv; | ||
40 | } | ||
41 | |||
42 | static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, | ||
43 | struct ion_buffer *buffer, | ||
44 | struct ion_mapping *mapping) | ||
45 | { | ||
46 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) | ||
47 | pr_err("%s: attempting to unmap an unsupported heap\n", | ||
48 | __func__); | ||
49 | } | ||
50 | |||
51 | static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, | ||
52 | struct ion_buffer *buffer, | ||
53 | struct ion_mapping *mapping) | ||
54 | { | ||
55 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) { | ||
56 | pr_err("%s: attempting to unmap an unsupported heap\n", | ||
57 | __func__); | ||
58 | return ERR_PTR(-EINVAL); | ||
59 | } | ||
60 | return buffer->priv; | ||
61 | } | ||
62 | |||
63 | static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, | ||
64 | struct ion_buffer *buffer, | ||
65 | struct vm_area_struct *vma, | ||
66 | struct ion_mapping *mapping) | ||
67 | { | ||
68 | int ret; | ||
69 | |||
70 | switch (buffer->heap->type) { | ||
71 | case ION_HEAP_KMALLOC: | ||
72 | { | ||
73 | unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); | ||
74 | ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, | ||
75 | vma->vm_end - vma->vm_start, | ||
76 | vma->vm_page_prot); | ||
77 | break; | ||
78 | } | ||
79 | case ION_HEAP_VMALLOC: | ||
80 | ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); | ||
81 | break; | ||
82 | default: | ||
83 | pr_err("%s: attempting to map unsupported heap to userspace\n", | ||
84 | __func__); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | return ret; | ||
89 | } | ||
90 | |||
/* Operation table for the system mapper (heaps with existing kernel VAs). */
static struct ion_mapper_ops ops = {
	.map = ion_kernel_mapper_map,
	.map_kernel = ion_kernel_mapper_map_kernel,
	.map_user = ion_kernel_mapper_map_user,
	.unmap = ion_kernel_mapper_unmap,
};
97 | |||
98 | struct ion_mapper *ion_system_mapper_create(void) | ||
99 | { | ||
100 | struct ion_mapper *mapper; | ||
101 | mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); | ||
102 | if (!mapper) | ||
103 | return ERR_PTR(-ENOMEM); | ||
104 | mapper->type = ION_SYSTEM_MAPPER; | ||
105 | mapper->ops = &ops; | ||
106 | mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); | ||
107 | return mapper; | ||
108 | } | ||
109 | |||
/* Free a mapper created by ion_system_mapper_create(). */
void ion_system_mapper_destroy(struct ion_mapper *mapper)
{
	kfree(mapper);
}
114 | |||
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile new file mode 100644 index 00000000000..11cd003fb08 --- /dev/null +++ b/drivers/gpu/ion/tegra/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-y += tegra_ion.o | |||
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000..2252079279e --- /dev/null +++ b/drivers/gpu/ion/tegra/tegra_ion.c | |||
@@ -0,0 +1,599 @@ | |||
1 | /* | ||
2 | * drivers/gpu/tegra/tegra_ion.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * Copyright (C) 2011, NVIDIA Corporation. | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ | ||
19 | |||
20 | #include <linux/err.h> | ||
21 | #include <linux/ion.h> | ||
22 | #include <linux/tegra_ion.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/io.h> | ||
28 | #include "../ion_priv.h" | ||
29 | |||
30 | #define CLIENT_HEAP_MASK 0xFFFFFFFF | ||
31 | #define HEAP_FLAGS 0xFF | ||
32 | |||
33 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
34 | #include "mach/nvmap.h" | ||
35 | struct nvmap_device *nvmap_dev; | ||
36 | #endif | ||
37 | |||
38 | static struct ion_device *idev; | ||
39 | static int num_heaps; | ||
40 | static struct ion_heap **heaps; | ||
41 | |||
42 | static int tegra_ion_pin(struct ion_client *client, | ||
43 | unsigned int cmd, | ||
44 | unsigned long arg) | ||
45 | { | ||
46 | struct tegra_ion_pin_data data; | ||
47 | int ret; | ||
48 | struct ion_handle *on_stack[16]; | ||
49 | struct ion_handle **refs = on_stack; | ||
50 | int i; | ||
51 | bool valid_handle; | ||
52 | |||
53 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
54 | return -EFAULT; | ||
55 | if (data.count) { | ||
56 | size_t bytes = data.count * sizeof(struct ion_handle *); | ||
57 | |||
58 | if (data.count > ARRAY_SIZE(on_stack)) | ||
59 | refs = kmalloc(data.count * sizeof(*refs), GFP_KERNEL); | ||
60 | else | ||
61 | refs = on_stack; | ||
62 | if (!refs) | ||
63 | return -ENOMEM; | ||
64 | if (copy_from_user(refs, (void *)data.handles, bytes)) { | ||
65 | ret = -EFAULT; | ||
66 | goto err; | ||
67 | } | ||
68 | } else | ||
69 | return -EINVAL; | ||
70 | |||
71 | mutex_lock(&client->lock); | ||
72 | for (i = 0; i < data.count; i++) { | ||
73 | /* Ignore NULL pointers during unpin operation. */ | ||
74 | if (!refs[i] && cmd == TEGRA_ION_UNPIN) | ||
75 | continue; | ||
76 | valid_handle = ion_handle_validate(client, refs[i]); | ||
77 | if (!valid_handle) { | ||
78 | WARN(1, "invalid handle passed h=0x%x", (u32)refs[i]); | ||
79 | mutex_unlock(&client->lock); | ||
80 | ret = -EINVAL; | ||
81 | goto err; | ||
82 | } | ||
83 | } | ||
84 | mutex_unlock(&client->lock); | ||
85 | |||
86 | if (cmd == TEGRA_ION_PIN) { | ||
87 | ion_phys_addr_t addr; | ||
88 | size_t len; | ||
89 | |||
90 | for (i = 0; i < data.count; i++) { | ||
91 | ret = ion_phys(client, refs[i], &addr, &len); | ||
92 | if (ret) | ||
93 | goto err; | ||
94 | ion_handle_get(refs[i]); | ||
95 | ret = put_user(addr, &data.addr[i]); | ||
96 | if (ret) | ||
97 | return ret; | ||
98 | } | ||
99 | } else if (cmd == TEGRA_ION_UNPIN) { | ||
100 | for (i = 0; i < data.count; i++) { | ||
101 | if (refs[i]) | ||
102 | ion_handle_put(refs[i]); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | err: | ||
107 | if (ret) { | ||
108 | pr_err("error, ret=0x%x", ret); | ||
109 | /* FIXME: undo pinning. */ | ||
110 | } | ||
111 | if (refs != on_stack) | ||
112 | kfree(refs); | ||
113 | return ret; | ||
114 | } | ||
115 | |||
116 | static int tegra_ion_alloc_from_id(struct ion_client *client, | ||
117 | unsigned int cmd, | ||
118 | unsigned long arg) | ||
119 | { | ||
120 | struct tegra_ion_id_data data; | ||
121 | struct ion_buffer *buffer; | ||
122 | struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg; | ||
123 | |||
124 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
125 | return -EFAULT; | ||
126 | buffer = (struct ion_buffer *)data.id; | ||
127 | data.handle = ion_import(client, buffer); | ||
128 | data.size = buffer->size; | ||
129 | if (put_user(data.handle, &user_data->handle)) | ||
130 | return -EFAULT; | ||
131 | if (put_user(data.size, &user_data->size)) | ||
132 | return -EFAULT; | ||
133 | return 0; | ||
134 | } | ||
135 | |||
/*
 * Return the global "id" for a client-local handle.  The id is the
 * kernel address of the underlying ion_buffer, written back into the
 * caller's tegra_ion_id_data.id field.
 */
static int tegra_ion_get_id(struct ion_client *client,
			    unsigned int cmd,
			    unsigned long arg)
{
	bool valid_handle;
	struct tegra_ion_id_data data;
	struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	/* ion_handle_validate() must run under the client lock */
	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, data.handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "invalid handle passed\n");
		return -EINVAL;
	}

	pr_debug("h=0x%x, b=0x%x, bref=%d",
		 (u32)data.handle, (u32)data.handle->buffer,
		 atomic_read(&data.handle->buffer->ref.refcount));
	/* expose the buffer's kernel address as the global id */
	if (put_user((unsigned long)ion_handle_buffer(data.handle),
		     &user_data->id))
		return -EFAULT;
	return 0;
}
164 | |||
/*
 * Cache-maintenance ioctl.  No per-range flush is implemented here; only
 * a write barrier is issued so prior CPU stores are ordered before any
 * subsequent device access.  cmd and arg are accepted but unused.
 */
static int tegra_ion_cache_maint(struct ion_client *client,
				 unsigned int cmd,
				 unsigned long arg)
{
	wmb();
	return 0;
}
172 | |||
173 | static int tegra_ion_rw(struct ion_client *client, | ||
174 | unsigned int cmd, | ||
175 | unsigned long arg) | ||
176 | { | ||
177 | bool valid_handle; | ||
178 | struct tegra_ion_rw_data data; | ||
179 | char *kern_addr, *src; | ||
180 | int ret = 0; | ||
181 | size_t copied = 0; | ||
182 | |||
183 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
184 | return -EFAULT; | ||
185 | |||
186 | if (!data.handle || !data.addr || !data.count || !data.elem_size) | ||
187 | return -EINVAL; | ||
188 | |||
189 | mutex_lock(&client->lock); | ||
190 | valid_handle = ion_handle_validate(client, data.handle); | ||
191 | mutex_unlock(&client->lock); | ||
192 | |||
193 | if (!valid_handle) { | ||
194 | WARN(1, "%s: invalid handle passed to get id.\n", __func__); | ||
195 | return -EINVAL; | ||
196 | } | ||
197 | |||
198 | if (data.elem_size == data.mem_stride && | ||
199 | data.elem_size == data.user_stride) { | ||
200 | data.elem_size *= data.count; | ||
201 | data.mem_stride = data.elem_size; | ||
202 | data.user_stride = data.elem_size; | ||
203 | data.count = 1; | ||
204 | } | ||
205 | |||
206 | kern_addr = ion_map_kernel(client, data.handle); | ||
207 | |||
208 | while (data.count--) { | ||
209 | if (data.offset + data.elem_size > data.handle->buffer->size) { | ||
210 | WARN(1, "read/write outside of handle\n"); | ||
211 | ret = -EFAULT; | ||
212 | break; | ||
213 | } | ||
214 | |||
215 | src = kern_addr + data.offset; | ||
216 | if (cmd == TEGRA_ION_READ) | ||
217 | ret = copy_to_user((void *)data.addr, | ||
218 | src, data.elem_size); | ||
219 | else | ||
220 | ret = copy_from_user(src, | ||
221 | (void *)data.addr, data.elem_size); | ||
222 | |||
223 | if (ret) | ||
224 | break; | ||
225 | |||
226 | copied += data.elem_size; | ||
227 | data.addr += data.user_stride; | ||
228 | data.offset += data.mem_stride; | ||
229 | } | ||
230 | |||
231 | ion_unmap_kernel(client, data.handle); | ||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | static int tegra_ion_get_param(struct ion_client *client, | ||
236 | unsigned int cmd, | ||
237 | unsigned long arg) | ||
238 | { | ||
239 | bool valid_handle; | ||
240 | struct tegra_ion_get_params_data data; | ||
241 | struct tegra_ion_get_params_data *user_data = | ||
242 | (struct tegra_ion_get_params_data *)arg; | ||
243 | struct ion_buffer *buffer; | ||
244 | |||
245 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
246 | return -EFAULT; | ||
247 | |||
248 | mutex_lock(&client->lock); | ||
249 | valid_handle = ion_handle_validate(client, data.handle); | ||
250 | mutex_unlock(&client->lock); | ||
251 | |||
252 | if (!valid_handle) { | ||
253 | WARN(1, "%s: invalid handle passed to get id.\n", __func__); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | |||
257 | buffer = ion_handle_buffer(data.handle); | ||
258 | data.align = 4096; | ||
259 | data.heap = 1; | ||
260 | ion_phys(client, data.handle, &data.addr, &data.size); | ||
261 | |||
262 | if (copy_to_user(user_data, &data, sizeof(data))) | ||
263 | return -EFAULT; | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static long tegra_ion_ioctl(struct ion_client *client, | ||
269 | unsigned int cmd, | ||
270 | unsigned long arg) | ||
271 | { | ||
272 | int ret = -ENOTTY; | ||
273 | |||
274 | switch (cmd) { | ||
275 | case TEGRA_ION_ALLOC_FROM_ID: | ||
276 | ret = tegra_ion_alloc_from_id(client, cmd, arg); | ||
277 | break; | ||
278 | case TEGRA_ION_GET_ID: | ||
279 | ret = tegra_ion_get_id(client, cmd, arg); | ||
280 | break; | ||
281 | case TEGRA_ION_PIN: | ||
282 | case TEGRA_ION_UNPIN: | ||
283 | ret = tegra_ion_pin(client, cmd, arg); | ||
284 | break; | ||
285 | case TEGRA_ION_CACHE_MAINT: | ||
286 | ret = tegra_ion_cache_maint(client, cmd, arg); | ||
287 | break; | ||
288 | case TEGRA_ION_READ: | ||
289 | case TEGRA_ION_WRITE: | ||
290 | ret = tegra_ion_rw(client, cmd, arg); | ||
291 | break; | ||
292 | case TEGRA_ION_GET_PARAM: | ||
293 | ret = tegra_ion_get_param(client, cmd, arg); | ||
294 | break; | ||
295 | default: | ||
296 | WARN(1, "Unknown custom ioctl\n"); | ||
297 | return -ENOTTY; | ||
298 | } | ||
299 | return ret; | ||
300 | } | ||
301 | |||
302 | int tegra_ion_probe(struct platform_device *pdev) | ||
303 | { | ||
304 | struct ion_platform_data *pdata = pdev->dev.platform_data; | ||
305 | int i; | ||
306 | |||
307 | num_heaps = pdata->nr; | ||
308 | |||
309 | heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); | ||
310 | |||
311 | idev = ion_device_create(tegra_ion_ioctl); | ||
312 | if (IS_ERR_OR_NULL(idev)) { | ||
313 | kfree(heaps); | ||
314 | return PTR_ERR(idev); | ||
315 | } | ||
316 | |||
317 | /* create the heaps as specified in the board file */ | ||
318 | for (i = 0; i < num_heaps; i++) { | ||
319 | struct ion_platform_heap *heap_data = &pdata->heaps[i]; | ||
320 | |||
321 | heaps[i] = ion_heap_create(heap_data); | ||
322 | if (IS_ERR_OR_NULL(heaps[i])) { | ||
323 | pr_warn("%s(type:%d id:%d) isn't supported\n", | ||
324 | heap_data->name, | ||
325 | heap_data->type, heap_data->id); | ||
326 | continue; | ||
327 | } | ||
328 | ion_device_add_heap(idev, heaps[i]); | ||
329 | } | ||
330 | platform_set_drvdata(pdev, idev); | ||
331 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
332 | nvmap_dev = (struct nvmap_device *)idev; | ||
333 | #endif | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | int tegra_ion_remove(struct platform_device *pdev) | ||
338 | { | ||
339 | struct ion_device *idev = platform_get_drvdata(pdev); | ||
340 | int i; | ||
341 | |||
342 | ion_device_destroy(idev); | ||
343 | for (i = 0; i < num_heaps; i++) | ||
344 | ion_heap_destroy(heaps[i]); | ||
345 | kfree(heaps); | ||
346 | return 0; | ||
347 | } | ||
348 | |||
/* Platform driver glue; matched by the "ion-tegra" platform device. */
static struct platform_driver ion_driver = {
	.probe = tegra_ion_probe,
	.remove = tegra_ion_remove,
	.driver = { .name = "ion-tegra" }
};
354 | |||
/* Register the driver early (fs_initcall) so clients can rely on it. */
static int __init ion_init(void)
{
	return platform_driver_register(&ion_driver);
}

/* Module unload: unregister the platform driver. */
static void __exit ion_exit(void)
{
	platform_driver_unregister(&ion_driver);
}
364 | |||
365 | fs_initcall(ion_init); | ||
366 | module_exit(ion_exit); | ||
367 | |||
368 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
/*
 * nvmap compatibility shims: when CONFIG_TEGRA_NVMAP is not set, the
 * nvmap API is emulated on top of ion.  nvmap clients are ion clients
 * and nvmap handle refs are ion handles, cast back and forth.
 */
struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
					 const char *name)
{
	/* 'dev' is really the ion_device created at probe time */
	return ion_client_create(dev, CLIENT_HEAP_MASK, name);
}

struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
				     size_t align, unsigned int flags,
				     unsigned int heap_mask)
{
	/* NOTE(review): 'flags' and 'heap_mask' are ignored; the fixed
	 * HEAP_FLAGS mask is passed instead — confirm callers don't rely
	 * on heap placement. */
	return ion_alloc(client, size, align, HEAP_FLAGS);
}

/* Release an allocation obtained from nvmap_alloc(). */
void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
	ion_free(client, r);
}

/* Map the buffer into the kernel and return its virtual address. */
void *nvmap_mmap(struct nvmap_handle_ref *r)
{
	return ion_map_kernel(r->client, r);
}

/* Undo nvmap_mmap(); 'addr' is unused because ion tracks the mapping. */
void nvmap_munmap(struct nvmap_handle_ref *r, void *addr)
{
	ion_unmap_kernel(r->client, r);
}

/* Look up the client owning an open ion file descriptor. */
struct nvmap_client *nvmap_client_get_file(int fd)
{
	return ion_client_get_file(fd);
}

/* Take an extra reference on a client and return it for chaining. */
struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
{
	ion_client_get(client);
	return client;
}

/* Drop a client reference. */
void nvmap_client_put(struct nvmap_client *c)
{
	ion_client_put(c);
}
412 | |||
/*
 * Pin: take a reference on the handle and return its physical address.
 * NOTE(review): the ion_phys() return value is ignored; on failure
 * 'addr' is returned uninitialized — verify callers tolerate this.
 */
phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r)
{
	ion_phys_addr_t addr;
	size_t len;

	ion_handle_get(r);
	ion_phys(c, r, &addr, &len);
	wmb();	/* order prior CPU writes before any device access */
	return addr;
}

/* Translate a user-visible id to its physical address (no ref taken). */
phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
	struct ion_handle *handle;
	ion_phys_addr_t addr;
	size_t len;

	handle = nvmap_convert_handle_u2k(id);
	ion_phys(c, handle, &addr, &len);
	return addr;
}

/* Drop the reference taken by nvmap_pin(); NULL is tolerated. */
void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
	if (r)
		ion_handle_put(r);
}
440 | |||
441 | static int nvmap_reloc_pin_array(struct ion_client *client, | ||
442 | const struct nvmap_pinarray_elem *arr, | ||
443 | int nr, struct ion_handle *gather) | ||
444 | { | ||
445 | struct ion_handle *last_patch = NULL; | ||
446 | void *patch_addr; | ||
447 | ion_phys_addr_t pin_addr; | ||
448 | size_t len; | ||
449 | int i; | ||
450 | |||
451 | for (i = 0; i < nr; i++) { | ||
452 | struct ion_handle *patch; | ||
453 | struct ion_handle *pin; | ||
454 | ion_phys_addr_t reloc_addr; | ||
455 | |||
456 | /* all of the handles are validated and get'ted prior to | ||
457 | * calling this function, so casting is safe here */ | ||
458 | pin = (struct ion_handle *)arr[i].pin_mem; | ||
459 | |||
460 | if (arr[i].patch_mem == (unsigned long)last_patch) { | ||
461 | patch = last_patch; | ||
462 | } else if (arr[i].patch_mem == (unsigned long)gather) { | ||
463 | patch = gather; | ||
464 | } else { | ||
465 | if (last_patch) | ||
466 | ion_handle_put(last_patch); | ||
467 | |||
468 | ion_handle_get((struct ion_handle *)arr[i].patch_mem); | ||
469 | patch = (struct ion_handle *)arr[i].patch_mem; | ||
470 | if (!patch) | ||
471 | return -EPERM; | ||
472 | last_patch = patch; | ||
473 | } | ||
474 | |||
475 | patch_addr = ion_map_kernel(client, patch); | ||
476 | patch_addr = patch_addr + arr[i].patch_offset; | ||
477 | |||
478 | ion_phys(client, pin, &pin_addr, &len); | ||
479 | reloc_addr = pin_addr + arr[i].pin_offset; | ||
480 | __raw_writel(reloc_addr, patch_addr); | ||
481 | ion_unmap_kernel(client, patch); | ||
482 | } | ||
483 | |||
484 | if (last_patch) | ||
485 | ion_handle_put(last_patch); | ||
486 | |||
487 | wmb(); | ||
488 | return 0; | ||
489 | } | ||
490 | |||
/*
 * Pin every handle referenced by the relocation array, record them in
 * 'unique', then apply the relocations.  Returns nr, the number of
 * entries processed.
 */
int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
		    const struct nvmap_pinarray_elem *arr, int nr,
		    struct nvmap_handle **unique)
{
	int i;
	int count = 0;

	/* FIXME: take care of duplicate ones & validation. */
	for (i = 0; i < nr; i++) {
		unique[i] = (struct nvmap_handle *)arr[i].pin_mem;
		nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]);
		count++;
	}
	/* NOTE(review): the reloc return value is discarded — a failed
	 * relocation is reported as success. */
	nvmap_reloc_pin_array((struct ion_client *)client,
			      arr, nr, (struct ion_handle *)gather);
	return nr;
}
508 | |||
/* Drop one pin reference on each of the nr handles in h[]. */
void nvmap_unpin_handles(struct nvmap_client *client,
			 struct nvmap_handle **h, int nr)
{
	int idx = 0;

	while (idx < nr)
		nvmap_unpin(client, h[idx++]);
}
517 | |||
/*
 * Write a single 32-bit value into 'patch' at patch_offset, followed by
 * a write barrier so the store is visible before later device work.
 * NOTE(review): the ion_map_kernel() result is used unchecked — an
 * ERR_PTR here would be dereferenced.
 */
int nvmap_patch_word(struct nvmap_client *client,
		     struct nvmap_handle *patch,
		     u32 patch_offset, u32 patch_value)
{
	void *vaddr;
	u32 *patch_addr;

	vaddr = ion_map_kernel(client, patch);
	patch_addr = vaddr + patch_offset;
	__raw_writel(patch_value, patch_addr);
	wmb();
	ion_unmap_kernel(client, patch);
	return 0;
}
532 | |||
/* Forward declaration — defined later in this compatibility section. */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
/*
 * Convert a user-visible id to a handle and take a reference on it.
 */
struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
					 unsigned long id)
{
	struct ion_handle *handle;

	handle = (struct ion_handle *)nvmap_convert_handle_u2k(id);
	pr_debug("id=0x%x, h=0x%x,c=0x%x",
		 (u32)id, (u32)handle, (u32)client);
	nvmap_handle_get(handle);
	return handle;
}
545 | |||
/*
 * Create a new client-local handle referring to the same buffer as the
 * handle identified by 'id', add it to the client's handle set, and
 * return it.  NOTE(review): the ion_handle_create() result is not
 * checked for failure before ion_handle_add() — verify upstream callers.
 */
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
						   unsigned long id)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	struct ion_client *ion_client = client;

	handle = (struct ion_handle *)nvmap_convert_handle_u2k(id);
	pr_debug("id=0x%x, h=0x%x,c=0x%x",
		 (u32)id, (u32)handle, (u32)client);
	buffer = handle->buffer;

	handle = ion_handle_create(client, buffer);

	/* the client's handle list is protected by its lock */
	mutex_lock(&ion_client->lock);
	ion_handle_add(ion_client, handle);
	mutex_unlock(&ion_client->lock);

	pr_debug("dup id=0x%x, h=0x%x", (u32)id, (u32)handle);
	return handle;
}
567 | |||
/* Final free of a handle: drop the last ion reference. */
void _nvmap_handle_free(struct nvmap_handle *h)
{
	ion_handle_put(h);
}

/*
 * Allocate a buffer intended for IOVM use and remap its DMA mapping to
 * start at iova_start.  NOTE(review): the ion_alloc() result is not
 * checked before ion_remap_dma() — an ERR_PTR would be passed through.
 */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
	size_t size, size_t align, unsigned int flags, unsigned int iova_start)
{
	struct ion_handle *h;

	h = ion_alloc(client, size, align, 0xFF);
	ion_remap_dma(client, h, iova_start);
	return h;
}

/* Free an IOVM allocation obtained from nvmap_alloc_iovm(). */
void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
	ion_free(client, r);
}

/* Take a reference on a handle and return it for chaining. */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
	ion_handle_get(h);
	return h;
}

/* Drop a handle reference. */
void nvmap_handle_put(struct nvmap_handle *h)
{
	ion_handle_put(h);
}
598 | |||
599 | #endif | ||