Diffstat (limited to 'drivers/gpu')
154 files changed, 10327 insertions, 2131 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd..ca2d3b34dbf 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/ stub/
+obj-y += drm/ vga/ stub/ ion/
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 3f46772f0cb..ba23790450e 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
 	struct drm_magic_entry *pt;
 	struct drm_hash_item *hash;
@@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
  * If there is a magic number in drm_file::magic then use it, otherwise
  * searches an unique non-zero magic number and add it associating it with \p
  * file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
  * \return zero if authentication successed, or a negative number otherwise.
  *
  * Checks if \p file_priv is associated with the magic number passed in \arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_authmagic(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
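Un-staticing drm_remove_magic() lets drm_release() (see the drm_fops.c hunk below) drop a client's auth token when its file closes. For context, a minimal sketch of the userspace half of this handshake, using the libdrm wrappers around the get/auth magic ioctls (error handling trimmed):

#include <xf86drm.h>	/* libdrm: drmGetMagic(), drmAuthMagic() */

/* A DRI client fetches a magic token from its fd (drm_getmagic) and
 * hands it to the DRM master, which blesses it (drm_authmagic). */
static int authenticate_client(int master_fd, int client_fd)
{
	drm_magic_t magic;

	if (drmGetMagic(client_fd, &magic))
		return -1;
	return drmAuthMagic(master_fd, magic);
}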
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82db1850666..2410c4078f3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
+	dev->mode_config.num_connector--;
 	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &encoder->base);
 	list_del(&encoder->head);
+	dev->mode_config.num_encoder--;
 	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -1866,6 +1868,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	}
 
 	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
 		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 		if (!clips) {
 			ret = -ENOMEM;
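The new bounds check matters because num_clips comes from userspace: without it, num_clips * sizeof(*clips) can wrap around and kzalloc() would return a buffer smaller than the copy that follows. A userspace-style sketch of the same guard (the MAX_CLIPS value here is illustrative, not the kernel's DRM_MODE_FB_DIRTY_MAX_CLIPS):

#include <stdint.h>
#include <stdlib.h>

struct clip_rect { uint16_t x1, y1, x2, y2; };

#define MAX_CLIPS 256	/* illustrative cap, not the kernel's value */

static void *alloc_clips(int num_clips)
{
	/* reject negative and oversized counts before the multiply,
	 * so num_clips * sizeof(struct clip_rect) cannot overflow */
	if (num_clips < 0 || num_clips > MAX_CLIPS)
		return NULL;
	return calloc(num_clips, sizeof(struct clip_rect));
}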
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 92369655dca..f88a9b2c977 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			mode_changed = true;
 		} else
 			fb_changed = true;
 	}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d8c892d07c..9d2668a5087 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 	struct drm_device *dev = minor->dev;
 	struct dentry *ent;
 	struct drm_info_node *tmp;
-	char name[64];
 	int i, ret;
 
 	for (i = 0; i < count; i++) {
@@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 					  root, tmp, &drm_debugfs_fops);
 		if (!ent) {
+			char name[64];
+			strncpy(name, root->d_name.name,
+				min(root->d_name.len, 64U));
 			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
 				  name, files[i].name);
 			kfree(tmp);
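Moving name[64] into the error branch keeps the stack frame small on the common path. One caveat with the copy itself: strncpy() leaves the buffer unterminated when the source fills it, so DRM_ERROR() may read past the array for long directory names. A terminating variant, as a sketch:

#include <string.h>

/* Bounded copy that always NUL-terminates, unlike bare strncpy(). */
static void copy_name(char *dst, size_t dst_size,
		      const char *src, size_t src_len)
{
	size_t n = (src_len < dst_size - 1) ? src_len : dst_size - 1;

	memcpy(dst, src, n);
	dst[n] = '\0';
}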
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 09292193daf..7425e5c9bd7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -127,6 +127,23 @@ static const u8 edid_header[] = {
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
 };
 
+/*
+ * Sanity check the header of the base EDID block.  Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+	int i, score = 0;
+
+	for (i = 0; i < sizeof(edid_header); i++)
+		if (raw_edid[i] == edid_header[i])
+			score++;
+
+	return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
+
+
 /*
  * Sanity check the EDID block (base or extension).  Return 0 if the block
  * doesn't check out, or 1 if it's valid.
@@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid)
 	struct edid *edid = (struct edid *)raw_edid;
 
 	if (raw_edid[0] == 0x00) {
-		int score = 0;
-
-		for (i = 0; i < sizeof(edid_header); i++)
-			if (raw_edid[i] == edid_header[i])
-				score++;
-
+		int score = drm_edid_header_is_valid(raw_edid);
 		if (score == 8) ;
 		else if (score >= 6) {
 			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
@@ -185,8 +197,8 @@ drm_edid_block_valid(u8 *raw_edid)
 bad:
 	if (raw_edid) {
 		printk(KERN_ERR "Raw EDID:\n");
-		print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
-		printk(KERN_ERR "\n");
+		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+			       raw_edid, EDID_LENGTH, false);
 	}
 	return 0;
 }
@@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
 static void drm_add_display_info(struct edid *edid,
 				 struct drm_display_info *info)
 {
+	u8 *edid_ext;
+
 	info->width_mm = edid->width_cm * 10;
 	info->height_mm = edid->height_cm * 10;
 
@@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid,
 		info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
 	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
 		info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
+
+	/* Get data from CEA blocks if present */
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return;
+
+	info->cea_rev = edid_ext[1];
 }
 
 /**
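The factored-out drm_edid_header_is_valid() turns header validation into a score, so callers can distinguish a perfect header (8) from one that is merely repairable (>= 6). A standalone, compilable re-statement of the idea:

#include <stdio.h>

static const unsigned char edid_header[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* Count matching header bytes: 8 = perfect, >= 6 = fixable, else junk. */
static int edid_header_score(const unsigned char *raw)
{
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (raw[i] == edid_header[i])
			score++;
	return score;
}

int main(void)
{
	unsigned char good[8]  = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char flaky[8] = { 0x00, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x00 };

	printf("good=%d flaky=%d\n", edid_header_score(good),
	       edid_header_score(flaky));
	return 0;
}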
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61ac313..f7c6854eb4d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 {
 	printk(KERN_ERR "panic occurred, switching back to text console\n");
 	return drm_fb_helper_force_kernel_mode();
-	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_panic);
 
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 2ec7d48fc4a..c42e12cc2dd 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -486,6 +486,11 @@ int drm_release(struct inode *inode, struct file *filp)
 		  (long)old_encode_dev(file_priv->minor->device),
 		  dev->open_count);
 
+	/* Release any auth tokens that might point to this file_priv,
+	   (do that under the drm_global_mutex) */
+	if (file_priv->magic)
+		(void) drm_remove_magic(file_priv->master, file_priv->magic);
+
 	/* if the master has gone away we can't do anything with the lock */
 	if (file_priv->minor->master)
 		drm_master_release(dev, filp);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4012fe42346..186d62eb063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -129,7 +129,7 @@ drm_gem_destroy(struct drm_device *dev)
 }
 
 /**
- * Initialize an already allocate GEM object of the specified size with
+ * Initialize an already allocated GEM object of the specified size with
  * shmfs backing store.
  */
 int drm_gem_object_init(struct drm_device *dev,
@@ -151,6 +151,27 @@ int drm_gem_object_init(struct drm_device *dev,
 EXPORT_SYMBOL(drm_gem_object_init);
 
 /**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
+{
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj->dev = dev;
+	obj->filp = NULL;
+
+	kref_init(&obj->refcount);
+	atomic_set(&obj->handle_count, 0);
+	obj->size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+/**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
 struct drm_gem_object *
@@ -211,6 +232,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
@@ -227,7 +250,8 @@ drm_gem_handle_create(struct drm_file *file_priv,
 		      struct drm_gem_object *obj,
 		      u32 *handlep)
 {
-	int ret;
+	struct drm_device *dev = obj->dev;
+	int ret;
 
 	/*
 	 * Get the user-visible handle using idr.
@@ -248,6 +272,15 @@ again:
 		return ret;
 
 	drm_gem_object_handle_reference(obj);
+
+	if (dev->driver->gem_open_object) {
+		ret = dev->driver->gem_open_object(obj, file_priv);
+		if (ret) {
+			drm_gem_handle_delete(file_priv, *handlep);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
@@ -402,7 +435,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 static int
 drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
+	struct drm_file *file_priv = data;
 	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
 
 	drm_gem_object_handle_unreference_unlocked(obj);
 
@@ -418,7 +456,7 @@ void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
 	idr_for_each(&file_private->object_idr,
-		     &drm_gem_object_release_handle, NULL);
+		     &drm_gem_object_release_handle, file_private);
 
 	idr_remove_all(&file_private->object_idr);
 	idr_destroy(&file_private->object_idr);
@@ -427,7 +465,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	fput(obj->filp);
+	if (obj->filp)
+		fput(obj->filp);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
 
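The new gem_open_object/gem_close_object driver hooks fire once per handle, with drm_gem_handle_create() rolling the handle back if the open hook fails. A sketch of how a driver might wire them up (driver and function names are hypothetical):

static int mydrv_gem_open(struct drm_gem_object *obj,
			  struct drm_file *file_priv)
{
	/* set up per-file state for this object; a failure here makes
	 * drm_gem_handle_create() delete the just-created handle */
	return 0;
}

static void mydrv_gem_close(struct drm_gem_object *obj,
			    struct drm_file *file_priv)
{
	/* tear down whatever mydrv_gem_open() created */
}

static struct drm_driver mydrv_driver = {
	/* ... */
	.gem_open_object  = mydrv_gem_open,
	.gem_close_object = mydrv_gem_close,
};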
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2022a5c966b..3830e9e478c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
 	if (!dev->irq_enabled)
 		return;
 
-	if (state)
-		dev->driver->irq_uninstall(dev);
-	else {
-		dev->driver->irq_preinstall(dev);
-		dev->driver->irq_postinstall(dev);
+	if (state) {
+		if (dev->driver->irq_uninstall)
+			dev->driver->irq_uninstall(dev);
+	} else {
+		if (dev->driver->irq_preinstall)
+			dev->driver->irq_preinstall(dev);
+		if (dev->driver->irq_postinstall)
+			dev->driver->irq_postinstall(dev);
 	}
 }
 
@@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev)
 	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
 
 	/* Before installing handler */
-	dev->driver->irq_preinstall(dev);
+	if (dev->driver->irq_preinstall)
+		dev->driver->irq_preinstall(dev);
 
 	/* Install handler */
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
@@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev)
 		vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
 
 	/* After installing handler */
-	ret = dev->driver->irq_postinstall(dev);
+	if (dev->driver->irq_postinstall)
+		ret = dev->driver->irq_postinstall(dev);
+
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
 		mutex_unlock(&dev->struct_mutex);
+		if (!drm_core_check_feature(dev, DRIVER_MODESET))
+			vga_client_register(dev->pdev, NULL, NULL, NULL);
+		free_irq(drm_dev_to_irq(dev), dev);
 	}
 
 	return ret;
@@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 
-	dev->driver->irq_uninstall(dev);
+	if (dev->driver->irq_uninstall)
+		dev->driver->irq_uninstall(dev);
 
 	free_irq(drm_dev_to_irq(dev), dev);
 
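With irq_preinstall/irq_postinstall/irq_uninstall now checked for NULL, a driver with no special setup work can register only the handler itself. A hedged sketch (driver and handler names hypothetical):

static irqreturn_t mydrv_irq_handler(int irq, void *arg)
{
	/* acknowledge and handle the device interrupt */
	return IRQ_HANDLED;
}

static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_HAVE_IRQ,
	.irq_handler	 = mydrv_irq_handler,
	/* .irq_preinstall/.irq_postinstall/.irq_uninstall omitted:
	 * drm_irq_install()/drm_irq_uninstall() now tolerate NULL */
};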
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c2d32f20e2f..ad74fb4dc54 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -994,9 +994,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 {
 	const char *name;
 	unsigned int namelen;
-	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+	bool res_specified = false, bpp_specified = false, refresh_specified = false;
 	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
-	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+	bool yres_specified = false, cvt = false, rb = false;
+	bool interlace = false, margins = false, was_digit = false;
 	int i;
 	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
 
@@ -1015,54 +1016,65 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	for (i = namelen-1; i >= 0; i--) {
 		switch (name[i]) {
 		case '@':
-			namelen = i;
 			if (!refresh_specified && !bpp_specified &&
-			    !yres_specified) {
+			    !yres_specified && !cvt && !rb && was_digit) {
 				refresh = simple_strtol(&name[i+1], NULL, 10);
-				refresh_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
+				refresh_specified = true;
+				was_digit = false;
 			} else
 				goto done;
 			break;
 		case '-':
-			namelen = i;
-			if (!bpp_specified && !yres_specified) {
+			if (!bpp_specified && !yres_specified && !cvt &&
+			    !rb && was_digit) {
 				bpp = simple_strtol(&name[i+1], NULL, 10);
-				bpp_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
+				bpp_specified = true;
+				was_digit = false;
 			} else
 				goto done;
 			break;
 		case 'x':
-			if (!yres_specified) {
+			if (!yres_specified && was_digit) {
 				yres = simple_strtol(&name[i+1], NULL, 10);
-				yres_specified = 1;
+				yres_specified = true;
+				was_digit = false;
 			} else
 				goto done;
 		case '0' ... '9':
+			was_digit = true;
 			break;
 		case 'M':
-			if (!yres_specified)
-				cvt = 1;
+			if (yres_specified || cvt || was_digit)
+				goto done;
+			cvt = true;
 			break;
 		case 'R':
-			if (cvt)
-				rb = 1;
+			if (yres_specified || cvt || rb || was_digit)
+				goto done;
+			rb = true;
 			break;
 		case 'm':
-			if (!cvt)
-				margins = 1;
+			if (cvt || yres_specified || was_digit)
+				goto done;
+			margins = true;
 			break;
 		case 'i':
-			if (!cvt)
-				interlace = 1;
+			if (cvt || yres_specified || was_digit)
+				goto done;
+			interlace = true;
 			break;
 		case 'e':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
 			force = DRM_FORCE_ON;
 			break;
 		case 'D':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
 			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
 			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
 				force = DRM_FORCE_ON;
@@ -1070,17 +1082,37 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 				force = DRM_FORCE_ON_DIGITAL;
 			break;
 		case 'd':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
 			force = DRM_FORCE_OFF;
 			break;
 		default:
 			goto done;
 		}
 	}
+
 	if (i < 0 && yres_specified) {
-		xres = simple_strtol(name, NULL, 10);
-		res_specified = 1;
+		char *ch;
+		xres = simple_strtol(name, &ch, 10);
+		if ((ch != NULL) && (*ch == 'x'))
+			res_specified = true;
+		else
+			i = ch - name;
+	} else if (!yres_specified && was_digit) {
+		/* catch mode that begins with digits but has no 'x' */
+		i = 0;
 	}
 done:
+	if (i >= 0) {
+		printk(KERN_WARNING
+			"parse error at position %i in video mode '%s'\n",
+			i, name);
+		mode->specified = false;
+		return false;
+	}
+
 	if (res_specified) {
 		mode->specified = true;
 		mode->xres = xres;
@@ -1096,9 +1128,10 @@ done:
 		mode->bpp_specified = true;
 		mode->bpp = bpp;
 	}
-	mode->rb = rb ? true : false;
-	mode->cvt = cvt ? true : false;
-	mode->interlace = interlace ? true : false;
+	mode->rb = rb;
+	mode->cvt = cvt;
+	mode->interlace = interlace;
+	mode->margins = margins;
 	mode->force = force;
 
 	return true;
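The key change is that xres is now parsed with an end pointer, so trailing junk (or an option string that merely starts with digits) is reported as a parse error instead of being half-accepted. The same endptr idiom in standalone C:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Accept the leading width of "1024x768" only when the digits are
 * really terminated by 'x', mirroring the check added above. */
static bool parse_xres(const char *name, unsigned int *xres)
{
	char *ch;
	unsigned long v = strtoul(name, &ch, 10);

	if (ch == name || *ch != 'x')
		return false;	/* e.g. "NONE" or "1024foo" */
	*xres = (unsigned int)v;
	return true;
}

int main(void)
{
	unsigned int x = 0;

	printf("1024x768 -> %d\n", parse_xres("1024x768", &x));
	printf("NONE     -> %d\n", parse_xres("NONE", &x));
	return 0;
}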
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 7223f06d8e5..2a8b6265ad3 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -123,14 +123,15 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
 {
 	int len, ret;
 
-	master->unique_len = 10 + strlen(dev->platformdev->name);
+	master->unique_len = 13 + strlen(dev->platformdev->name);
+	master->unique_size = master->unique_len;
 	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
 
 	if (master->unique == NULL)
 		return -ENOMEM;
 
 	len = snprintf(master->unique, master->unique_len,
-		"platform:%s", dev->platformdev->name);
+		"platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
 
 	if (len > master->unique_len) {
 		DRM_ERROR("Unique buffer overflowed\n");
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d15e09b0ae0..7525e0311e5 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -83,30 +83,26 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	if (dev->sg)
 		return -EINVAL;
 
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
-	memset(entry, 0, sizeof(*entry));
 	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
 	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
 
 	entry->pages = pages;
-	entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
+	entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
 	if (!entry->pagelist) {
 		kfree(entry);
 		return -ENOMEM;
 	}
 
-	memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
-
-	entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
+	entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
 	if (!entry->busaddr) {
 		kfree(entry->pagelist);
 		kfree(entry);
 		return -ENOMEM;
 	}
-	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
 	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
 	if (!entry->virtual) {
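kzalloc()/kcalloc() replace the kmalloc()+memset() pairs here; kcalloc() additionally checks n * size for overflow before allocating. The userspace analogue of the cleanup:

#include <stdlib.h>
#include <string.h>

/* before: zeroing by hand, with an unchecked multiply */
static void *alloc_table_old(size_t n, size_t size)
{
	void *p = malloc(n * size);	/* n * size may silently wrap */

	if (p)
		memset(p, 0, n * size);
	return p;
}

/* after: calloc() zeroes and overflow-checks in one call,
 * just as kcalloc() does in the kernel */
static void *alloc_table_new(size_t n, size_t size)
{
	return calloc(n, size);
}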
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a893f7400f..3c395a59da3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		if (IS_GEN6(dev)) {
+		if (IS_GEN6(dev) || IS_GEN7(dev)) {
 			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
 				   dev_priv->ring[i].name,
 				   I915_READ_IMR(&dev_priv->ring[i]));
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_GEN6(dev)) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	int gpu_freq, ia_freq;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+		seq_printf(m, "unsupported on this chipset\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	     gpu_freq++) {
+		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
+		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+			      GEN6_PCODE_READY) == 0, 10)) {
+			DRM_ERROR("pcode read of freq table timed out\n");
+			continue;
+		}
+		ia_freq = I915_READ(GEN6_PCODE_DATA);
+		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i915_gfxec(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1300,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = {
 	.llseek = default_llseek,
 };
 
+static int
+i915_max_freq_open(struct inode *inode,
+		   struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+i915_max_freq_read(struct file *filp,
+		   char __user *ubuf,
+		   size_t max,
+		   loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+
+	len = snprintf(buf, sizeof (buf),
+		       "max freq: %d\n", dev_priv->max_delay * 50);
+
+	if (len > sizeof (buf))
+		len = sizeof (buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_max_freq_write(struct file *filp,
+		    const char __user *ubuf,
+		    size_t cnt,
+		    loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 1;
+
+	if (cnt > 0) {
+		if (cnt > sizeof (buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+
+	/*
+	 * Turbo will still be enabled, but won't go above the set value.
+	 */
+	dev_priv->max_delay = val / 50;
+
+	gen6_set_rps(dev, val / 50);
+
+	return cnt;
+}
+
+static const struct file_operations i915_max_freq_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_max_freq_open,
+	.read = i915_max_freq_read,
+	.write = i915_max_freq_write,
+	.llseek = default_llseek,
+};
+
+static int
+i915_cache_sharing_open(struct inode *inode,
+			struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+i915_cache_sharing_read(struct file *filp,
+			char __user *ubuf,
+			size_t max,
+			loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	u32 snpcr;
+	int len;
+
+	mutex_lock(&dev_priv->dev->struct_mutex);
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+
+	len = snprintf(buf, sizeof (buf),
+		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
+		       GEN6_MBC_SNPCR_SHIFT);
+
+	if (len > sizeof (buf))
+		len = sizeof (buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_cache_sharing_write(struct file *filp,
+			 const char __user *ubuf,
+			 size_t cnt,
+			 loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	char buf[20];
+	u32 snpcr;
+	int val = 1;
+
+	if (cnt > 0) {
+		if (cnt > sizeof (buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	if (val < 0 || val > 3)
+		return -EINVAL;
+
+	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+
+	/* Update the cache sharing policy here as well */
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	snpcr &= ~GEN6_MBC_SNPCR_MASK;
+	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+	return cnt;
+}
+
+static const struct file_operations i915_cache_sharing_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_cache_sharing_open,
+	.read = i915_cache_sharing_read,
+	.write = i915_cache_sharing_write,
+	.llseek = default_llseek,
+};
+
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
 static int
@@ -1399,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 }
 
+static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_max_freq",
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  &i915_max_freq_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
+}
+
+static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_cache_sharing",
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  &i915_cache_sharing_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+}
+
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
@@ -1430,6 +1647,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_ring_freq_table", i915_ring_freq_table, 0},
 	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -1451,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 	ret = i915_forcewake_create(minor->debugfs_root, minor);
 	if (ret)
 		return ret;
+	ret = i915_max_freq_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
 
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
@@ -1465,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
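Both new files follow the same debugfs pattern: stash the drm_device in private_data at open, format into a stack buffer, and let simple_read_from_buffer() handle offsets and short reads. A stripped-down sketch of that pattern (the "example" attribute is hypothetical); note that scnprintf(), unlike snprintf(), returns the number of bytes actually written, which makes the manual "if (len > sizeof (buf))" clamp above unnecessary:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;	/* e.g. the drm_device */
	return 0;
}

static ssize_t example_read(struct file *filp, char __user *ubuf,
			    size_t max, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "value: %d\n", 42);

	/* handles *ppos bookkeeping and partial reads for us */
	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
	.read	= example_read,
	.llseek	= default_llseek,
};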
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 296fbd66f0e..c72b590f7d8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
@@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr =
-		(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
 
-	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+	memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
+		  0, PAGE_SIZE);
 
 	i915_write_hws_pga(dev);
 
@@ -1073,6 +1071,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	unsigned long cfb_base;
 	unsigned long ll_base = 0;
 
+	/* Just in case the BIOS is doing something questionable. */
+	intel_disable_fbc(dev);
+
 	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
 	if (compressed_fb)
 		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1099,7 +1100,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	dev_priv->cfb_size = size;
 
-	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
@@ -1453,6 +1453,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
 	diff1 = now - dev_priv->last_time1;
 
+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->chipset_power;
+
 	count1 = I915_READ(DMIEC);
 	count2 = I915_READ(DDREC);
 	count3 = I915_READ(CSIEC);
@@ -1483,6 +1491,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	dev_priv->last_count1 = total_count;
 	dev_priv->last_time1 = now;
 
+	dev_priv->chipset_power = ret;
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eb91e2dd791..f07e4252b70 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,38 +37,70 @@
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 
-static int i915_modeset = -1;
+static int i915_modeset __read_mostly = -1;
 module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+		"1=on, -1=force vga console preference [default])");
 
-unsigned int i915_fbpercrtc = 0;
+unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid = 0;
+int i915_panel_ignore_lid __read_mostly = 0;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+		"Override lid status (0=autodetect [default], 1=lid open, "
+		"-1=lid closed)");
 
-unsigned int i915_powersave = 1;
+unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+		"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores __read_mostly = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+		"Use semaphores for inter-ring sync (default: false)");
 
-unsigned int i915_enable_rc6 = 0;
+unsigned int i915_enable_rc6 __read_mostly = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+MODULE_PARM_DESC(i915_enable_rc6,
+		"Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc = 0;
+unsigned int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+		"Enable frame buffer compression for power savings "
+		"(default: -1 (use per-chip default))");
 
-unsigned int i915_lvds_downclock = 0;
+unsigned int i915_lvds_downclock __read_mostly = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+		"Use panel (LVDS/eDP) downclocking for power savings "
+		"(default: false)");
 
-unsigned int i915_panel_use_ssc = 1;
+unsigned int i915_panel_use_ssc __read_mostly = 1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
+		"(default: true)");
 
-int i915_vbt_sdvo_panel_type = -1;
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
 module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+		"Override selection of SDVO panel mode in the VBT "
+		"(default: auto)");
 
-static bool i915_try_reset = true;
+static bool i915_try_reset __read_mostly = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+bool i915_enable_hangcheck __read_mostly = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+		"Periodically check GPU activity for detecting hangs. "
+		"WARNING: Disabling this can cause system wide hangs. "
+		"(default: true)");
 
 static struct drm_driver driver;
 extern int intel_agp_enabled;
@@ -345,12 +377,17 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
-	int loop = 500;
-	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-	while (fifo < 20 && loop--) {
-		udelay(10);
-		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
+		int loop = 500;
+		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+			udelay(10);
+			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		}
+		WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+		dev_priv->gt_fifo_count = fifo;
 	}
+	dev_priv->gt_fifo_count--;
 }
 
 static int i915_drm_freeze(struct drm_device *dev)
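Every module parameter gains a MODULE_PARM_DESC() so "modinfo i915" documents it, and the variables are tagged __read_mostly to group them away from write-hot data. The pattern in isolation (parameter name hypothetical):

#include <linux/module.h>

static int example_tuning __read_mostly = -1;
module_param_named(tuning, example_tuning, int, 0600);
MODULE_PARM_DESC(tuning,
	"Example tunable (-1=auto [default], 0=off, 1=on)");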
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce7914c4c04..1a2a2d1790b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <drm/intel-gtt.h>
+#include <linux/backlight.h>
 
 /* General customization:
  */
@@ -214,6 +215,8 @@ struct drm_i915_display_funcs {
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj);
+	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    int x, int y);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -265,6 +268,7 @@ enum intel_pch {
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 
 struct intel_fbdev;
+struct intel_fbc_work;
 
 typedef struct drm_i915_private {
 	struct drm_device *dev;
@@ -275,6 +279,7 @@ typedef struct drm_i915_private {
 	int relative_constants_mode;
 
 	void __iomem *regs;
+	u32 gt_fifo_count;
 
 	struct intel_gmbus {
 		struct i2c_adapter adapter;
@@ -329,11 +334,10 @@ typedef struct drm_i915_private {
 	uint32_t last_instdone1;
 
 	unsigned long cfb_size;
-	unsigned long cfb_pitch;
-	unsigned long cfb_offset;
-	int cfb_fence;
-	int cfb_plane;
+	unsigned int cfb_fb;
+	enum plane cfb_plane;
 	int cfb_y;
+	struct intel_fbc_work *fbc_work;
 
 	struct intel_opregion opregion;
 
@@ -541,6 +545,7 @@ typedef struct drm_i915_private {
 	u32 savePIPEB_LINK_M1;
 	u32 savePIPEB_LINK_N1;
 	u32 saveMCHBAR_RENDER_STANDBY;
+	u32 savePCH_PORT_HOTPLUG;
 
 	struct {
 		/** Bridge to intel-gtt-ko */
@@ -686,6 +691,7 @@ typedef struct drm_i915_private {
 	int child_dev_num;
 	struct child_device_config *child_dev;
 	struct drm_connector *int_lvds_connector;
+	struct drm_connector *int_edp_connector;
 
 	bool mchbar_need_disable;
 
@@ -701,6 +707,7 @@ typedef struct drm_i915_private {
 
 	u64 last_count1;
 	unsigned long last_time1;
+	unsigned long chipset_power;
 	u64 last_count2;
 	struct timespec last_time2;
 	unsigned long gfx_power;
@@ -719,6 +726,8 @@ typedef struct drm_i915_private {
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 
+	struct backlight_device *backlight;
+
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
 
@@ -986,15 +995,16 @@ struct drm_i915_file_private {
 
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern unsigned int i915_powersave;
-extern unsigned int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern unsigned int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern unsigned int i915_enable_rc6;
-extern unsigned int i915_enable_fbc;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern unsigned int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern unsigned int i915_enable_rc6 __read_mostly;
+extern unsigned int i915_enable_fbc __read_mostly;
+extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1164,7 +1174,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1183,7 +1193,8 @@ int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1184 | bool write); | 1194 | bool write); |
1185 | int __must_check | 1195 | int __must_check |
1186 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | 1196 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
1197 | u32 alignment, | ||
1187 | struct intel_ring_buffer *pipelined); | 1198 | struct intel_ring_buffer *pipelined); |
1188 | int i915_gem_attach_phys_object(struct drm_device *dev, | 1199 | int i915_gem_attach_phys_object(struct drm_device *dev, |
1189 | struct drm_i915_gem_object *obj, | 1200 | struct drm_i915_gem_object *obj, |
@@ -1199,9 +1210,14 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | |||
1199 | uint32_t size, | 1210 | uint32_t size, |
1200 | int tiling_mode); | 1211 | int tiling_mode); |
1201 | 1212 | ||
1213 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | ||
1214 | enum i915_cache_level cache_level); | ||
1215 | |||
1202 | /* i915_gem_gtt.c */ | 1216 | /* i915_gem_gtt.c */ |
1203 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1217 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1204 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1218 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
1219 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, | ||
1220 | enum i915_cache_level cache_level); | ||
1205 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); | 1221 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
1206 | 1222 | ||
1207 | /* i915_gem_evict.c */ | 1223 | /* i915_gem_evict.c */ |
@@ -1283,12 +1299,8 @@ extern void intel_modeset_init(struct drm_device *dev); | |||
1283 | extern void intel_modeset_gem_init(struct drm_device *dev); | 1299 | extern void intel_modeset_gem_init(struct drm_device *dev); |
1284 | extern void intel_modeset_cleanup(struct drm_device *dev); | 1300 | extern void intel_modeset_cleanup(struct drm_device *dev); |
1285 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 1301 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
1286 | extern void i8xx_disable_fbc(struct drm_device *dev); | ||
1287 | extern void g4x_disable_fbc(struct drm_device *dev); | ||
1288 | extern void ironlake_disable_fbc(struct drm_device *dev); | ||
1289 | extern void intel_disable_fbc(struct drm_device *dev); | ||
1290 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); | ||
1291 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1302 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1303 | extern void intel_disable_fbc(struct drm_device *dev); | ||
1292 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1304 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1293 | extern void ironlake_enable_rc6(struct drm_device *dev); | 1305 | extern void ironlake_enable_rc6(struct drm_device *dev); |
1294 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | 1306 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
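The new i915_enable_hangcheck extern above implies a matching module parameter in i915_drv.c, which this section does not show. A plausible definition, following the usual module_param_named() idiom (name, permissions and default are assumptions, not taken from this patch):

	/* Assumed definition in i915_drv.c, not part of this hunk: a
	 * writable module option so hangcheck can be turned off for
	 * debugging, defaulting to on. */
	bool i915_enable_hangcheck __read_mostly = true;
	module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
	MODULE_PARM_DESC(enable_hangcheck,
			 "Periodically check GPU activity for detecting hangs "
			 "(default: true).");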
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a087e1bf0c2..346d5574f0a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1475,7 +1475,7 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1475 | 1475 | ||
1476 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | 1476 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { |
1477 | ret = -E2BIG; | 1477 | ret = -E2BIG; |
1478 | goto unlock; | 1478 | goto out; |
1479 | } | 1479 | } |
1480 | 1480 | ||
1481 | if (obj->madv != I915_MADV_WILLNEED) { | 1481 | if (obj->madv != I915_MADV_WILLNEED) { |
@@ -1763,8 +1763,11 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1763 | ring->outstanding_lazy_request = false; | 1763 | ring->outstanding_lazy_request = false; |
1764 | 1764 | ||
1765 | if (!dev_priv->mm.suspended) { | 1765 | if (!dev_priv->mm.suspended) { |
1766 | mod_timer(&dev_priv->hangcheck_timer, | 1766 | if (i915_enable_hangcheck) { |
1767 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 1767 | mod_timer(&dev_priv->hangcheck_timer, |
1768 | jiffies + | ||
1769 | msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
1770 | } | ||
1768 | if (was_empty) | 1771 | if (was_empty) |
1769 | queue_delayed_work(dev_priv->wq, | 1772 | queue_delayed_work(dev_priv->wq, |
1770 | &dev_priv->mm.retire_work, HZ); | 1773 | &dev_priv->mm.retire_work, HZ); |
@@ -2135,6 +2138,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) | |||
2135 | return 0; | 2138 | return 0; |
2136 | } | 2139 | } |
2137 | 2140 | ||
2141 | static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) | ||
2142 | { | ||
2143 | u32 old_write_domain, old_read_domains; | ||
2144 | |||
2141 | 2145 | /* Act as a barrier for all accesses through the GTT */ | ||
2146 | mb(); | ||
2147 | |||
2148 | /* Force a pagefault for domain tracking on next user access */ | ||
2149 | i915_gem_release_mmap(obj); | ||
2150 | |||
2151 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) | ||
2152 | return; | ||
2153 | |||
2154 | old_read_domains = obj->base.read_domains; | ||
2155 | old_write_domain = obj->base.write_domain; | ||
2156 | |||
2157 | obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; | ||
2158 | obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; | ||
2159 | |||
2160 | trace_i915_gem_object_change_domain(obj, | ||
2161 | old_read_domains, | ||
2162 | old_write_domain); | ||
2163 | } | ||
2164 | |||
2138 | /** | 2165 | /** |
2139 | * Unbinds an object from the GTT aperture. | 2166 | * Unbinds an object from the GTT aperture. |
2140 | */ | 2167 | */ |
@@ -2151,23 +2178,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2151 | return -EINVAL; | 2178 | return -EINVAL; |
2152 | } | 2179 | } |
2153 | 2180 | ||
2154 | /* blow away mappings if mapped through GTT */ | 2181 | ret = i915_gem_object_finish_gpu(obj); |
2155 | i915_gem_release_mmap(obj); | ||
2156 | |||
2157 | /* Move the object to the CPU domain to ensure that | ||
2158 | * any possible CPU writes while it's not in the GTT | ||
2159 | * are flushed when we go to remap it. This will | ||
2160 | * also ensure that all pending GPU writes are finished | ||
2161 | * before we unbind. | ||
2162 | */ | ||
2163 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
2164 | if (ret == -ERESTARTSYS) | 2182 | if (ret == -ERESTARTSYS) |
2165 | return ret; | 2183 | return ret; |
2166 | /* Continue on if we fail due to EIO, the GPU is hung so we | 2184 | /* Continue on if we fail due to EIO, the GPU is hung so we |
2167 | * should be safe and we need to cleanup or else we might | 2185 | * should be safe and we need to cleanup or else we might |
2168 | * cause memory corruption through use-after-free. | 2186 | * cause memory corruption through use-after-free. |
2169 | */ | 2187 | */ |
2188 | |||
2189 | i915_gem_object_finish_gtt(obj); | ||
2190 | |||
2191 | /* Move the object to the CPU domain to ensure that | ||
2192 | * any possible CPU writes while it's not in the GTT | ||
2193 | * are flushed when we go to remap it. | ||
2194 | */ | ||
2195 | if (ret == 0) | ||
2196 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
2197 | if (ret == -ERESTARTSYS) | ||
2198 | return ret; | ||
2170 | if (ret) { | 2199 | if (ret) { |
2200 | /* In the event of a disaster, abandon all caches and | ||
2201 | * hope for the best. | ||
2202 | */ | ||
2171 | i915_gem_clflush_object(obj); | 2203 | i915_gem_clflush_object(obj); |
2172 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 2204 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2173 | } | 2205 | } |
@@ -2996,51 +3028,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
2996 | return 0; | 3028 | return 0; |
2997 | } | 3029 | } |
2998 | 3030 | ||
3031 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | ||
3032 | enum i915_cache_level cache_level) | ||
3033 | { | ||
3034 | int ret; | ||
3035 | |||
3036 | if (obj->cache_level == cache_level) | ||
3037 | return 0; | ||
3038 | |||
3039 | if (obj->pin_count) { | ||
3040 | DRM_DEBUG("cannot change the cache level of pinned objects\n"); | ||
3041 | return -EBUSY; | ||
3042 | } | ||
3043 | |||
3044 | if (obj->gtt_space) { | ||
3045 | ret = i915_gem_object_finish_gpu(obj); | ||
3046 | if (ret) | ||
3047 | return ret; | ||
3048 | |||
3049 | i915_gem_object_finish_gtt(obj); | ||
3050 | |||
3051 | /* Before SandyBridge, you could not use tiling or fence | ||
3052 | * registers with snooped memory, so relinquish any fences | ||
3053 | * currently pointing to our region in the aperture. | ||
3054 | */ | ||
3055 | if (INTEL_INFO(obj->base.dev)->gen < 6) { | ||
3056 | ret = i915_gem_object_put_fence(obj); | ||
3057 | if (ret) | ||
3058 | return ret; | ||
3059 | } | ||
3060 | |||
3061 | i915_gem_gtt_rebind_object(obj, cache_level); | ||
3062 | } | ||
3063 | |||
3064 | if (cache_level == I915_CACHE_NONE) { | ||
3065 | u32 old_read_domains, old_write_domain; | ||
3066 | |||
3067 | /* If we're coming from LLC cached, then we haven't | ||
3068 | * actually been tracking whether the data is in the | ||
3069 | * CPU cache or not, since we only allow one bit set | ||
3070 | * in obj->write_domain and have been skipping the clflushes. | ||
3071 | * Just set it to the CPU cache for now. | ||
3072 | */ | ||
3073 | WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); | ||
3074 | WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU); | ||
3075 | |||
3076 | old_read_domains = obj->base.read_domains; | ||
3077 | old_write_domain = obj->base.write_domain; | ||
3078 | |||
3079 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | ||
3080 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | ||
3081 | |||
3082 | trace_i915_gem_object_change_domain(obj, | ||
3083 | old_read_domains, | ||
3084 | old_write_domain); | ||
3085 | } | ||
3086 | |||
3087 | obj->cache_level = cache_level; | ||
3088 | return 0; | ||
3089 | } | ||
3090 | |||
2999 | /* | 3091 | /* |
3000 | * Prepare buffer for display plane. Use uninterruptible for possible flush | 3092 | * Prepare buffer for display plane (scanout, cursors, etc). |
3001 | * wait, as in modesetting process we're not supposed to be interrupted. | 3093 | * Can be called from an uninterruptible phase (modesetting) and allows |
3094 | * any flushes to be pipelined (for pageflips). | ||
3095 | * | ||
3096 | * For the display plane, we want to be in the GTT but out of any write | ||
3097 | * domains. So in many ways this looks like set_to_gtt_domain() apart from the | ||
3098 | * ability to pipeline the waits, pinning and any additional subtleties | ||
3099 | * that may differentiate the display plane from ordinary buffers. | ||
3002 | */ | 3100 | */ |
3003 | int | 3101 | int |
3004 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | 3102 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
3103 | u32 alignment, | ||
3005 | struct intel_ring_buffer *pipelined) | 3104 | struct intel_ring_buffer *pipelined) |
3006 | { | 3105 | { |
3007 | uint32_t old_read_domains; | 3106 | u32 old_read_domains, old_write_domain; |
3008 | int ret; | 3107 | int ret; |
3009 | 3108 | ||
3010 | /* Not valid to be called on unbound objects. */ | ||
3011 | if (obj->gtt_space == NULL) | ||
3012 | return -EINVAL; | ||
3013 | |||
3014 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3109 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3015 | if (ret) | 3110 | if (ret) |
3016 | return ret; | 3111 | return ret; |
3017 | 3112 | ||
3018 | |||
3019 | /* Currently, we are always called from an non-interruptible context. */ | ||
3020 | if (pipelined != obj->ring) { | 3113 | if (pipelined != obj->ring) { |
3021 | ret = i915_gem_object_wait_rendering(obj); | 3114 | ret = i915_gem_object_wait_rendering(obj); |
3022 | if (ret) | 3115 | if (ret == -ERESTARTSYS) |
3023 | return ret; | 3116 | return ret; |
3024 | } | 3117 | } |
3025 | 3118 | ||
3119 | /* The display engine is not coherent with the LLC cache on gen6. As | ||
3120 | * a result, we make sure that the pinning that is about to occur is | ||
3121 | * done with uncached PTEs. This is the lowest common denominator for all | ||
3122 | * chipsets. | ||
3123 | * | ||
3124 | * However for gen6+, we could do better by using the GFDT bit instead | ||
3125 | * of uncaching, which would allow us to flush all the LLC-cached data | ||
3126 | * with that bit in the PTE to main memory with just one PIPE_CONTROL. | ||
3127 | */ | ||
3128 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); | ||
3129 | if (ret) | ||
3130 | return ret; | ||
3131 | |||
3132 | /* As the user may map the buffer once pinned in the display plane | ||
3133 | * (e.g. libkms for the bootup splash), we have to ensure that we | ||
3134 | * always use map_and_fenceable for all scanout buffers. | ||
3135 | */ | ||
3136 | ret = i915_gem_object_pin(obj, alignment, true); | ||
3137 | if (ret) | ||
3138 | return ret; | ||
3139 | |||
3026 | i915_gem_object_flush_cpu_write_domain(obj); | 3140 | i915_gem_object_flush_cpu_write_domain(obj); |
3027 | 3141 | ||
3142 | old_write_domain = obj->base.write_domain; | ||
3028 | old_read_domains = obj->base.read_domains; | 3143 | old_read_domains = obj->base.read_domains; |
3144 | |||
3145 | /* It should now be out of any other write domains, and we can update | ||
3146 | * the domain values for our changes. | ||
3147 | */ | ||
3148 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
3029 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; | 3149 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
3030 | 3150 | ||
3031 | trace_i915_gem_object_change_domain(obj, | 3151 | trace_i915_gem_object_change_domain(obj, |
3032 | old_read_domains, | 3152 | old_read_domains, |
3033 | obj->base.write_domain); | 3153 | old_write_domain); |
3034 | 3154 | ||
3035 | return 0; | 3155 | return 0; |
3036 | } | 3156 | } |
3037 | 3157 | ||
3038 | int | 3158 | int |
3039 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) | 3159 | i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) |
3040 | { | 3160 | { |
3041 | int ret; | 3161 | int ret; |
3042 | 3162 | ||
3043 | if (!obj->active) | 3163 | if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) |
3044 | return 0; | 3164 | return 0; |
3045 | 3165 | ||
3046 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 3166 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
@@ -3049,6 +3169,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) | |||
3049 | return ret; | 3169 | return ret; |
3050 | } | 3170 | } |
3051 | 3171 | ||
3172 | /* Ensure that we invalidate the GPU's caches and TLBs. */ | ||
3173 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | ||
3174 | |||
3052 | return i915_gem_object_wait_rendering(obj); | 3175 | return i915_gem_object_wait_rendering(obj); |
3053 | } | 3176 | } |
3054 | 3177 | ||
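For call sites, the rename above folds pinning into the display-plane preparation. A hypothetical before/after sketch (the caller, obj, alignment, pipelined and ret are assumed; the real call sites live in intel_display.c):

	/* before: pin first, then switch domains */
	ret = i915_gem_object_pin(obj, alignment, true);
	if (ret == 0)
		ret = i915_gem_object_set_to_display_plane(obj, pipelined);

	/* after: one call forces uncached PTEs, pins map_and_fenceable,
	 * and moves the object into the GTT read domain */
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);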
@@ -3575,7 +3698,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3575 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3698 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3576 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 3699 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
3577 | 3700 | ||
3578 | obj->cache_level = I915_CACHE_NONE; | 3701 | if (IS_GEN6(dev)) { |
3702 | /* On Gen6, we can have the GPU use the LLC (the CPU | ||
3703 | * cache) for about a 10% performance improvement | ||
3704 | * compared to uncached. Graphics requests other than | ||
3705 | * display scanout are coherent with the CPU in | ||
3706 | * accessing this cache. This means in this mode we | ||
3707 | * don't need to clflush on the CPU side, and on the | ||
3708 | * GPU side we only need to flush internal caches to | ||
3709 | * get data visible to the CPU. | ||
3710 | * | ||
3711 | * However, we maintain the display planes as UC, and so | ||
3712 | * need to rebind when first used as such. | ||
3713 | */ | ||
3714 | obj->cache_level = I915_CACHE_LLC; | ||
3715 | } else | ||
3716 | obj->cache_level = I915_CACHE_NONE; | ||
3717 | |||
3579 | obj->base.driver_private = NULL; | 3718 | obj->base.driver_private = NULL; |
3580 | obj->fence_reg = I915_FENCE_REG_NONE; | 3719 | obj->fence_reg = I915_FENCE_REG_NONE; |
3581 | INIT_LIST_HEAD(&obj->mm_list); | 3720 | INIT_LIST_HEAD(&obj->mm_list); |
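A minimal usage sketch for the new cache-level API in light of the gen6 default above (illustrative caller only; struct_mutex assumed held, retry path elided):

	/* Objects now start out LLC-cached on gen6, but scanout requires
	 * uncached PTEs, so a buffer headed for the display is demoted: */
	int ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret == -EBUSY) {
		/* pinned objects refuse a cache-level change; unpin first */
	}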
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e46b645773c..7a709cd8d54 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
59 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | 59 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); |
60 | 60 | ||
61 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | 61 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { |
62 | unsigned int agp_type = | ||
63 | cache_level_to_agp_type(dev, obj->cache_level); | ||
64 | |||
65 | i915_gem_clflush_object(obj); | 62 | i915_gem_clflush_object(obj); |
66 | 63 | i915_gem_gtt_rebind_object(obj, obj->cache_level); | |
67 | if (dev_priv->mm.gtt->needs_dmar) { | ||
68 | BUG_ON(!obj->sg_list); | ||
69 | |||
70 | intel_gtt_insert_sg_entries(obj->sg_list, | ||
71 | obj->num_sg, | ||
72 | obj->gtt_space->start >> PAGE_SHIFT, | ||
73 | agp_type); | ||
74 | } else | ||
75 | intel_gtt_insert_pages(obj->gtt_space->start | ||
76 | >> PAGE_SHIFT, | ||
77 | obj->base.size >> PAGE_SHIFT, | ||
78 | obj->pages, | ||
79 | agp_type); | ||
80 | } | 64 | } |
81 | 65 | ||
82 | intel_gtt_chipset_flush(); | 66 | intel_gtt_chipset_flush(); |
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) | |||
110 | return 0; | 94 | return 0; |
111 | } | 95 | } |
112 | 96 | ||
97 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, | ||
98 | enum i915_cache_level cache_level) | ||
99 | { | ||
100 | struct drm_device *dev = obj->base.dev; | ||
101 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
102 | unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); | ||
103 | |||
104 | if (dev_priv->mm.gtt->needs_dmar) { | ||
105 | BUG_ON(!obj->sg_list); | ||
106 | |||
107 | intel_gtt_insert_sg_entries(obj->sg_list, | ||
108 | obj->num_sg, | ||
109 | obj->gtt_space->start >> PAGE_SHIFT, | ||
110 | agp_type); | ||
111 | } else | ||
112 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, | ||
113 | obj->base.size >> PAGE_SHIFT, | ||
114 | obj->pages, | ||
115 | agp_type); | ||
116 | } | ||
117 | |||
113 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | 118 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
114 | { | 119 | { |
115 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | 120 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
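The rebind helper leans on cache_level_to_agp_type(), defined earlier in this file and not shown here. A sketch of what such a mapping looks like (the AGP type constants are assumed from the AGP/intel-gtt headers):

	static unsigned int cache_level_to_agp_type(struct drm_device *dev,
						    enum i915_cache_level cache_level)
	{
		switch (cache_level) {
		case I915_CACHE_LLC:
			return AGP_USER_CACHED_MEMORY;	/* snooped PTEs */
		case I915_CACHE_NONE:
		default:
			return AGP_USER_MEMORY;		/* uncached PTEs */
		}
	}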
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3b03f85ea62..73248d0ea17 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
306 | struct drm_mode_config *mode_config = &dev->mode_config; | 306 | struct drm_mode_config *mode_config = &dev->mode_config; |
307 | struct intel_encoder *encoder; | 307 | struct intel_encoder *encoder; |
308 | 308 | ||
309 | mutex_lock(&mode_config->mutex); | ||
309 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | 310 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
310 | 311 | ||
311 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 312 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
312 | if (encoder->hot_plug) | 313 | if (encoder->hot_plug) |
313 | encoder->hot_plug(encoder); | 314 | encoder->hot_plug(encoder); |
314 | 315 | ||
316 | mutex_unlock(&mode_config->mutex); | ||
317 | |||
315 | /* Just fire off a uevent and let userspace tell us what to do */ | 318 | /* Just fire off a uevent and let userspace tell us what to do */ |
316 | drm_helper_hpd_irq_event(dev); | 319 | drm_helper_hpd_irq_event(dev); |
317 | } | 320 | } |
@@ -361,10 +364,12 @@ static void notify_ring(struct drm_device *dev, | |||
361 | 364 | ||
362 | ring->irq_seqno = seqno; | 365 | ring->irq_seqno = seqno; |
363 | wake_up_all(&ring->irq_queue); | 366 | wake_up_all(&ring->irq_queue); |
364 | 367 | if (i915_enable_hangcheck) { | |
365 | dev_priv->hangcheck_count = 0; | 368 | dev_priv->hangcheck_count = 0; |
366 | mod_timer(&dev_priv->hangcheck_timer, | 369 | mod_timer(&dev_priv->hangcheck_timer, |
367 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 370 | jiffies + |
371 | msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
372 | } | ||
368 | } | 373 | } |
369 | 374 | ||
370 | static void gen6_pm_rps_work(struct work_struct *work) | 375 | static void gen6_pm_rps_work(struct work_struct *work) |
@@ -817,6 +822,7 @@ static void i915_gem_record_fences(struct drm_device *dev, | |||
817 | 822 | ||
818 | /* Fences */ | 823 | /* Fences */ |
819 | switch (INTEL_INFO(dev)->gen) { | 824 | switch (INTEL_INFO(dev)->gen) { |
825 | case 7: | ||
820 | case 6: | 826 | case 6: |
821 | for (i = 0; i < 16; i++) | 827 | for (i = 0; i < 16; i++) |
822 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 828 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
@@ -1664,6 +1670,9 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1664 | uint32_t acthd, instdone, instdone1; | 1670 | uint32_t acthd, instdone, instdone1; |
1665 | bool err = false; | 1671 | bool err = false; |
1666 | 1672 | ||
1673 | if (!i915_enable_hangcheck) | ||
1674 | return; | ||
1675 | |||
1667 | /* If all work is done then ACTHD clearly hasn't advanced. */ | 1676 | /* If all work is done then ACTHD clearly hasn't advanced. */ |
1668 | if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && | 1677 | if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && |
1669 | i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && | 1678 | i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && |
@@ -2050,8 +2059,10 @@ void intel_irq_init(struct drm_device *dev) | |||
2050 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 2059 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
2051 | } | 2060 | } |
2052 | 2061 | ||
2053 | 2062 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
2054 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | 2063 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
2064 | else | ||
2065 | dev->driver->get_vblank_timestamp = NULL; | ||
2055 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | 2066 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
2056 | 2067 | ||
2057 | if (IS_IVYBRIDGE(dev)) { | 2068 | if (IS_IVYBRIDGE(dev)) { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5d5def756c9..2ae29de4172 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -78,6 +78,14 @@ | |||
78 | #define GRDOM_RENDER (1<<2) | 78 | #define GRDOM_RENDER (1<<2) |
79 | #define GRDOM_MEDIA (3<<2) | 79 | #define GRDOM_MEDIA (3<<2) |
80 | 80 | ||
81 | #define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ | ||
82 | #define GEN6_MBC_SNPCR_SHIFT 21 | ||
83 | #define GEN6_MBC_SNPCR_MASK (3<<21) | ||
84 | #define GEN6_MBC_SNPCR_MAX (0<<21) | ||
85 | #define GEN6_MBC_SNPCR_MED (1<<21) | ||
86 | #define GEN6_MBC_SNPCR_LOW (2<<21) | ||
87 | #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ | ||
88 | |||
81 | #define GEN6_GDRST 0x941c | 89 | #define GEN6_GDRST 0x941c |
82 | #define GEN6_GRDOM_FULL (1 << 0) | 90 | #define GEN6_GRDOM_FULL (1 << 0) |
83 | #define GEN6_GRDOM_RENDER (1 << 1) | 91 | #define GEN6_GRDOM_RENDER (1 << 1) |
@@ -367,6 +375,7 @@ | |||
367 | # define MI_FLUSH_ENABLE (1 << 11) | 375 | # define MI_FLUSH_ENABLE (1 << 11) |
368 | 376 | ||
369 | #define GFX_MODE 0x02520 | 377 | #define GFX_MODE 0x02520 |
378 | #define GFX_MODE_GEN7 0x0229c | ||
370 | #define GFX_RUN_LIST_ENABLE (1<<15) | 379 | #define GFX_RUN_LIST_ENABLE (1<<15) |
371 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | 380 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) |
372 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | 381 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) |
@@ -374,6 +383,9 @@ | |||
374 | #define GFX_PSMI_GRANULARITY (1<<10) | 383 | #define GFX_PSMI_GRANULARITY (1<<10) |
375 | #define GFX_PPGTT_ENABLE (1<<9) | 384 | #define GFX_PPGTT_ENABLE (1<<9) |
376 | 385 | ||
386 | #define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit)) | ||
387 | #define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0)) | ||
388 | |||
377 | #define SCPD0 0x0209c /* 915+ only */ | 389 | #define SCPD0 0x0209c /* 915+ only */ |
378 | #define IER 0x020a0 | 390 | #define IER 0x020a0 |
379 | #define IIR 0x020a4 | 391 | #define IIR 0x020a4 |
@@ -579,6 +591,7 @@ | |||
579 | #define DPFC_CTL_PLANEA (0<<30) | 591 | #define DPFC_CTL_PLANEA (0<<30) |
580 | #define DPFC_CTL_PLANEB (1<<30) | 592 | #define DPFC_CTL_PLANEB (1<<30) |
581 | #define DPFC_CTL_FENCE_EN (1<<29) | 593 | #define DPFC_CTL_FENCE_EN (1<<29) |
594 | #define DPFC_CTL_PERSISTENT_MODE (1<<25) | ||
582 | #define DPFC_SR_EN (1<<10) | 595 | #define DPFC_SR_EN (1<<10) |
583 | #define DPFC_CTL_LIMIT_1X (0<<6) | 596 | #define DPFC_CTL_LIMIT_1X (0<<6) |
584 | #define DPFC_CTL_LIMIT_2X (1<<6) | 597 | #define DPFC_CTL_LIMIT_2X (1<<6) |
@@ -1309,6 +1322,7 @@ | |||
1309 | #define ADPA_PIPE_SELECT_MASK (1<<30) | 1322 | #define ADPA_PIPE_SELECT_MASK (1<<30) |
1310 | #define ADPA_PIPE_A_SELECT 0 | 1323 | #define ADPA_PIPE_A_SELECT 0 |
1311 | #define ADPA_PIPE_B_SELECT (1<<30) | 1324 | #define ADPA_PIPE_B_SELECT (1<<30) |
1325 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) | ||
1312 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) | 1326 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) |
1313 | #define ADPA_SETS_HVPOLARITY 0 | 1327 | #define ADPA_SETS_HVPOLARITY 0 |
1314 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) | 1328 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) |
@@ -1451,6 +1465,7 @@ | |||
1451 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 1465 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
1452 | #define LVDS_PIPEB_SELECT (1 << 30) | 1466 | #define LVDS_PIPEB_SELECT (1 << 30) |
1453 | #define LVDS_PIPE_MASK (1 << 30) | 1467 | #define LVDS_PIPE_MASK (1 << 30) |
1468 | #define LVDS_PIPE(pipe) ((pipe) << 30) | ||
1454 | /* LVDS dithering flag on 965/g4x platform */ | 1469 | /* LVDS dithering flag on 965/g4x platform */ |
1455 | #define LVDS_ENABLE_DITHER (1 << 25) | 1470 | #define LVDS_ENABLE_DITHER (1 << 25) |
1456 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ | 1471 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ |
@@ -1490,9 +1505,6 @@ | |||
1490 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 1505 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
1491 | #define LVDS_B0B3_POWER_UP (3 << 2) | 1506 | #define LVDS_B0B3_POWER_UP (3 << 2) |
1492 | 1507 | ||
1493 | #define LVDS_PIPE_ENABLED(V, P) \ | ||
1494 | (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) | ||
1495 | |||
1496 | /* Video Data Island Packet control */ | 1508 | /* Video Data Island Packet control */ |
1497 | #define VIDEO_DIP_DATA 0x61178 | 1509 | #define VIDEO_DIP_DATA 0x61178 |
1498 | #define VIDEO_DIP_CTL 0x61170 | 1510 | #define VIDEO_DIP_CTL 0x61170 |
@@ -1505,6 +1517,7 @@ | |||
1505 | #define VIDEO_DIP_SELECT_AVI (0 << 19) | 1517 | #define VIDEO_DIP_SELECT_AVI (0 << 19) |
1506 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) | 1518 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) |
1507 | #define VIDEO_DIP_SELECT_SPD (3 << 19) | 1519 | #define VIDEO_DIP_SELECT_SPD (3 << 19) |
1520 | #define VIDEO_DIP_SELECT_MASK (3 << 19) | ||
1508 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) | 1521 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) |
1509 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) | 1522 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) |
1510 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) | 1523 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) |
@@ -2083,9 +2096,6 @@ | |||
2083 | #define DP_PIPEB_SELECT (1 << 30) | 2096 | #define DP_PIPEB_SELECT (1 << 30) |
2084 | #define DP_PIPE_MASK (1 << 30) | 2097 | #define DP_PIPE_MASK (1 << 30) |
2085 | 2098 | ||
2086 | #define DP_PIPE_ENABLED(V, P) \ | ||
2087 | (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN)) | ||
2088 | |||
2089 | /* Link training mode - select a suitable mode for each stage */ | 2099 | /* Link training mode - select a suitable mode for each stage */ |
2090 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) | 2100 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) |
2091 | #define DP_LINK_TRAIN_PAT_2 (1 << 28) | 2101 | #define DP_LINK_TRAIN_PAT_2 (1 << 28) |
@@ -2544,10 +2554,18 @@ | |||
2544 | #define _CURBBASE 0x700c4 | 2554 | #define _CURBBASE 0x700c4 |
2545 | #define _CURBPOS 0x700c8 | 2555 | #define _CURBPOS 0x700c8 |
2546 | 2556 | ||
2557 | #define _CURBCNTR_IVB 0x71080 | ||
2558 | #define _CURBBASE_IVB 0x71084 | ||
2559 | #define _CURBPOS_IVB 0x71088 | ||
2560 | |||
2547 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) | 2561 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) |
2548 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) | 2562 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) |
2549 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) | 2563 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) |
2550 | 2564 | ||
2565 | #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) | ||
2566 | #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) | ||
2567 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) | ||
2568 | |||
2551 | /* Display A control */ | 2569 | /* Display A control */ |
2552 | #define _DSPACNTR 0x70180 | 2570 | #define _DSPACNTR 0x70180 |
2553 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2571 | #define DISPLAY_PLANE_ENABLE (1<<31) |
@@ -3023,6 +3041,20 @@ | |||
3023 | #define _TRANSA_DP_LINK_M2 0xe0048 | 3041 | #define _TRANSA_DP_LINK_M2 0xe0048 |
3024 | #define _TRANSA_DP_LINK_N2 0xe004c | 3042 | #define _TRANSA_DP_LINK_N2 0xe004c |
3025 | 3043 | ||
3044 | /* Per-transcoder DIP controls */ | ||
3045 | |||
3046 | #define _VIDEO_DIP_CTL_A 0xe0200 | ||
3047 | #define _VIDEO_DIP_DATA_A 0xe0208 | ||
3048 | #define _VIDEO_DIP_GCP_A 0xe0210 | ||
3049 | |||
3050 | #define _VIDEO_DIP_CTL_B 0xe1200 | ||
3051 | #define _VIDEO_DIP_DATA_B 0xe1208 | ||
3052 | #define _VIDEO_DIP_GCP_B 0xe1210 | ||
3053 | |||
3054 | #define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) | ||
3055 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) | ||
3056 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) | ||
3057 | |||
3026 | #define _TRANS_HTOTAL_B 0xe1000 | 3058 | #define _TRANS_HTOTAL_B 0xe1000 |
3027 | #define _TRANS_HBLANK_B 0xe1004 | 3059 | #define _TRANS_HBLANK_B 0xe1004 |
3028 | #define _TRANS_HSYNC_B 0xe1008 | 3060 | #define _TRANS_HSYNC_B 0xe1008 |
@@ -3075,6 +3107,16 @@ | |||
3075 | #define TRANS_6BPC (2<<5) | 3107 | #define TRANS_6BPC (2<<5) |
3076 | #define TRANS_12BPC (3<<5) | 3108 | #define TRANS_12BPC (3<<5) |
3077 | 3109 | ||
3110 | #define _TRANSA_CHICKEN2 0xf0064 | ||
3111 | #define _TRANSB_CHICKEN2 0xf1064 | ||
3112 | #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) | ||
3113 | #define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) | ||
3114 | |||
3115 | #define SOUTH_CHICKEN1 0xc2000 | ||
3116 | #define FDIA_PHASE_SYNC_SHIFT_OVR 19 | ||
3117 | #define FDIA_PHASE_SYNC_SHIFT_EN 18 | ||
3118 | #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) | ||
3119 | #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) | ||
3078 | #define SOUTH_CHICKEN2 0xc2004 | 3120 | #define SOUTH_CHICKEN2 0xc2004 |
3079 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) | 3121 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) |
3080 | 3122 | ||
@@ -3133,6 +3175,7 @@ | |||
3133 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) | 3175 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) |
3134 | 3176 | ||
3135 | /* both Tx and Rx */ | 3177 | /* both Tx and Rx */ |
3178 | #define FDI_COMPOSITE_SYNC (1<<11) | ||
3136 | #define FDI_LINK_TRAIN_AUTO (1<<10) | 3179 | #define FDI_LINK_TRAIN_AUTO (1<<10) |
3137 | #define FDI_SCRAMBLING_ENABLE (0<<7) | 3180 | #define FDI_SCRAMBLING_ENABLE (0<<7) |
3138 | #define FDI_SCRAMBLING_DISABLE (1<<7) | 3181 | #define FDI_SCRAMBLING_DISABLE (1<<7) |
@@ -3225,15 +3268,13 @@ | |||
3225 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | 3268 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) |
3226 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | 3269 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
3227 | 3270 | ||
3228 | #define ADPA_PIPE_ENABLED(V, P) \ | ||
3229 | (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) | ||
3230 | |||
3231 | /* or SDVOB */ | 3271 | /* or SDVOB */ |
3232 | #define HDMIB 0xe1140 | 3272 | #define HDMIB 0xe1140 |
3233 | #define PORT_ENABLE (1 << 31) | 3273 | #define PORT_ENABLE (1 << 31) |
3234 | #define TRANSCODER_A (0) | 3274 | #define TRANSCODER(pipe) ((pipe) << 30) |
3235 | #define TRANSCODER_B (1 << 30) | 3275 | #define TRANSCODER_CPT(pipe) ((pipe) << 29) |
3236 | #define TRANSCODER_MASK (1 << 30) | 3276 | #define TRANSCODER_MASK (1 << 30) |
3277 | #define TRANSCODER_MASK_CPT (3 << 29) | ||
3237 | #define COLOR_FORMAT_8bpc (0) | 3278 | #define COLOR_FORMAT_8bpc (0) |
3238 | #define COLOR_FORMAT_12bpc (3 << 26) | 3279 | #define COLOR_FORMAT_12bpc (3 << 26) |
3239 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) | 3280 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) |
@@ -3249,9 +3290,6 @@ | |||
3249 | #define HSYNC_ACTIVE_HIGH (1 << 3) | 3290 | #define HSYNC_ACTIVE_HIGH (1 << 3) |
3250 | #define PORT_DETECTED (1 << 2) | 3291 | #define PORT_DETECTED (1 << 2) |
3251 | 3292 | ||
3252 | #define HDMI_PIPE_ENABLED(V, P) \ | ||
3253 | (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) | ||
3254 | |||
3255 | /* PCH SDVOB multiplex with HDMIB */ | 3293 | /* PCH SDVOB multiplex with HDMIB */ |
3256 | #define PCH_SDVOB HDMIB | 3294 | #define PCH_SDVOB HDMIB |
3257 | 3295 | ||
@@ -3318,6 +3356,7 @@ | |||
3318 | #define PORT_TRANS_B_SEL_CPT (1<<29) | 3356 | #define PORT_TRANS_B_SEL_CPT (1<<29) |
3319 | #define PORT_TRANS_C_SEL_CPT (2<<29) | 3357 | #define PORT_TRANS_C_SEL_CPT (2<<29) |
3320 | #define PORT_TRANS_SEL_MASK (3<<29) | 3358 | #define PORT_TRANS_SEL_MASK (3<<29) |
3359 | #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) | ||
3321 | 3360 | ||
3322 | #define TRANS_DP_CTL_A 0xe0300 | 3361 | #define TRANS_DP_CTL_A 0xe0300 |
3323 | #define TRANS_DP_CTL_B 0xe1300 | 3362 | #define TRANS_DP_CTL_B 0xe1300 |
@@ -3360,6 +3399,11 @@ | |||
3360 | #define FORCEWAKE_ACK 0x130090 | 3399 | #define FORCEWAKE_ACK 0x130090 |
3361 | 3400 | ||
3362 | #define GT_FIFO_FREE_ENTRIES 0x120008 | 3401 | #define GT_FIFO_FREE_ENTRIES 0x120008 |
3402 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | ||
3403 | |||
3404 | #define GEN6_UCGCTL2 0x9404 | ||
3405 | # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) | ||
3406 | # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) | ||
3363 | 3407 | ||
3364 | #define GEN6_RPNSWREQ 0xA008 | 3408 | #define GEN6_RPNSWREQ 0xA008 |
3365 | #define GEN6_TURBO_DISABLE (1<<31) | 3409 | #define GEN6_TURBO_DISABLE (1<<31) |
@@ -3434,7 +3478,9 @@ | |||
3434 | #define GEN6_PCODE_MAILBOX 0x138124 | 3478 | #define GEN6_PCODE_MAILBOX 0x138124 |
3435 | #define GEN6_PCODE_READY (1<<31) | 3479 | #define GEN6_PCODE_READY (1<<31) |
3436 | #define GEN6_READ_OC_PARAMS 0xc | 3480 | #define GEN6_READ_OC_PARAMS 0xc |
3437 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 | 3481 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 |
3482 | #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 | ||
3438 | #define GEN6_PCODE_DATA 0x138128 | 3483 | #define GEN6_PCODE_DATA 0x138128 |
3484 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 | ||
3439 | 3485 | ||
3440 | #endif /* _I915_REG_H_ */ | 3486 | #endif /* _I915_REG_H_ */ |
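Two register idioms are worth noting in the additions above. GFX_MODE_ENABLE()/GFX_MODE_DISABLE() encode the masked-write convention used by this register: the high 16 bits select which low bits the write actually touches, so independent updates cannot clobber each other. GEN6_MBCUNIT_SNPCR, by contrast, is a conventional read-modify-write field. Illustrative use (the call sites are assumptions, not part of this patch):

	/* Masked write: only the TLB-invalidate bit changes. */
	I915_WRITE(GFX_MODE_GEN7, GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

	/* Field update: set the snoop control to the medium setting. */
	u32 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);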
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 5257cfc34c3..9c7706a4c71 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
370 | 370 | ||
371 | /* Fences */ | 371 | /* Fences */ |
372 | switch (INTEL_INFO(dev)->gen) { | 372 | switch (INTEL_INFO(dev)->gen) { |
373 | case 7: | ||
373 | case 6: | 374 | case 6: |
374 | for (i = 0; i < 16; i++) | 375 | for (i = 0; i < 16; i++) |
375 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 376 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
404 | 405 | ||
405 | /* Fences */ | 406 | /* Fences */ |
406 | switch (INTEL_INFO(dev)->gen) { | 407 | switch (INTEL_INFO(dev)->gen) { |
408 | case 7: | ||
407 | case 6: | 409 | case 6: |
408 | for (i = 0; i < 16; i++) | 410 | for (i = 0; i < 16; i++) |
409 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | 411 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); |
@@ -760,15 +762,13 @@ static void i915_restore_display(struct drm_device *dev) | |||
760 | /* FIXME: restore TV & SDVO state */ | 762 | /* FIXME: restore TV & SDVO state */ |
761 | 763 | ||
762 | /* only restore FBC info on the platform that supports FBC*/ | 764 | /* only restore FBC info on the platform that supports FBC*/ |
765 | intel_disable_fbc(dev); | ||
763 | if (I915_HAS_FBC(dev)) { | 766 | if (I915_HAS_FBC(dev)) { |
764 | if (HAS_PCH_SPLIT(dev)) { | 767 | if (HAS_PCH_SPLIT(dev)) { |
765 | ironlake_disable_fbc(dev); | ||
766 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 768 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
767 | } else if (IS_GM45(dev)) { | 769 | } else if (IS_GM45(dev)) { |
768 | g4x_disable_fbc(dev); | ||
769 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 770 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
770 | } else { | 771 | } else { |
771 | i8xx_disable_fbc(dev); | ||
772 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); | 772 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); |
773 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); | 773 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); |
774 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); | 774 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); |
@@ -814,6 +814,7 @@ int i915_save_state(struct drm_device *dev) | |||
814 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); | 814 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); |
815 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 815 | dev_priv->saveMCHBAR_RENDER_STANDBY = |
816 | I915_READ(RSTDBYCTL); | 816 | I915_READ(RSTDBYCTL); |
817 | dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); | ||
817 | } else { | 818 | } else { |
818 | dev_priv->saveIER = I915_READ(IER); | 819 | dev_priv->saveIER = I915_READ(IER); |
819 | dev_priv->saveIMR = I915_READ(IMR); | 820 | dev_priv->saveIMR = I915_READ(IMR); |
@@ -865,21 +866,25 @@ int i915_restore_state(struct drm_device *dev) | |||
865 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); | 866 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); |
866 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); | 867 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); |
867 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); | 868 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); |
869 | I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); | ||
868 | } else { | 870 | } else { |
869 | I915_WRITE(IER, dev_priv->saveIER); | 871 | I915_WRITE(IER, dev_priv->saveIER); |
870 | I915_WRITE(IMR, dev_priv->saveIMR); | 872 | I915_WRITE(IMR, dev_priv->saveIMR); |
871 | } | 873 | } |
872 | mutex_unlock(&dev->struct_mutex); | 874 | mutex_unlock(&dev->struct_mutex); |
873 | 875 | ||
874 | intel_init_clock_gating(dev); | 876 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
877 | intel_init_clock_gating(dev); | ||
875 | 878 | ||
876 | if (IS_IRONLAKE_M(dev)) { | 879 | if (IS_IRONLAKE_M(dev)) { |
877 | ironlake_enable_drps(dev); | 880 | ironlake_enable_drps(dev); |
878 | intel_init_emon(dev); | 881 | intel_init_emon(dev); |
879 | } | 882 | } |
880 | 883 | ||
881 | if (IS_GEN6(dev)) | 884 | if (IS_GEN6(dev)) { |
882 | gen6_enable_rps(dev_priv); | 885 | gen6_enable_rps(dev_priv); |
886 | gen6_update_ring_freq(dev_priv); | ||
887 | } | ||
883 | 888 | ||
884 | mutex_lock(&dev->struct_mutex); | 889 | mutex_lock(&dev->struct_mutex); |
885 | 890 | ||
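gen6_update_ring_freq() itself is outside this section; given the PCODE mailbox registers added in i915_reg.h above, its per-entry write plausibly looks like the sketch below (ia_freq, gpu_freq and the wait_for() helper are assumptions):

	/* Pair an IA ratio with a GPU frequency and submit it to PCODE. */
	I915_WRITE(GEN6_PCODE_DATA,
		   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | gpu_freq);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     10))
		DRM_ERROR("pcode write of frequency table timed out\n");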
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 927442a1192..61abef8a811 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -74,7 +74,7 @@ get_blocksize(void *p) | |||
74 | 74 | ||
75 | static void | 75 | static void |
76 | fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | 76 | fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, |
77 | struct lvds_dvo_timing *dvo_timing) | 77 | const struct lvds_dvo_timing *dvo_timing) |
78 | { | 78 | { |
79 | panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | | 79 | panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | |
80 | dvo_timing->hactive_lo; | 80 | dvo_timing->hactive_lo; |
@@ -115,20 +115,75 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | |||
115 | drm_mode_set_name(panel_fixed_mode); | 115 | drm_mode_set_name(panel_fixed_mode); |
116 | } | 116 | } |
117 | 117 | ||
118 | static bool | ||
119 | lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a, | ||
120 | const struct lvds_dvo_timing *b) | ||
121 | { | ||
122 | if (a->hactive_hi != b->hactive_hi || | ||
123 | a->hactive_lo != b->hactive_lo) | ||
124 | return false; | ||
125 | |||
126 | if (a->hsync_off_hi != b->hsync_off_hi || | ||
127 | a->hsync_off_lo != b->hsync_off_lo) | ||
128 | return false; | ||
129 | |||
130 | if (a->hsync_pulse_width != b->hsync_pulse_width) | ||
131 | return false; | ||
132 | |||
133 | if (a->hblank_hi != b->hblank_hi || | ||
134 | a->hblank_lo != b->hblank_lo) | ||
135 | return false; | ||
136 | |||
137 | if (a->vactive_hi != b->vactive_hi || | ||
138 | a->vactive_lo != b->vactive_lo) | ||
139 | return false; | ||
140 | |||
141 | if (a->vsync_off != b->vsync_off) | ||
142 | return false; | ||
143 | |||
144 | if (a->vsync_pulse_width != b->vsync_pulse_width) | ||
145 | return false; | ||
146 | |||
147 | if (a->vblank_hi != b->vblank_hi || | ||
148 | a->vblank_lo != b->vblank_lo) | ||
149 | return false; | ||
150 | |||
151 | return true; | ||
152 | } | ||
153 | |||
154 | static const struct lvds_dvo_timing * | ||
155 | get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, | ||
156 | const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, | ||
157 | int index) | ||
158 | { | ||
159 | /* | ||
160 | * The size of fp_timing varies from platform to platform, so | ||
161 | * calculate the DVO timing's relative offset within the LVDS | ||
162 | * data entry to locate the DVO timing entry. | ||
163 | */ | ||
164 | |||
165 | int lfp_data_size = | ||
166 | lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - | ||
167 | lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; | ||
168 | int dvo_timing_offset = | ||
169 | lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - | ||
170 | lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; | ||
171 | char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index; | ||
172 | |||
173 | return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); | ||
174 | } | ||
175 | |||
118 | /* Try to find integrated panel data */ | 176 | /* Try to find integrated panel data */ |
119 | static void | 177 | static void |
120 | parse_lfp_panel_data(struct drm_i915_private *dev_priv, | 178 | parse_lfp_panel_data(struct drm_i915_private *dev_priv, |
121 | struct bdb_header *bdb) | 179 | struct bdb_header *bdb) |
122 | { | 180 | { |
123 | struct bdb_lvds_options *lvds_options; | 181 | const struct bdb_lvds_options *lvds_options; |
124 | struct bdb_lvds_lfp_data *lvds_lfp_data; | 182 | const struct bdb_lvds_lfp_data *lvds_lfp_data; |
125 | struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; | 183 | const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; |
126 | struct bdb_lvds_lfp_data_entry *entry; | 184 | const struct lvds_dvo_timing *panel_dvo_timing; |
127 | struct lvds_dvo_timing *dvo_timing; | ||
128 | struct drm_display_mode *panel_fixed_mode; | 185 | struct drm_display_mode *panel_fixed_mode; |
129 | int lfp_data_size, dvo_timing_offset; | 186 | int i, downclock; |
130 | int i, temp_downclock; | ||
131 | struct drm_display_mode *temp_mode; | ||
132 | 187 | ||
133 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); | 188 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); |
134 | if (!lvds_options) | 189 | if (!lvds_options) |
@@ -150,75 +205,44 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
150 | 205 | ||
151 | dev_priv->lvds_vbt = 1; | 206 | dev_priv->lvds_vbt = 1; |
152 | 207 | ||
153 | lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - | 208 | panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, |
154 | lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; | 209 | lvds_lfp_data_ptrs, |
155 | entry = (struct bdb_lvds_lfp_data_entry *) | 210 | lvds_options->panel_type); |
156 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * | ||
157 | lvds_options->panel_type)); | ||
158 | dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - | ||
159 | lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; | ||
160 | |||
161 | /* | ||
162 | * the size of fp_timing varies on the different platform. | ||
163 | * So calculate the DVO timing relative offset in LVDS data | ||
164 | * entry to get the DVO timing entry | ||
165 | */ | ||
166 | dvo_timing = (struct lvds_dvo_timing *) | ||
167 | ((unsigned char *)entry + dvo_timing_offset); | ||
168 | 211 | ||
169 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 212 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
170 | if (!panel_fixed_mode) | 213 | if (!panel_fixed_mode) |
171 | return; | 214 | return; |
172 | 215 | ||
173 | fill_detail_timing_data(panel_fixed_mode, dvo_timing); | 216 | fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); |
174 | 217 | ||
175 | dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; | 218 | dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; |
176 | 219 | ||
177 | DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); | 220 | DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); |
178 | drm_mode_debug_printmodeline(panel_fixed_mode); | 221 | drm_mode_debug_printmodeline(panel_fixed_mode); |
179 | 222 | ||
180 | temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL); | ||
181 | temp_downclock = panel_fixed_mode->clock; | ||
182 | /* | 223 | /* |
183 | * enumerate the LVDS panel timing info entry in VBT to check whether | 224 | * Iterate over the LVDS panel timing info to find the lowest clock |
184 | * the LVDS downclock is found. | 225 | * for the native resolution. |
185 | */ | 226 | */ |
227 | downclock = panel_dvo_timing->clock; | ||
186 | for (i = 0; i < 16; i++) { | 228 | for (i = 0; i < 16; i++) { |
187 | entry = (struct bdb_lvds_lfp_data_entry *) | 229 | const struct lvds_dvo_timing *dvo_timing; |
188 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i)); | 230 | |
189 | dvo_timing = (struct lvds_dvo_timing *) | 231 | dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, |
190 | ((unsigned char *)entry + dvo_timing_offset); | 232 | lvds_lfp_data_ptrs, |
191 | 233 | i); | |
192 | fill_detail_timing_data(temp_mode, dvo_timing); | 234 | if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) && |
193 | 235 | dvo_timing->clock < downclock) | |
194 | if (temp_mode->hdisplay == panel_fixed_mode->hdisplay && | 236 | downclock = dvo_timing->clock; |
195 | temp_mode->hsync_start == panel_fixed_mode->hsync_start && | ||
196 | temp_mode->hsync_end == panel_fixed_mode->hsync_end && | ||
197 | temp_mode->htotal == panel_fixed_mode->htotal && | ||
198 | temp_mode->vdisplay == panel_fixed_mode->vdisplay && | ||
199 | temp_mode->vsync_start == panel_fixed_mode->vsync_start && | ||
200 | temp_mode->vsync_end == panel_fixed_mode->vsync_end && | ||
201 | temp_mode->vtotal == panel_fixed_mode->vtotal && | ||
202 | temp_mode->clock < temp_downclock) { | ||
203 | /* | ||
204 | * downclock is already found. But we expect | ||
205 | * to find the lower downclock. | ||
206 | */ | ||
207 | temp_downclock = temp_mode->clock; | ||
208 | } | ||
209 | /* clear it to zero */ | ||
210 | memset(temp_mode, 0, sizeof(*temp_mode)); | ||
211 | } | 237 | } |
212 | kfree(temp_mode); | 238 | |
213 | if (temp_downclock < panel_fixed_mode->clock && | 239 | if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { |
214 | i915_lvds_downclock) { | ||
215 | dev_priv->lvds_downclock_avail = 1; | 240 | dev_priv->lvds_downclock_avail = 1; |
216 | dev_priv->lvds_downclock = temp_downclock; | 241 | dev_priv->lvds_downclock = downclock * 10; |
217 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. " | 242 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. " |
218 | "Normal Clock %dKHz, downclock %dKHz\n", | 243 | "Normal Clock %dKHz, downclock %dKHz\n", |
219 | temp_downclock, panel_fixed_mode->clock); | 244 | panel_fixed_mode->clock, 10*downclock); |
220 | } | 245 | } |
221 | return; | ||
222 | } | 246 | } |
223 | 247 | ||
224 | /* Try to find sdvo panel data */ | 248 | /* Try to find sdvo panel data */ |
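A worked example of the offset arithmetic in get_lvds_dvo_timing(), using made-up pointer-table values:

	int fp0  = 0x10;	/* hypothetical ptr[0].fp_timing_offset  */
	int dvo0 = 0x3a;	/* hypothetical ptr[0].dvo_timing_offset */
	int dvo1 = 0x73;	/* hypothetical ptr[1].dvo_timing_offset */

	int lfp_data_size     = dvo1 - dvo0;	/* 0x39: stride of one entry  */
	int dvo_timing_offset = dvo0 - fp0;	/* 0x2a: offset inside entry  */
	/* entry i's DVO timing sits at data + 0x39 * i + 0x2a */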
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0f1c799afea..07e7cf38068 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -24,6 +24,7 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/cpufreq.h> | ||
27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
28 | #include <linux/input.h> | 29 | #include <linux/input.h> |
29 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
@@ -877,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv, | |||
877 | int pp_reg, lvds_reg; | 878 | int pp_reg, lvds_reg; |
878 | u32 val; | 879 | u32 val; |
879 | enum pipe panel_pipe = PIPE_A; | 880 | enum pipe panel_pipe = PIPE_A; |
880 | bool locked = locked; | 881 | bool locked = true; |
881 | 882 | ||
882 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | 883 | if (HAS_PCH_SPLIT(dev_priv->dev)) { |
883 | pp_reg = PCH_PP_CONTROL; | 884 | pp_reg = PCH_PP_CONTROL; |
@@ -979,11 +980,76 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | |||
979 | pipe_name(pipe)); | 980 | pipe_name(pipe)); |
980 | } | 981 | } |
981 | 982 | ||
983 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, | ||
984 | enum pipe pipe, u32 port_sel, u32 val) | ||
985 | { | ||
986 | if ((val & DP_PORT_EN) == 0) | ||
987 | return false; | ||
988 | |||
989 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
990 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); | ||
991 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); | ||
992 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) | ||
993 | return false; | ||
994 | } else { | ||
995 | if ((val & DP_PIPE_MASK) != (pipe << 30)) | ||
996 | return false; | ||
997 | } | ||
998 | return true; | ||
999 | } | ||
1000 | |||
1001 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1002 | enum pipe pipe, u32 val) | ||
1003 | { | ||
1004 | if ((val & PORT_ENABLE) == 0) | ||
1005 | return false; | ||
1006 | |||
1007 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1008 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1009 | return false; | ||
1010 | } else { | ||
1011 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) | ||
1012 | return false; | ||
1013 | } | ||
1014 | return true; | ||
1015 | } | ||
1016 | |||
1017 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1018 | enum pipe pipe, u32 val) | ||
1019 | { | ||
1020 | if ((val & LVDS_PORT_EN) == 0) | ||
1021 | return false; | ||
1022 | |||
1023 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1024 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1025 | return false; | ||
1026 | } else { | ||
1027 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) | ||
1028 | return false; | ||
1029 | } | ||
1030 | return true; | ||
1031 | } | ||
1032 | |||
1033 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, | ||
1034 | enum pipe pipe, u32 val) | ||
1035 | { | ||
1036 | if ((val & ADPA_DAC_ENABLE) == 0) | ||
1037 | return false; | ||
1038 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
1039 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | ||
1040 | return false; | ||
1041 | } else { | ||
1042 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) | ||
1043 | return false; | ||
1044 | } | ||
1045 | return true; | ||
1046 | } | ||
1047 | |||
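The four helpers above share one shape: bail out if the port's enable bit is clear, then check routing via the transcoder-select field on CPT or the legacy pipe-select bits otherwise. A minimal userspace sketch of that shape, with stubbed registers and made-up bit positions (not the real i915 layout):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define PORT_EN            (1u << 31)            /* illustrative enable bit */
        #define LEGACY_PIPE_MASK   (1u << 30)            /* IBX: pipe select in the port reg */
        #define LEGACY_PIPE_SEL(p) ((uint32_t)(p) << 30)
        #define TRANS_SEL_MASK     (3u << 29)            /* CPT: transcoder select field */
        #define TRANS_SEL(p)       ((uint32_t)(p) << 29)

        static bool has_pch_cpt = true;                  /* assumption: CPT-style PCH */

        static bool port_on_pipe(int pipe, uint32_t port_val, uint32_t trans_dp_ctl)
        {
                if ((port_val & PORT_EN) == 0)
                        return false;                    /* port off: not on any pipe */
                if (has_pch_cpt)                         /* CPT routes via transcoder reg */
                        return (trans_dp_ctl & TRANS_SEL_MASK) == TRANS_SEL(pipe);
                return (port_val & LEGACY_PIPE_MASK) == LEGACY_PIPE_SEL(pipe);
        }

        int main(void)
        {
                printf("%d\n", port_on_pipe(1, PORT_EN, TRANS_SEL(1))); /* 1: on pipe 1 */
                printf("%d\n", port_on_pipe(0, PORT_EN, TRANS_SEL(1))); /* 0: routed elsewhere */
                return 0;
        }

Consolidating the CPT/IBX split into these predicates is what lets the old DP_PIPE_ENABLED-style macros go away.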
982 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | 1048 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, |
983 | enum pipe pipe, int reg) | 1049 | enum pipe pipe, int reg, u32 port_sel) |
984 | { | 1050 | { |
985 | u32 val = I915_READ(reg); | 1051 | u32 val = I915_READ(reg); |
986 | WARN(DP_PIPE_ENABLED(val, pipe), | 1052 | WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), |
987 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1053 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
988 | reg, pipe_name(pipe)); | 1054 | reg, pipe_name(pipe)); |
989 | } | 1055 | } |
@@ -992,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
992 | enum pipe pipe, int reg) | 1058 | enum pipe pipe, int reg) |
993 | { | 1059 | { |
994 | u32 val = I915_READ(reg); | 1060 | u32 val = I915_READ(reg); |
995 | WARN(HDMI_PIPE_ENABLED(val, pipe), | 1061 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
996 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1062 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
997 | reg, pipe_name(pipe)); | 1063 | reg, pipe_name(pipe)); |
998 | } | 1064 | } |
@@ -1003,19 +1069,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1003 | int reg; | 1069 | int reg; |
1004 | u32 val; | 1070 | u32 val; |
1005 | 1071 | ||
1006 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); | 1072 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1007 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); | 1073 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1008 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); | 1074 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1009 | 1075 | ||
1010 | reg = PCH_ADPA; | 1076 | reg = PCH_ADPA; |
1011 | val = I915_READ(reg); | 1077 | val = I915_READ(reg); |
1012 | WARN(ADPA_PIPE_ENABLED(val, pipe), | 1078 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1013 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1079 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1014 | pipe_name(pipe)); | 1080 | pipe_name(pipe)); |
1015 | 1081 | ||
1016 | reg = PCH_LVDS; | 1082 | reg = PCH_LVDS; |
1017 | val = I915_READ(reg); | 1083 | val = I915_READ(reg); |
1018 | WARN(LVDS_PIPE_ENABLED(val, pipe), | 1084 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1019 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1085 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1020 | pipe_name(pipe)); | 1086 | pipe_name(pipe)); |
1021 | 1087 | ||
@@ -1157,12 +1223,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | |||
1157 | 1223 | ||
1158 | reg = TRANSCONF(pipe); | 1224 | reg = TRANSCONF(pipe); |
1159 | val = I915_READ(reg); | 1225 | val = I915_READ(reg); |
1160 | /* | 1226 | |
1161 | * make the BPC in transcoder be consistent with | 1227 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1162 | * that in pipeconf reg. | 1228 | /* |
1163 | */ | 1229 | * make the BPC in transcoder be consistent with |
1164 | val &= ~PIPE_BPC_MASK; | 1230 | * that in pipeconf reg. |
1165 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | 1231 | */ |
1232 | val &= ~PIPE_BPC_MASK; | ||
1233 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | ||
1234 | } | ||
1166 | I915_WRITE(reg, val | TRANS_ENABLE); | 1235 | I915_WRITE(reg, val | TRANS_ENABLE); |
1167 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 1236 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
1168 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 1237 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
@@ -1272,6 +1341,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |||
1272 | intel_wait_for_pipe_off(dev_priv->dev, pipe); | 1341 | intel_wait_for_pipe_off(dev_priv->dev, pipe); |
1273 | } | 1342 | } |
1274 | 1343 | ||
1344 | /* | ||
1345 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1346 | * trigger in order to latch. The display address reg provides this. | ||
1347 | */ | ||
1348 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1349 | enum plane plane) | ||
1350 | { | ||
1351 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); | ||
1352 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); | ||
1353 | } | ||
1354 | |||
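intel_flush_display_plane() relies on plane registers being double buffered: the written value is unchanged, but the write cycle to the display address register is what arms the hardware to latch the pending plane state at the next vblank. A toy stand-in, with the registers stubbed as an array:

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t fake_regs[2];       /* [0] = DSPADDR, [1] = DSPSURF (stubs) */
        static int writes;

        static uint32_t reg_read(int r)            { return fake_regs[r]; }
        static void reg_write(int r, uint32_t v)   { fake_regs[r] = v; writes++; }

        static void flush_display_plane(void)
        {
                /* Values unchanged; the write cycles themselves trigger the latch. */
                reg_write(0, reg_read(0));
                reg_write(1, reg_read(1));
        }

        int main(void)
        {
                flush_display_plane();
                printf("latch-triggering writes issued: %d\n", writes);  /* 2 */
                return 0;
        }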
1275 | /** | 1355 | /** |
1276 | * intel_enable_plane - enable a display plane on a given pipe | 1356 | * intel_enable_plane - enable a display plane on a given pipe |
1277 | * @dev_priv: i915 private structure | 1357 | * @dev_priv: i915 private structure |
@@ -1295,20 +1375,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv, | |||
1295 | return; | 1375 | return; |
1296 | 1376 | ||
1297 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | 1377 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); |
1378 | intel_flush_display_plane(dev_priv, plane); | ||
1298 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1379 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1299 | } | 1380 | } |
1300 | 1381 | ||
1301 | /* | ||
1302 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1303 | * trigger in order to latch. The display address reg provides this. | ||
1304 | */ | ||
1305 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1306 | enum plane plane) | ||
1307 | { | ||
1308 | u32 reg = DSPADDR(plane); | ||
1309 | I915_WRITE(reg, I915_READ(reg)); | ||
1310 | } | ||
1311 | |||
1312 | /** | 1382 | /** |
1313 | * intel_disable_plane - disable a display plane | 1383 | * intel_disable_plane - disable a display plane |
1314 | * @dev_priv: i915 private structure | 1384 | * @dev_priv: i915 private structure |
@@ -1334,19 +1404,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv, | |||
1334 | } | 1404 | } |
1335 | 1405 | ||
1336 | static void disable_pch_dp(struct drm_i915_private *dev_priv, | 1406 | static void disable_pch_dp(struct drm_i915_private *dev_priv, |
1337 | enum pipe pipe, int reg) | 1407 | enum pipe pipe, int reg, u32 port_sel) |
1338 | { | 1408 | { |
1339 | u32 val = I915_READ(reg); | 1409 | u32 val = I915_READ(reg); |
1340 | if (DP_PIPE_ENABLED(val, pipe)) | 1410 | if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { |
1411 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); | ||
1341 | I915_WRITE(reg, val & ~DP_PORT_EN); | 1412 | I915_WRITE(reg, val & ~DP_PORT_EN); |
1413 | } | ||
1342 | } | 1414 | } |
1343 | 1415 | ||
1344 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | 1416 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, |
1345 | enum pipe pipe, int reg) | 1417 | enum pipe pipe, int reg) |
1346 | { | 1418 | { |
1347 | u32 val = I915_READ(reg); | 1419 | u32 val = I915_READ(reg); |
1348 | if (HDMI_PIPE_ENABLED(val, pipe)) | 1420 | if (hdmi_pipe_enabled(dev_priv, pipe, val)) { |
1421 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", | ||
1422 | reg, pipe); | ||
1349 | I915_WRITE(reg, val & ~PORT_ENABLE); | 1423 | I915_WRITE(reg, val & ~PORT_ENABLE); |
1424 | } | ||
1350 | } | 1425 | } |
1351 | 1426 | ||
1352 | /* Disable any ports connected to this transcoder */ | 1427 | /* Disable any ports connected to this transcoder */ |
@@ -1358,18 +1433,19 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1358 | val = I915_READ(PCH_PP_CONTROL); | 1433 | val = I915_READ(PCH_PP_CONTROL); |
1359 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); | 1434 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); |
1360 | 1435 | ||
1361 | disable_pch_dp(dev_priv, pipe, PCH_DP_B); | 1436 | disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1362 | disable_pch_dp(dev_priv, pipe, PCH_DP_C); | 1437 | disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1363 | disable_pch_dp(dev_priv, pipe, PCH_DP_D); | 1438 | disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1364 | 1439 | ||
1365 | reg = PCH_ADPA; | 1440 | reg = PCH_ADPA; |
1366 | val = I915_READ(reg); | 1441 | val = I915_READ(reg); |
1367 | if (ADPA_PIPE_ENABLED(val, pipe)) | 1442 | if (adpa_pipe_enabled(dev_priv, pipe, val)) |
1368 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | 1443 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); |
1369 | 1444 | ||
1370 | reg = PCH_LVDS; | 1445 | reg = PCH_LVDS; |
1371 | val = I915_READ(reg); | 1446 | val = I915_READ(reg); |
1372 | if (LVDS_PIPE_ENABLED(val, pipe)) { | 1447 | if (lvds_pipe_enabled(dev_priv, pipe, val)) { |
1448 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); | ||
1373 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | 1449 | I915_WRITE(reg, val & ~LVDS_PORT_EN); |
1374 | POSTING_READ(reg); | 1450 | POSTING_READ(reg); |
1375 | udelay(100); | 1451 | udelay(100); |
@@ -1380,6 +1456,28 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1380 | disable_pch_hdmi(dev_priv, pipe, HDMID); | 1456 | disable_pch_hdmi(dev_priv, pipe, HDMID); |
1381 | } | 1457 | } |
1382 | 1458 | ||
1459 | static void i8xx_disable_fbc(struct drm_device *dev) | ||
1460 | { | ||
1461 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1462 | u32 fbc_ctl; | ||
1463 | |||
1464 | /* Disable compression */ | ||
1465 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
1466 | if ((fbc_ctl & FBC_CTL_EN) == 0) | ||
1467 | return; | ||
1468 | |||
1469 | fbc_ctl &= ~FBC_CTL_EN; | ||
1470 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
1471 | |||
1472 | /* Wait for compressing bit to clear */ | ||
1473 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { | ||
1474 | DRM_DEBUG_KMS("FBC idle timed out\n"); | ||
1475 | return; | ||
1476 | } | ||
1477 | |||
1478 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
1479 | } | ||
1480 | |||
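The disable path clears FBC_CTL_EN and then uses wait_for() to poll the compressing bit with a 10 ms budget. A userspace analogue of that poll-with-timeout contract (the status bit is faked here; a real driver would read FBC_STATUS):

        #include <stdbool.h>
        #include <stdio.h>
        #include <time.h>

        /* Stand-in for the hardware status bit; clears after a few polls. */
        static bool still_compressing(void) { static int n = 3; return n-- > 0; }

        /* Poll a condition until it holds or timeout_ms expires, like wait_for(). */
        static int wait_for_idle(int timeout_ms)
        {
                struct timespec start, now;
                clock_gettime(CLOCK_MONOTONIC, &start);
                while (still_compressing()) {
                        clock_gettime(CLOCK_MONOTONIC, &now);
                        long ms = (now.tv_sec - start.tv_sec) * 1000 +
                                  (now.tv_nsec - start.tv_nsec) / 1000000;
                        if (ms > timeout_ms)
                                return -1;   /* caller logs "FBC idle timed out" */
                }
                return 0;
        }

        int main(void)
        {
                printf("%s\n", wait_for_idle(10) ? "timed out" : "idle");
                return 0;
        }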
1383 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1481 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1384 | { | 1482 | { |
1385 | struct drm_device *dev = crtc->dev; | 1483 | struct drm_device *dev = crtc->dev; |
@@ -1388,36 +1486,25 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1388 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1486 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1389 | struct drm_i915_gem_object *obj = intel_fb->obj; | 1487 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1390 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1488 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1489 | int cfb_pitch; | ||
1391 | int plane, i; | 1490 | int plane, i; |
1392 | u32 fbc_ctl, fbc_ctl2; | 1491 | u32 fbc_ctl, fbc_ctl2; |
1393 | 1492 | ||
1394 | if (fb->pitch == dev_priv->cfb_pitch && | 1493 | cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; |
1395 | obj->fence_reg == dev_priv->cfb_fence && | 1494 | if (fb->pitch < cfb_pitch) |
1396 | intel_crtc->plane == dev_priv->cfb_plane && | 1495 | cfb_pitch = fb->pitch; |
1397 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) | ||
1398 | return; | ||
1399 | |||
1400 | i8xx_disable_fbc(dev); | ||
1401 | |||
1402 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; | ||
1403 | |||
1404 | if (fb->pitch < dev_priv->cfb_pitch) | ||
1405 | dev_priv->cfb_pitch = fb->pitch; | ||
1406 | 1496 | ||
1407 | /* FBC_CTL wants 64B units */ | 1497 | /* FBC_CTL wants 64B units */ |
1408 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1498 | cfb_pitch = (cfb_pitch / 64) - 1; |
1409 | dev_priv->cfb_fence = obj->fence_reg; | 1499 | plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
1410 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1411 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | ||
1412 | 1500 | ||
1413 | /* Clear old tags */ | 1501 | /* Clear old tags */ |
1414 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | 1502 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) |
1415 | I915_WRITE(FBC_TAG + (i * 4), 0); | 1503 | I915_WRITE(FBC_TAG + (i * 4), 0); |
1416 | 1504 | ||
1417 | /* Set it up... */ | 1505 | /* Set it up... */ |
1418 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | 1506 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; |
1419 | if (obj->tiling_mode != I915_TILING_NONE) | 1507 | fbc_ctl2 |= plane; |
1420 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | ||
1421 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 1508 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1422 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 1509 | I915_WRITE(FBC_FENCE_OFF, crtc->y); |
1423 | 1510 | ||
@@ -1425,36 +1512,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1425 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | 1512 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; |
1426 | if (IS_I945GM(dev)) | 1513 | if (IS_I945GM(dev)) |
1427 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 1514 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1428 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1515 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1429 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1516 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1430 | if (obj->tiling_mode != I915_TILING_NONE) | 1517 | fbc_ctl |= obj->fence_reg; |
1431 | fbc_ctl |= dev_priv->cfb_fence; | ||
1432 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
1433 | |||
1434 | DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", | ||
1435 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); | ||
1436 | } | ||
1437 | |||
1438 | void i8xx_disable_fbc(struct drm_device *dev) | ||
1439 | { | ||
1440 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1441 | u32 fbc_ctl; | ||
1442 | |||
1443 | /* Disable compression */ | ||
1444 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
1445 | if ((fbc_ctl & FBC_CTL_EN) == 0) | ||
1446 | return; | ||
1447 | |||
1448 | fbc_ctl &= ~FBC_CTL_EN; | ||
1449 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1518 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1450 | 1519 | ||
1451 | /* Wait for compressing bit to clear */ | 1520 | DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", |
1452 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { | 1521 | cfb_pitch, crtc->y, intel_crtc->plane); |
1453 | DRM_DEBUG_KMS("FBC idle timed out\n"); | ||
1454 | return; | ||
1455 | } | ||
1456 | |||
1457 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
1458 | } | 1522 | } |
1459 | 1523 | ||
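Worked example of the stride encoding above: cfb_pitch starts as the compressed-buffer size divided by FBC_LL_SIZE, is clamped to the framebuffer pitch, then stored in FBC_CTL as 64-byte units minus one. The sizes below are hypothetical, and FBC_LL_SIZE of 1536 is an assumption taken from headers of this era:

        #include <stdio.h>

        #define FBC_LL_SIZE 1536   /* line-length buffer entries; assumed value */

        int main(void)
        {
                int cfb_size = 4 * 1024 * 1024;          /* hypothetical CFB size */
                int fb_pitch = 1920 * 4;                 /* 1920 px at 32bpp = 7680 bytes */

                int cfb_pitch = cfb_size / FBC_LL_SIZE;  /* 2730 bytes per compressed line */
                if (fb_pitch < cfb_pitch)
                        cfb_pitch = fb_pitch;            /* never wider than the scanout */

                cfb_pitch = (cfb_pitch / 64) - 1;        /* FBC_CTL wants 64B units, minus 1 */
                printf("FBC_CTL stride field: %d\n", cfb_pitch);  /* 41 */
                return 0;
        }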
1460 | static bool i8xx_fbc_enabled(struct drm_device *dev) | 1524 | static bool i8xx_fbc_enabled(struct drm_device *dev) |
@@ -1476,30 +1540,9 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1476 | unsigned long stall_watermark = 200; | 1540 | unsigned long stall_watermark = 200; |
1477 | u32 dpfc_ctl; | 1541 | u32 dpfc_ctl; |
1478 | 1542 | ||
1479 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
1480 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
1481 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | ||
1482 | dev_priv->cfb_fence == obj->fence_reg && | ||
1483 | dev_priv->cfb_plane == intel_crtc->plane && | ||
1484 | dev_priv->cfb_y == crtc->y) | ||
1485 | return; | ||
1486 | |||
1487 | I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | ||
1488 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1489 | } | ||
1490 | |||
1491 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
1492 | dev_priv->cfb_fence = obj->fence_reg; | ||
1493 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1494 | dev_priv->cfb_y = crtc->y; | ||
1495 | |||
1496 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 1543 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1497 | if (obj->tiling_mode != I915_TILING_NONE) { | 1544 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; |
1498 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | 1545 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1499 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | ||
1500 | } else { | ||
1501 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); | ||
1502 | } | ||
1503 | 1546 | ||
1504 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | 1547 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1505 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1548 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
@@ -1512,7 +1555,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1512 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1555 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1513 | } | 1556 | } |
1514 | 1557 | ||
1515 | void g4x_disable_fbc(struct drm_device *dev) | 1558 | static void g4x_disable_fbc(struct drm_device *dev) |
1516 | { | 1559 | { |
1517 | struct drm_i915_private *dev_priv = dev->dev_private; | 1560 | struct drm_i915_private *dev_priv = dev->dev_private; |
1518 | u32 dpfc_ctl; | 1561 | u32 dpfc_ctl; |
@@ -1567,32 +1610,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1567 | u32 dpfc_ctl; | 1610 | u32 dpfc_ctl; |
1568 | 1611 | ||
1569 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1612 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1570 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
1571 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | ||
1572 | dev_priv->cfb_fence == obj->fence_reg && | ||
1573 | dev_priv->cfb_plane == intel_crtc->plane && | ||
1574 | dev_priv->cfb_offset == obj->gtt_offset && | ||
1575 | dev_priv->cfb_y == crtc->y) | ||
1576 | return; | ||
1577 | |||
1578 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | ||
1579 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1580 | } | ||
1581 | |||
1582 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
1583 | dev_priv->cfb_fence = obj->fence_reg; | ||
1584 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1585 | dev_priv->cfb_offset = obj->gtt_offset; | ||
1586 | dev_priv->cfb_y = crtc->y; | ||
1587 | |||
1588 | dpfc_ctl &= DPFC_RESERVED; | 1613 | dpfc_ctl &= DPFC_RESERVED; |
1589 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 1614 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
1590 | if (obj->tiling_mode != I915_TILING_NONE) { | 1615 | /* Set persistent mode for front-buffer rendering, a la X. */ |
1591 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | 1616 | dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; |
1592 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 1617 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); |
1593 | } else { | 1618 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
1594 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); | ||
1595 | } | ||
1596 | 1619 | ||
1597 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | 1620 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1598 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1621 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
@@ -1604,7 +1627,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1604 | 1627 | ||
1605 | if (IS_GEN6(dev)) { | 1628 | if (IS_GEN6(dev)) { |
1606 | I915_WRITE(SNB_DPFC_CTL_SA, | 1629 | I915_WRITE(SNB_DPFC_CTL_SA, |
1607 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); | 1630 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); |
1608 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | 1631 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); |
1609 | sandybridge_blit_fbc_update(dev); | 1632 | sandybridge_blit_fbc_update(dev); |
1610 | } | 1633 | } |
@@ -1612,7 +1635,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1612 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1635 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1613 | } | 1636 | } |
1614 | 1637 | ||
1615 | void ironlake_disable_fbc(struct drm_device *dev) | 1638 | static void ironlake_disable_fbc(struct drm_device *dev) |
1616 | { | 1639 | { |
1617 | struct drm_i915_private *dev_priv = dev->dev_private; | 1640 | struct drm_i915_private *dev_priv = dev->dev_private; |
1618 | u32 dpfc_ctl; | 1641 | u32 dpfc_ctl; |
@@ -1644,24 +1667,109 @@ bool intel_fbc_enabled(struct drm_device *dev) | |||
1644 | return dev_priv->display.fbc_enabled(dev); | 1667 | return dev_priv->display.fbc_enabled(dev); |
1645 | } | 1668 | } |
1646 | 1669 | ||
1647 | void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1670 | static void intel_fbc_work_fn(struct work_struct *__work) |
1648 | { | 1671 | { |
1649 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 1672 | struct intel_fbc_work *work = |
1673 | container_of(to_delayed_work(__work), | ||
1674 | struct intel_fbc_work, work); | ||
1675 | struct drm_device *dev = work->crtc->dev; | ||
1676 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1677 | |||
1678 | mutex_lock(&dev->struct_mutex); | ||
1679 | if (work == dev_priv->fbc_work) { | ||
1680 | /* Double check that we haven't switched fb without cancelling | ||
1681 | * the prior work. | ||
1682 | */ | ||
1683 | if (work->crtc->fb == work->fb) { | ||
1684 | dev_priv->display.enable_fbc(work->crtc, | ||
1685 | work->interval); | ||
1686 | |||
1687 | dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; | ||
1688 | dev_priv->cfb_fb = work->crtc->fb->base.id; | ||
1689 | dev_priv->cfb_y = work->crtc->y; | ||
1690 | } | ||
1691 | |||
1692 | dev_priv->fbc_work = NULL; | ||
1693 | } | ||
1694 | mutex_unlock(&dev->struct_mutex); | ||
1695 | |||
1696 | kfree(work); | ||
1697 | } | ||
1698 | |||
1699 | static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) | ||
1700 | { | ||
1701 | if (dev_priv->fbc_work == NULL) | ||
1702 | return; | ||
1703 | |||
1704 | DRM_DEBUG_KMS("cancelling pending FBC enable\n"); | ||
1705 | |||
1706 | /* Synchronisation is provided by struct_mutex and checking of | ||
1707 | * dev_priv->fbc_work, so we can perform the cancellation | ||
1708 | * entirely asynchronously. | ||
1709 | */ | ||
1710 | if (cancel_delayed_work(&dev_priv->fbc_work->work)) | ||
1711 | /* tasklet was killed before being run, clean up */ | ||
1712 | kfree(dev_priv->fbc_work); | ||
1713 | |||
1714 | /* Mark the work as no longer wanted so that if it does | ||
1715 | * wake up (because the work was already running and waiting | ||
1716 | * for our mutex), it will discover that it is no longer | ||
1717 | * necessary to run. | ||
1718 | */ | ||
1719 | dev_priv->fbc_work = NULL; | ||
1720 | } | ||
1721 | |||
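The cancellation protocol above has exactly two outcomes: either cancel_delayed_work() wins the race and the caller frees the work, or the callback is already running and, finding dev_priv->fbc_work no longer pointing at itself, frees itself. A single-threaded model of both paths (the mutex is implied by sequencing; names are stand-ins, purely illustrative):

        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>

        static void *fbc_work;                /* plays the role of dev_priv->fbc_work */

        static void run_work(void *w)
        {
                if (w != fbc_work) {          /* no longer wanted: orphaned, free self */
                        printf("orphaned work frees itself\n");
                        free(w);
                        return;
                }
                printf("work runs, then clears the pointer\n");
                fbc_work = NULL;
                free(w);
        }

        static void cancel_fbc_work(bool already_running)
        {
                void *w = fbc_work;
                if (!w)
                        return;
                fbc_work = NULL;              /* mark as unwanted in either case */
                if (!already_running)
                        free(w);              /* cancel succeeded: we own the free */
                else
                        run_work(w);          /* too late: the callback cleans up */
        }

        int main(void)
        {
                fbc_work = malloc(1);
                cancel_fbc_work(false);       /* path 1: cancelled before running */
                fbc_work = malloc(1);
                cancel_fbc_work(true);        /* path 2: already running */
                return 0;
        }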
1722 | static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
1723 | { | ||
1724 | struct intel_fbc_work *work; | ||
1725 | struct drm_device *dev = crtc->dev; | ||
1726 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1650 | 1727 | ||
1651 | if (!dev_priv->display.enable_fbc) | 1728 | if (!dev_priv->display.enable_fbc) |
1652 | return; | 1729 | return; |
1653 | 1730 | ||
1654 | dev_priv->display.enable_fbc(crtc, interval); | 1731 | intel_cancel_fbc_work(dev_priv); |
1732 | |||
1733 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
1734 | if (work == NULL) { | ||
1735 | dev_priv->display.enable_fbc(crtc, interval); | ||
1736 | return; | ||
1737 | } | ||
1738 | |||
1739 | work->crtc = crtc; | ||
1740 | work->fb = crtc->fb; | ||
1741 | work->interval = interval; | ||
1742 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); | ||
1743 | |||
1744 | dev_priv->fbc_work = work; | ||
1745 | |||
1746 | DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); | ||
1747 | |||
1748 | /* Delay the actual enabling to let pageflipping cease and the | ||
1749 | * display settle before starting the compression. Note that | ||
1750 | * this delay also serves a second purpose: it allows for a | ||
1751 | * vblank to pass after disabling the FBC before we attempt | ||
1752 | * to modify the control registers. | ||
1753 | * | ||
1754 | * A more complicated solution would involve tracking vblanks | ||
1755 | * following the termination of the page-flipping sequence | ||
1756 | * and indeed performing the enable as a co-routine and not | ||
1757 | * waiting synchronously upon the vblank. | ||
1758 | */ | ||
1759 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); | ||
1655 | } | 1760 | } |
1656 | 1761 | ||
1657 | void intel_disable_fbc(struct drm_device *dev) | 1762 | void intel_disable_fbc(struct drm_device *dev) |
1658 | { | 1763 | { |
1659 | struct drm_i915_private *dev_priv = dev->dev_private; | 1764 | struct drm_i915_private *dev_priv = dev->dev_private; |
1660 | 1765 | ||
1766 | intel_cancel_fbc_work(dev_priv); | ||
1767 | |||
1661 | if (!dev_priv->display.disable_fbc) | 1768 | if (!dev_priv->display.disable_fbc) |
1662 | return; | 1769 | return; |
1663 | 1770 | ||
1664 | dev_priv->display.disable_fbc(dev); | 1771 | dev_priv->display.disable_fbc(dev); |
1772 | dev_priv->cfb_plane = -1; | ||
1665 | } | 1773 | } |
1666 | 1774 | ||
1667 | /** | 1775 | /** |
@@ -1691,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1691 | struct drm_framebuffer *fb; | 1799 | struct drm_framebuffer *fb; |
1692 | struct intel_framebuffer *intel_fb; | 1800 | struct intel_framebuffer *intel_fb; |
1693 | struct drm_i915_gem_object *obj; | 1801 | struct drm_i915_gem_object *obj; |
1802 | int enable_fbc; | ||
1694 | 1803 | ||
1695 | DRM_DEBUG_KMS("\n"); | 1804 | DRM_DEBUG_KMS("\n"); |
1696 | 1805 | ||
@@ -1731,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1731 | intel_fb = to_intel_framebuffer(fb); | 1840 | intel_fb = to_intel_framebuffer(fb); |
1732 | obj = intel_fb->obj; | 1841 | obj = intel_fb->obj; |
1733 | 1842 | ||
1734 | if (!i915_enable_fbc) { | 1843 | enable_fbc = i915_enable_fbc; |
1735 | DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); | 1844 | if (enable_fbc < 0) { |
1845 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); | ||
1846 | enable_fbc = 1; | ||
1847 | if (INTEL_INFO(dev)->gen <= 5) | ||
1848 | enable_fbc = 0; | ||
1849 | } | ||
1850 | if (!enable_fbc) { | ||
1851 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
1736 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; | 1852 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1737 | goto out_disable; | 1853 | goto out_disable; |
1738 | } | 1854 | } |
@@ -1760,8 +1876,13 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1760 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | 1876 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1761 | goto out_disable; | 1877 | goto out_disable; |
1762 | } | 1878 | } |
1763 | if (obj->tiling_mode != I915_TILING_X) { | 1879 | |
1764 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1880 | /* The use of a CPU fence is mandatory in order to detect writes |
1881 | * by the CPU to the scanout and trigger updates to the FBC. | ||
1882 | */ | ||
1883 | if (obj->tiling_mode != I915_TILING_X || | ||
1884 | obj->fence_reg == I915_FENCE_REG_NONE) { | ||
1885 | DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); | ||
1765 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | 1886 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1766 | goto out_disable; | 1887 | goto out_disable; |
1767 | } | 1888 | } |
@@ -1770,6 +1891,44 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1770 | if (in_dbg_master()) | 1891 | if (in_dbg_master()) |
1771 | goto out_disable; | 1892 | goto out_disable; |
1772 | 1893 | ||
1894 | /* If the scanout has not changed, don't modify the FBC settings. | ||
1895 | * Note that we make the fundamental assumption that the fb->obj | ||
1896 | * cannot be unpinned (and have its GTT offset and fence revoked) | ||
1897 | * without first being decoupled from the scanout and FBC disabled. | ||
1898 | */ | ||
1899 | if (dev_priv->cfb_plane == intel_crtc->plane && | ||
1900 | dev_priv->cfb_fb == fb->base.id && | ||
1901 | dev_priv->cfb_y == crtc->y) | ||
1902 | return; | ||
1903 | |||
1904 | if (intel_fbc_enabled(dev)) { | ||
1905 | /* We update FBC along two paths, after changing fb/crtc | ||
1906 | * configuration (modeswitching) and after page-flipping | ||
1907 | * finishes. For the latter, we know that not only did | ||
1908 | * we disable the FBC at the start of the page-flip | ||
1909 | * sequence, but also more than one vblank has passed. | ||
1910 | * | ||
1911 | * For the former case of modeswitching, it is possible | ||
1912 | * to switch between two FBC valid configurations | ||
1913 | * instantaneously so we do need to disable the FBC | ||
1914 | * before we can modify its control registers. We also | ||
1915 | * have to wait for the next vblank for that to take | ||
1916 | * effect. However, since we delay enabling FBC we can | ||
1917 | * assume that a vblank has passed since disabling and | ||
1918 | * that we can safely alter the registers in the deferred | ||
1919 | * callback. | ||
1920 | * | ||
1921 | * In the scenario that we go from a valid to invalid | ||
1922 | * and then back to valid FBC configuration we have | ||
1923 | * no strict enforcement that a vblank occurred since | ||
1924 | * disabling the FBC. However, along all current pipe | ||
1925 | * disabling paths we do need to wait for a vblank at | ||
1926 | * some point. And we wait before enabling FBC anyway. | ||
1927 | */ | ||
1928 | DRM_DEBUG_KMS("disabling active FBC for update\n"); | ||
1929 | intel_disable_fbc(dev); | ||
1930 | } | ||
1931 | |||
1773 | intel_enable_fbc(crtc, 500); | 1932 | intel_enable_fbc(crtc, 500); |
1774 | return; | 1933 | return; |
1775 | 1934 | ||
@@ -1812,14 +1971,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1812 | } | 1971 | } |
1813 | 1972 | ||
1814 | dev_priv->mm.interruptible = false; | 1973 | dev_priv->mm.interruptible = false; |
1815 | ret = i915_gem_object_pin(obj, alignment, true); | 1974 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); |
1816 | if (ret) | 1975 | if (ret) |
1817 | goto err_interruptible; | 1976 | goto err_interruptible; |
1818 | 1977 | ||
1819 | ret = i915_gem_object_set_to_display_plane(obj, pipelined); | ||
1820 | if (ret) | ||
1821 | goto err_unpin; | ||
1822 | |||
1823 | /* Install a fence for tiled scan-out. Pre-i965 always needs a | 1978 | /* Install a fence for tiled scan-out. Pre-i965 always needs a |
1824 | * fence, whereas 965+ only requires a fence if using | 1979 | * fence, whereas 965+ only requires a fence if using |
1825 | * framebuffer compression. For simplicity, we always install | 1980 | * framebuffer compression. For simplicity, we always install |
@@ -1841,10 +1996,8 @@ err_interruptible: | |||
1841 | return ret; | 1996 | return ret; |
1842 | } | 1997 | } |
1843 | 1998 | ||
1844 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | 1999 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
1845 | static int | 2000 | int x, int y) |
1846 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
1847 | int x, int y, enum mode_set_atomic state) | ||
1848 | { | 2001 | { |
1849 | struct drm_device *dev = crtc->dev; | 2002 | struct drm_device *dev = crtc->dev; |
1850 | struct drm_i915_private *dev_priv = dev->dev_private; | 2003 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1887,7 +2040,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1887 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | 2040 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
1888 | break; | 2041 | break; |
1889 | default: | 2042 | default: |
1890 | DRM_ERROR("Unknown color depth\n"); | 2043 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
1891 | return -EINVAL; | 2044 | return -EINVAL; |
1892 | } | 2045 | } |
1893 | if (INTEL_INFO(dev)->gen >= 4) { | 2046 | if (INTEL_INFO(dev)->gen >= 4) { |
@@ -1897,10 +2050,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1897 | dspcntr &= ~DISPPLANE_TILED; | 2050 | dspcntr &= ~DISPPLANE_TILED; |
1898 | } | 2051 | } |
1899 | 2052 | ||
1900 | if (HAS_PCH_SPLIT(dev)) | ||
1901 | /* must disable */ | ||
1902 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | ||
1903 | |||
1904 | I915_WRITE(reg, dspcntr); | 2053 | I915_WRITE(reg, dspcntr); |
1905 | 2054 | ||
1906 | Start = obj->gtt_offset; | 2055 | Start = obj->gtt_offset; |
@@ -1917,6 +2066,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1917 | I915_WRITE(DSPADDR(plane), Start + Offset); | 2066 | I915_WRITE(DSPADDR(plane), Start + Offset); |
1918 | POSTING_READ(reg); | 2067 | POSTING_READ(reg); |
1919 | 2068 | ||
2069 | return 0; | ||
2070 | } | ||
2071 | |||
2072 | static int ironlake_update_plane(struct drm_crtc *crtc, | ||
2073 | struct drm_framebuffer *fb, int x, int y) | ||
2074 | { | ||
2075 | struct drm_device *dev = crtc->dev; | ||
2076 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2077 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2078 | struct intel_framebuffer *intel_fb; | ||
2079 | struct drm_i915_gem_object *obj; | ||
2080 | int plane = intel_crtc->plane; | ||
2081 | unsigned long Start, Offset; | ||
2082 | u32 dspcntr; | ||
2083 | u32 reg; | ||
2084 | |||
2085 | switch (plane) { | ||
2086 | case 0: | ||
2087 | case 1: | ||
2088 | break; | ||
2089 | default: | ||
2090 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | ||
2091 | return -EINVAL; | ||
2092 | } | ||
2093 | |||
2094 | intel_fb = to_intel_framebuffer(fb); | ||
2095 | obj = intel_fb->obj; | ||
2096 | |||
2097 | reg = DSPCNTR(plane); | ||
2098 | dspcntr = I915_READ(reg); | ||
2099 | /* Mask out pixel format bits in case we change it */ | ||
2100 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | ||
2101 | switch (fb->bits_per_pixel) { | ||
2102 | case 8: | ||
2103 | dspcntr |= DISPPLANE_8BPP; | ||
2104 | break; | ||
2105 | case 16: | ||
2106 | if (fb->depth != 16) | ||
2107 | return -EINVAL; | ||
2108 | |||
2109 | dspcntr |= DISPPLANE_16BPP; | ||
2110 | break; | ||
2111 | case 24: | ||
2112 | case 32: | ||
2113 | if (fb->depth == 24) | ||
2114 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
2115 | else if (fb->depth == 30) | ||
2116 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; | ||
2117 | else | ||
2118 | return -EINVAL; | ||
2119 | break; | ||
2120 | default: | ||
2121 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); | ||
2122 | return -EINVAL; | ||
2123 | } | ||
2124 | |||
2125 | if (obj->tiling_mode != I915_TILING_NONE) | ||
2126 | dspcntr |= DISPPLANE_TILED; | ||
2127 | else | ||
2128 | dspcntr &= ~DISPPLANE_TILED; | ||
2129 | |||
2130 | /* must disable */ | ||
2131 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | ||
2132 | |||
2133 | I915_WRITE(reg, dspcntr); | ||
2134 | |||
2135 | Start = obj->gtt_offset; | ||
2136 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | ||
2137 | |||
2138 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||
2139 | Start, Offset, x, y, fb->pitch); | ||
2140 | I915_WRITE(DSPSTRIDE(plane), fb->pitch); | ||
2141 | I915_WRITE(DSPSURF(plane), Start); | ||
2142 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | ||
2143 | I915_WRITE(DSPADDR(plane), Offset); | ||
2144 | POSTING_READ(reg); | ||
2145 | |||
2146 | return 0; | ||
2147 | } | ||
2148 | |||
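Both update_plane paths compute the same linear byte offset for the (x, y) origin: y * pitch + x * bytes-per-pixel. A quick worked example with hypothetical numbers:

        #include <stdio.h>

        int main(void)
        {
                int pitch = 1920 * 4;                  /* bytes per scanline, 32bpp */
                int bpp = 32, x = 100, y = 10;

                /* Same formula as the Offset computation in both paths above. */
                unsigned long offset = (unsigned long)y * pitch + x * (bpp / 8);
                printf("pixel (%d,%d) starts %lu bytes into the fb\n", x, y, offset);
                return 0;                              /* 10*7680 + 100*4 = 77200 */
        }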
2149 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | ||
2150 | static int | ||
2151 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
2152 | int x, int y, enum mode_set_atomic state) | ||
2153 | { | ||
2154 | struct drm_device *dev = crtc->dev; | ||
2155 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2156 | int ret; | ||
2157 | |||
2158 | ret = dev_priv->display.update_plane(crtc, fb, x, y); | ||
2159 | if (ret) | ||
2160 | return ret; | ||
2161 | |||
1920 | intel_update_fbc(dev); | 2162 | intel_update_fbc(dev); |
1921 | intel_increase_pllclock(crtc); | 2163 | intel_increase_pllclock(crtc); |
1922 | 2164 | ||
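intel_pipe_set_base_atomic() now dispatches through dev_priv->display.update_plane, a per-generation function pointer chosen once at init, so the common path no longer branches on chipset. A sketch of that indirection with stand-in names (not the real i915 vtable):

        #include <stdio.h>

        struct display_ops {                       /* stand-in for dev_priv->display */
                int (*update_plane)(int plane, int x, int y);
        };

        static int i9xx_up(int p, int x, int y)
        { printf("i9xx path: plane %d -> (%d,%d)\n", p, x, y); return 0; }

        static int ilk_up(int p, int x, int y)
        { printf("ironlake path: plane %d -> (%d,%d)\n", p, x, y); return 0; }

        int main(void)
        {
                int has_pch_split = 1;             /* pretend we probed an ILK+ chip */
                struct display_ops display = {
                        .update_plane = has_pch_split ? ilk_up : i9xx_up,
                };
                /* Common code dispatches without knowing the generation: */
                return display.update_plane(0, 64, 32);
        }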
@@ -1934,7 +2176,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1934 | 2176 | ||
1935 | /* no fb bound */ | 2177 | /* no fb bound */ |
1936 | if (!crtc->fb) { | 2178 | if (!crtc->fb) { |
1937 | DRM_DEBUG_KMS("No FB bound\n"); | 2179 | DRM_ERROR("No FB bound\n"); |
1938 | return 0; | 2180 | return 0; |
1939 | } | 2181 | } |
1940 | 2182 | ||
@@ -1943,6 +2185,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1943 | case 1: | 2185 | case 1: |
1944 | break; | 2186 | break; |
1945 | default: | 2187 | default: |
2188 | DRM_ERROR("no plane for crtc\n"); | ||
1946 | return -EINVAL; | 2189 | return -EINVAL; |
1947 | } | 2190 | } |
1948 | 2191 | ||
@@ -1952,6 +2195,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1952 | NULL); | 2195 | NULL); |
1953 | if (ret != 0) { | 2196 | if (ret != 0) { |
1954 | mutex_unlock(&dev->struct_mutex); | 2197 | mutex_unlock(&dev->struct_mutex); |
2198 | DRM_ERROR("pin & fence failed\n"); | ||
1955 | return ret; | 2199 | return ret; |
1956 | } | 2200 | } |
1957 | 2201 | ||
@@ -1971,7 +2215,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1971 | * This should only fail upon a hung GPU, in which case we | 2215 | * This should only fail upon a hung GPU, in which case we |
1972 | * can safely continue. | 2216 | * can safely continue. |
1973 | */ | 2217 | */ |
1974 | ret = i915_gem_object_flush_gpu(obj); | 2218 | ret = i915_gem_object_finish_gpu(obj); |
1975 | (void) ret; | 2219 | (void) ret; |
1976 | } | 2220 | } |
1977 | 2221 | ||
@@ -1980,6 +2224,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1980 | if (ret) { | 2224 | if (ret) { |
1981 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 2225 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
1982 | mutex_unlock(&dev->struct_mutex); | 2226 | mutex_unlock(&dev->struct_mutex); |
2227 | DRM_ERROR("failed to update base address\n"); | ||
1983 | return ret; | 2228 | return ret; |
1984 | } | 2229 | } |
1985 | 2230 | ||
@@ -2086,6 +2331,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) | |||
2086 | FDI_FE_ERRC_ENABLE); | 2331 | FDI_FE_ERRC_ENABLE); |
2087 | } | 2332 | } |
2088 | 2333 | ||
2334 | static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) | ||
2335 | { | ||
2336 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2337 | u32 flags = I915_READ(SOUTH_CHICKEN1); | ||
2338 | |||
2339 | flags |= FDI_PHASE_SYNC_OVR(pipe); | ||
2340 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ | ||
2341 | flags |= FDI_PHASE_SYNC_EN(pipe); | ||
2342 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ | ||
2343 | POSTING_READ(SOUTH_CHICKEN1); | ||
2344 | } | ||
2345 | |||
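cpt_phase_pointer_enable() needs two writes because the override bit acts as a lock: the first write sets only FDI_PHASE_SYNC_OVR to unlock the field, the second sets FDI_PHASE_SYNC_EN while the override is held. A stub-register sketch with made-up bit positions:

        #include <stdint.h>
        #include <stdio.h>

        #define SYNC_OVR(p) (1u << (2 * (p)))      /* illustrative bit layout */
        #define SYNC_EN(p)  (1u << (2 * (p) + 1))

        static uint32_t south_chicken1;            /* stub for the real register */

        static void write_reg(uint32_t v)
        { south_chicken1 = v; printf("write 0x%08x\n", v); }

        static void phase_pointer_enable(int pipe)
        {
                uint32_t flags = south_chicken1;
                flags |= SYNC_OVR(pipe);
                write_reg(flags);                  /* once to unlock... */
                flags |= SYNC_EN(pipe);
                write_reg(flags);                  /* then again to enable */
        }

        int main(void) { phase_pointer_enable(1); return 0; }

cpt_phase_pointer_disable() later in this patch mirrors the sequence in reverse: clear the enable bit first, then drop the override to re-lock.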
2089 | /* The FDI link training functions for ILK/Ibexpeak. */ | 2346 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2090 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 2347 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2091 | { | 2348 | { |
@@ -2236,6 +2493,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
2236 | POSTING_READ(reg); | 2493 | POSTING_READ(reg); |
2237 | udelay(150); | 2494 | udelay(150); |
2238 | 2495 | ||
2496 | if (HAS_PCH_CPT(dev)) | ||
2497 | cpt_phase_pointer_enable(dev, pipe); | ||
2498 | |||
2239 | for (i = 0; i < 4; i++ ) { | 2499 | for (i = 0; i < 4; i++ ) { |
2240 | reg = FDI_TX_CTL(pipe); | 2500 | reg = FDI_TX_CTL(pipe); |
2241 | temp = I915_READ(reg); | 2501 | temp = I915_READ(reg); |
@@ -2340,6 +2600,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2340 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | 2600 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2341 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2601 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2342 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2602 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2603 | temp |= FDI_COMPOSITE_SYNC; | ||
2343 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | 2604 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2344 | 2605 | ||
2345 | reg = FDI_RX_CTL(pipe); | 2606 | reg = FDI_RX_CTL(pipe); |
@@ -2347,11 +2608,15 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2347 | temp &= ~FDI_LINK_TRAIN_AUTO; | 2608 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2348 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2609 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2349 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | 2610 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2611 | temp |= FDI_COMPOSITE_SYNC; | ||
2350 | I915_WRITE(reg, temp | FDI_RX_ENABLE); | 2612 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2351 | 2613 | ||
2352 | POSTING_READ(reg); | 2614 | POSTING_READ(reg); |
2353 | udelay(150); | 2615 | udelay(150); |
2354 | 2616 | ||
2617 | if (HAS_PCH_CPT(dev)) | ||
2618 | cpt_phase_pointer_enable(dev, pipe); | ||
2619 | |||
2355 | for (i = 0; i < 4; i++ ) { | 2620 | for (i = 0; i < 4; i++ ) { |
2356 | reg = FDI_TX_CTL(pipe); | 2621 | reg = FDI_TX_CTL(pipe); |
2357 | temp = I915_READ(reg); | 2622 | temp = I915_READ(reg); |
@@ -2461,6 +2726,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) | |||
2461 | } | 2726 | } |
2462 | } | 2727 | } |
2463 | 2728 | ||
2729 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) | ||
2730 | { | ||
2731 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2732 | u32 flags = I915_READ(SOUTH_CHICKEN1); | ||
2733 | |||
2734 | flags &= ~(FDI_PHASE_SYNC_EN(pipe)); | ||
2735 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ | ||
2736 | flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); | ||
2737 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ | ||
2738 | POSTING_READ(SOUTH_CHICKEN1); | ||
2739 | } | ||
2464 | static void ironlake_fdi_disable(struct drm_crtc *crtc) | 2740 | static void ironlake_fdi_disable(struct drm_crtc *crtc) |
2465 | { | 2741 | { |
2466 | struct drm_device *dev = crtc->dev; | 2742 | struct drm_device *dev = crtc->dev; |
@@ -2490,6 +2766,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) | |||
2490 | I915_WRITE(FDI_RX_CHICKEN(pipe), | 2766 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2491 | I915_READ(FDI_RX_CHICKEN(pipe) & | 2767 | I915_READ(FDI_RX_CHICKEN(pipe) & |
2492 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); | 2768 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); |
2769 | } else if (HAS_PCH_CPT(dev)) { | ||
2770 | cpt_phase_pointer_disable(dev, pipe); | ||
2493 | } | 2771 | } |
2494 | 2772 | ||
2495 | /* still set train pattern 1 */ | 2773 | /* still set train pattern 1 */ |
@@ -2622,6 +2900,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2622 | /* For PCH DP, enable TRANS_DP_CTL */ | 2900 | /* For PCH DP, enable TRANS_DP_CTL */ |
2623 | if (HAS_PCH_CPT(dev) && | 2901 | if (HAS_PCH_CPT(dev) && |
2624 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 2902 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
2903 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; | ||
2625 | reg = TRANS_DP_CTL(pipe); | 2904 | reg = TRANS_DP_CTL(pipe); |
2626 | temp = I915_READ(reg); | 2905 | temp = I915_READ(reg); |
2627 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | 2906 | temp &= ~(TRANS_DP_PORT_SEL_MASK | |
@@ -2629,7 +2908,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2629 | TRANS_DP_BPC_MASK); | 2908 | TRANS_DP_BPC_MASK); |
2630 | temp |= (TRANS_DP_OUTPUT_ENABLE | | 2909 | temp |= (TRANS_DP_OUTPUT_ENABLE | |
2631 | TRANS_DP_ENH_FRAMING); | 2910 | TRANS_DP_ENH_FRAMING); |
2632 | temp |= TRANS_DP_8BPC; | 2911 | temp |= bpc << 9; /* same format but at 11:9 */ |
2633 | 2912 | ||
2634 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | 2913 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2635 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; | 2914 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
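The bpc value extracted from PIPECONF (field at bits 7:5) uses the same encoding that TRANS_DP_CTL expects at bits 11:9, so the hunk above simply relocates the field with >> 5 and << 9 instead of hardcoding TRANS_DP_8BPC. A worked example, assuming the era's PIPECONF encoding:

        #include <stdint.h>
        #include <stdio.h>

        #define PIPE_BPC_MASK (7u << 5)   /* PIPECONF bpc field at bits 7:5 */

        int main(void)
        {
                uint32_t pipeconf = 1u << 5;  /* assumed 10bpc-style encoding, for show */
                uint32_t bpc = (pipeconf & PIPE_BPC_MASK) >> 5;

                /* Same encoding, relocated into TRANS_DP_CTL bits 11:9. */
                uint32_t trans_dp_bits = bpc << 9;
                printf("TRANS_DP bpc bits = 0x%03x\n", (unsigned)trans_dp_bits);
                return 0;
        }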
@@ -2699,14 +2978,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2699 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | 2978 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
2700 | } | 2979 | } |
2701 | 2980 | ||
2981 | /* | ||
2982 | * On ILK+ LUT must be loaded before the pipe is running but with | ||
2983 | * clocks enabled | ||
2984 | */ | ||
2985 | intel_crtc_load_lut(crtc); | ||
2986 | |||
2702 | intel_enable_pipe(dev_priv, pipe, is_pch_port); | 2987 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
2703 | intel_enable_plane(dev_priv, plane, pipe); | 2988 | intel_enable_plane(dev_priv, plane, pipe); |
2704 | 2989 | ||
2705 | if (is_pch_port) | 2990 | if (is_pch_port) |
2706 | ironlake_pch_enable(crtc); | 2991 | ironlake_pch_enable(crtc); |
2707 | 2992 | ||
2708 | intel_crtc_load_lut(crtc); | ||
2709 | |||
2710 | mutex_lock(&dev->struct_mutex); | 2993 | mutex_lock(&dev->struct_mutex); |
2711 | intel_update_fbc(dev); | 2994 | intel_update_fbc(dev); |
2712 | mutex_unlock(&dev->struct_mutex); | 2995 | mutex_unlock(&dev->struct_mutex); |
@@ -2732,9 +3015,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2732 | 3015 | ||
2733 | intel_disable_plane(dev_priv, plane, pipe); | 3016 | intel_disable_plane(dev_priv, plane, pipe); |
2734 | 3017 | ||
2735 | if (dev_priv->cfb_plane == plane && | 3018 | if (dev_priv->cfb_plane == plane) |
2736 | dev_priv->display.disable_fbc) | 3019 | intel_disable_fbc(dev); |
2737 | dev_priv->display.disable_fbc(dev); | ||
2738 | 3020 | ||
2739 | intel_disable_pipe(dev_priv, pipe); | 3021 | intel_disable_pipe(dev_priv, pipe); |
2740 | 3022 | ||
@@ -2898,9 +3180,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
2898 | intel_crtc_dpms_overlay(intel_crtc, false); | 3180 | intel_crtc_dpms_overlay(intel_crtc, false); |
2899 | intel_crtc_update_cursor(crtc, false); | 3181 | intel_crtc_update_cursor(crtc, false); |
2900 | 3182 | ||
2901 | if (dev_priv->cfb_plane == plane && | 3183 | if (dev_priv->cfb_plane == plane) |
2902 | dev_priv->display.disable_fbc) | 3184 | intel_disable_fbc(dev); |
2903 | dev_priv->display.disable_fbc(dev); | ||
2904 | 3185 | ||
2905 | intel_disable_plane(dev_priv, plane, pipe); | 3186 | intel_disable_plane(dev_priv, plane, pipe); |
2906 | intel_disable_pipe(dev_priv, pipe); | 3187 | intel_disable_pipe(dev_priv, pipe); |
@@ -4309,6 +4590,137 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | |||
4309 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); | 4590 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4310 | } | 4591 | } |
4311 | 4592 | ||
4593 | /** | ||
4594 | * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send | ||
4595 | * @crtc: CRTC structure | ||
4596 | * | ||
4597 | * A pipe may be connected to one or more outputs. Based on the depth of the | ||
4598 | * attached framebuffer, choose a good color depth to use on the pipe. | ||
4599 | * | ||
4600 | * If possible, match the pipe depth to the fb depth. In some cases, this | ||
4601 | * isn't ideal, because the connected output supports a lesser or restricted | ||
4602 | * set of depths. Resolve that here: | ||
4603 | * LVDS typically supports only 6bpc, so clamp down in that case | ||
4604 | * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc | ||
4605 | * Displays may support a restricted set as well, check EDID and clamp as | ||
4606 | * appropriate. | ||
4607 | * | ||
4608 | * RETURNS: | ||
4609 | * Dithering requirement (i.e. false if display bpc and pipe bpc match, | ||
4610 | * true if they don't match). | ||
4611 | */ | ||
4612 | static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | ||
4613 | unsigned int *pipe_bpp) | ||
4614 | { | ||
4615 | struct drm_device *dev = crtc->dev; | ||
4616 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4617 | struct drm_encoder *encoder; | ||
4618 | struct drm_connector *connector; | ||
4619 | unsigned int display_bpc = UINT_MAX, bpc; | ||
4620 | |||
4621 | /* Walk the encoders & connectors on this crtc, get min bpc */ | ||
4622 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
4623 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
4624 | |||
4625 | if (encoder->crtc != crtc) | ||
4626 | continue; | ||
4627 | |||
4628 | if (intel_encoder->type == INTEL_OUTPUT_LVDS) { | ||
4629 | unsigned int lvds_bpc; | ||
4630 | |||
4631 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == | ||
4632 | LVDS_A3_POWER_UP) | ||
4633 | lvds_bpc = 8; | ||
4634 | else | ||
4635 | lvds_bpc = 6; | ||
4636 | |||
4637 | if (lvds_bpc < display_bpc) { | ||
4638 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); | ||
4639 | display_bpc = lvds_bpc; | ||
4640 | } | ||
4641 | continue; | ||
4642 | } | ||
4643 | |||
4644 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { | ||
4645 | /* Use VBT settings if we have an eDP panel */ | ||
4646 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; | ||
4647 | |||
4648 | if (edp_bpc < display_bpc) { | ||
4649 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); | ||
4650 | display_bpc = edp_bpc; | ||
4651 | } | ||
4652 | continue; | ||
4653 | } | ||
4654 | |||
4655 | /* Not one of the known troublemakers, check the EDID */ | ||
4656 | list_for_each_entry(connector, &dev->mode_config.connector_list, | ||
4657 | head) { | ||
4658 | if (connector->encoder != encoder) | ||
4659 | continue; | ||
4660 | |||
4661 | /* Don't use an invalid EDID bpc value */ | ||
4662 | if (connector->display_info.bpc && | ||
4663 | connector->display_info.bpc < display_bpc) { | ||
4664 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); | ||
4665 | display_bpc = connector->display_info.bpc; | ||
4666 | } | ||
4667 | } | ||
4668 | |||
4669 | /* | ||
4670 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak | ||
4671 | * through, clamp it down. (Note: >12bpc will be caught below.) | ||
4672 | */ | ||
4673 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { | ||
4674 | if (display_bpc > 8 && display_bpc < 12) { | ||
4675 | DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); | ||
4676 | display_bpc = 12; | ||
4677 | } else { | ||
4678 | DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); | ||
4679 | display_bpc = 8; | ||
4680 | } | ||
4681 | } | ||
4682 | } | ||
4683 | |||
4684 | /* | ||
4685 | * We could just drive the pipe at the highest bpc all the time and | ||
4686 | * enable dithering as needed, but that costs bandwidth. So choose | ||
4687 | * the minimum value that expresses the full color range of the fb but | ||
4688 | * also stays within the max display bpc discovered above. | ||
4689 | */ | ||
4690 | |||
4691 | switch (crtc->fb->depth) { | ||
4692 | case 8: | ||
4693 | bpc = 8; /* since we go through a colormap */ | ||
4694 | break; | ||
4695 | case 15: | ||
4696 | case 16: | ||
4697 | bpc = 6; /* min is 18bpp */ | ||
4698 | break; | ||
4699 | case 24: | ||
4700 | bpc = 8; | ||
4701 | break; | ||
4702 | case 30: | ||
4703 | bpc = 10; | ||
4704 | break; | ||
4705 | case 48: | ||
4706 | bpc = 12; | ||
4707 | break; | ||
4708 | default: | ||
4709 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); | ||
4710 | bpc = min((unsigned int)8, display_bpc); | ||
4711 | break; | ||
4712 | } | ||
4713 | |||
4714 | display_bpc = min(display_bpc, bpc); | ||
4715 | |||
4716 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", | ||
4717 | bpc, display_bpc); | ||
4718 | |||
4719 | *pipe_bpp = display_bpc * 3; | ||
4720 | |||
4721 | return display_bpc != bpc; | ||
4722 | } | ||
4723 | |||
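A worked instance of the clamp-and-dither decision above: a 24bpp framebuffer wants 8bpc, an 18-bit LVDS panel caps display_bpc at 6, so the pipe runs at 18bpp and the function reports that dithering is needed:

        #include <stdbool.h>
        #include <stdio.h>

        static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

        int main(void)
        {
                unsigned int fb_bpc = 8;        /* 24bpp framebuffer wants 8bpc */
                unsigned int display_bpc = 6;   /* e.g. an 18-bit LVDS panel */

                unsigned int pipe_bpc = min_u(display_bpc, fb_bpc);
                bool dither = (pipe_bpc != fb_bpc);   /* 8bpc content on a 6bpc pipe */

                printf("pipe_bpp = %u, dither = %d\n", pipe_bpc * 3, dither); /* 18, 1 */
                return 0;
        }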
4312 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | 4724 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4313 | struct drm_display_mode *mode, | 4725 | struct drm_display_mode *mode, |
4314 | struct drm_display_mode *adjusted_mode, | 4726 | struct drm_display_mode *adjusted_mode, |
@@ -4697,6 +5109,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4697 | return ret; | 5109 | return ret; |
4698 | } | 5110 | } |
4699 | 5111 | ||
5112 | static void ironlake_update_pch_refclk(struct drm_device *dev) | ||
5113 | { | ||
5114 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5115 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5116 | struct drm_crtc *crtc; | ||
5117 | struct intel_encoder *encoder; | ||
5118 | struct intel_encoder *has_edp_encoder = NULL; | ||
5119 | u32 temp; | ||
5120 | bool has_lvds = false; | ||
5121 | |||
5122 | /* We need to take the global config into account */ | ||
5123 | list_for_each_entry(crtc, &mode_config->crtc_list, head) { | ||
5124 | if (!crtc->enabled) | ||
5125 | continue; | ||
5126 | |||
5127 | list_for_each_entry(encoder, &mode_config->encoder_list, | ||
5128 | base.head) { | ||
5129 | if (encoder->base.crtc != crtc) | ||
5130 | continue; | ||
5131 | |||
5132 | switch (encoder->type) { | ||
5133 | case INTEL_OUTPUT_LVDS: | ||
5134 | has_lvds = true; /* fall through */ | ||
5135 | case INTEL_OUTPUT_EDP: | ||
5136 | has_edp_encoder = encoder; | ||
5137 | break; | ||
5138 | } | ||
5139 | } | ||
5140 | } | ||
5141 | |||
5142 | /* Ironlake: try to setup display ref clock before DPLL | ||
5143 | * enabling. This is only under driver's control after | ||
5144 | * PCH B stepping, previous chipset stepping should be | ||
5145 | * ignoring this setting. | ||
5146 | */ | ||
5147 | temp = I915_READ(PCH_DREF_CONTROL); | ||
5148 | /* Always enable nonspread source */ | ||
5149 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
5150 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5151 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5152 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5153 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5154 | |||
5155 | POSTING_READ(PCH_DREF_CONTROL); | ||
5156 | udelay(200); | ||
5157 | |||
5158 | if (has_edp_encoder) { | ||
5159 | if (intel_panel_use_ssc(dev_priv)) { | ||
5160 | temp |= DREF_SSC1_ENABLE; | ||
5161 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5162 | |||
5163 | POSTING_READ(PCH_DREF_CONTROL); | ||
5164 | udelay(200); | ||
5165 | } | ||
5166 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5167 | |||
5168 | /* Enable CPU source on CPU attached eDP */ | ||
5169 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
5170 | if (intel_panel_use_ssc(dev_priv)) | ||
5171 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
5172 | else | ||
5173 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
5174 | } else { | ||
5175 | /* Enable SSC on PCH eDP if needed */ | ||
5176 | if (intel_panel_use_ssc(dev_priv)) { | ||
5177 | DRM_ERROR("enabling SSC on PCH\n"); | ||
5178 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
5179 | } | ||
5180 | } | ||
5181 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5182 | POSTING_READ(PCH_DREF_CONTROL); | ||
5183 | udelay(200); | ||
5184 | } | ||
5185 | } | ||
5186 | |||
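Each step of this function follows the same cadence: write PCH_DREF_CONTROL, force the write out with POSTING_READ(), then udelay(200) so the delay measures settle time at the hardware rather than in a posting buffer. A userspace analogue of that posted-write-then-settle pattern (register and values stubbed, timings illustrative):

        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>

        static volatile uint32_t dref_control;    /* stub for PCH_DREF_CONTROL */

        static void posted_write(uint32_t v)
        {
                dref_control = v;
                (void)dref_control;               /* POSTING_READ: flush before timing */
                usleep(200);                      /* then the mandated 200us settle */
        }

        int main(void)
        {
                posted_write(0x1);                /* e.g. enable the nonspread source */
                posted_write(0x3);                /* then layer on SSC, step by step */
                printf("refclk staged: 0x%x\n", (unsigned)dref_control);
                return 0;
        }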
4700 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 5187 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
4701 | struct drm_display_mode *mode, | 5188 | struct drm_display_mode *mode, |
4702 | struct drm_display_mode *adjusted_mode, | 5189 | struct drm_display_mode *adjusted_mode, |
@@ -4721,7 +5208,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4721 | struct fdi_m_n m_n = {0}; | 5208 | struct fdi_m_n m_n = {0}; |
4722 | u32 temp; | 5209 | u32 temp; |
4723 | u32 lvds_sync = 0; | 5210 | u32 lvds_sync = 0; |
4724 | int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; | 5211 | int target_clock, pixel_multiplier, lane, link_bw, factor; |
5212 | unsigned int pipe_bpp; | ||
5213 | bool dither; | ||
4725 | 5214 | ||
4726 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | 5215 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
4727 | if (encoder->base.crtc != crtc) | 5216 | if (encoder->base.crtc != crtc) |
@@ -4848,56 +5337,38 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4848 | /* determine panel color depth */ | 5337 | /* determine panel color depth */ |
4849 | temp = I915_READ(PIPECONF(pipe)); | 5338 | temp = I915_READ(PIPECONF(pipe)); |
4850 | temp &= ~PIPE_BPC_MASK; | 5339 | temp &= ~PIPE_BPC_MASK; |
4851 | if (is_lvds) { | 5340 | dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp); |
4852 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | 5341 | switch (pipe_bpp) { |
4853 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | 5342 | case 18: |
4854 | temp |= PIPE_8BPC; | 5343 | temp |= PIPE_6BPC; |
4855 | else | ||
4856 | temp |= PIPE_6BPC; | ||
4857 | } else if (has_edp_encoder) { | ||
4858 | switch (dev_priv->edp.bpp/3) { | ||
4859 | case 8: | ||
4860 | temp |= PIPE_8BPC; | ||
4861 | break; | ||
4862 | case 10: | ||
4863 | temp |= PIPE_10BPC; | ||
4864 | break; | ||
4865 | case 6: | ||
4866 | temp |= PIPE_6BPC; | ||
4867 | break; | ||
4868 | case 12: | ||
4869 | temp |= PIPE_12BPC; | ||
4870 | break; | ||
4871 | } | ||
4872 | } else | ||
4873 | temp |= PIPE_8BPC; | ||
4874 | I915_WRITE(PIPECONF(pipe), temp); | ||
4875 | |||
4876 | switch (temp & PIPE_BPC_MASK) { | ||
4877 | case PIPE_8BPC: | ||
4878 | bpp = 24; | ||
4879 | break; | 5344 | break; |
4880 | case PIPE_10BPC: | 5345 | case 24: |
4881 | bpp = 30; | 5346 | temp |= PIPE_8BPC; |
4882 | break; | 5347 | break; |
4883 | case PIPE_6BPC: | 5348 | case 30: |
4884 | bpp = 18; | 5349 | temp |= PIPE_10BPC; |
4885 | break; | 5350 | break; |
4886 | case PIPE_12BPC: | 5351 | case 36: |
4887 | bpp = 36; | 5352 | temp |= PIPE_12BPC; |
4888 | break; | 5353 | break; |
4889 | default: | 5354 | default: |
4890 | DRM_ERROR("unknown pipe bpc value\n"); | 5355 | WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", |
4891 | bpp = 24; | 5356 | pipe_bpp); |
5357 | temp |= PIPE_8BPC; | ||
5358 | pipe_bpp = 24; | ||
5359 | break; | ||
4892 | } | 5360 | } |
4893 | 5361 | ||
5362 | intel_crtc->bpp = pipe_bpp; | ||
5363 | I915_WRITE(PIPECONF(pipe), temp); | ||
5364 | |||
4894 | if (!lane) { | 5365 | if (!lane) { |
4895 | /* | 5366 | /* |
4896 | * Account for spread spectrum to avoid | 5367 | * Account for spread spectrum to avoid |
4897 | * oversubscribing the link. Max center spread | 5368 | * oversubscribing the link. Max center spread |
4898 | * is 2.5%; use 5% for safety's sake. | 5369 | * is 2.5%; use 5% for safety's sake. |
4899 | */ | 5370 | */ |
4900 | u32 bps = target_clock * bpp * 21 / 20; | 5371 | u32 bps = target_clock * intel_crtc->bpp * 21 / 20; |
4901 | lane = bps / (link_bw * 8) + 1; | 5372 | lane = bps / (link_bw * 8) + 1; |
4902 | } | 5373 | } |
4903 | 5374 | ||
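The lane calculation just above pads the required bandwidth by 5% (the 21/20 factor) to leave headroom for spread-spectrum clocking, then divides by per-lane capacity. A worked check with illustrative numbers; the 270,000 link_bw figure is an assumption picked only to be in the same kHz-based units as target_clock:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the computation above: 5% spread headroom, then divide
 * by what one lane carries (link_bw * 8) and add one. */
static unsigned int fdi_lanes(uint32_t target_clock, uint32_t bpp,
			      uint32_t link_bw)
{
	uint32_t bps = target_clock * bpp * 21 / 20;

	return bps / (link_bw * 8) + 1;
}

int main(void)
{
	/* Illustrative: a 148,500 kHz (1080p) clock at 24 bpp needs
	 * 3,742,200 units against 2,160,000 per lane, giving 2 lanes. */
	printf("lanes = %u\n", fdi_lanes(148500, 24, 270000));
	return 0;
}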
@@ -4905,51 +5376,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4905 | 5376 | ||
4906 | if (pixel_multiplier > 1) | 5377 | if (pixel_multiplier > 1) |
4907 | link_bw *= pixel_multiplier; | 5378 | link_bw *= pixel_multiplier; |
4908 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | 5379 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
4909 | 5380 | &m_n); | |
4910 | /* Ironlake: try to setup display ref clock before DPLL | ||
4911 | * enabling. This is only under driver's control after | ||
4912 | * PCH B stepping, previous chipset stepping should be | ||
4913 | * ignoring this setting. | ||
4914 | */ | ||
4915 | temp = I915_READ(PCH_DREF_CONTROL); | ||
4916 | /* Always enable nonspread source */ | ||
4917 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
4918 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
4919 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
4920 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
4921 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4922 | |||
4923 | POSTING_READ(PCH_DREF_CONTROL); | ||
4924 | udelay(200); | ||
4925 | 5381 | ||
4926 | if (has_edp_encoder) { | 5382 | ironlake_update_pch_refclk(dev); |
4927 | if (intel_panel_use_ssc(dev_priv)) { | ||
4928 | temp |= DREF_SSC1_ENABLE; | ||
4929 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4930 | |||
4931 | POSTING_READ(PCH_DREF_CONTROL); | ||
4932 | udelay(200); | ||
4933 | } | ||
4934 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
4935 | |||
4936 | /* Enable CPU source on CPU attached eDP */ | ||
4937 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
4938 | if (intel_panel_use_ssc(dev_priv)) | ||
4939 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
4940 | else | ||
4941 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
4942 | } else { | ||
4943 | /* Enable SSC on PCH eDP if needed */ | ||
4944 | if (intel_panel_use_ssc(dev_priv)) { | ||
4945 | DRM_ERROR("enabling SSC on PCH\n"); | ||
4946 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
4947 | } | ||
4948 | } | ||
4949 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4950 | POSTING_READ(PCH_DREF_CONTROL); | ||
4951 | udelay(200); | ||
4952 | } | ||
4953 | 5383 | ||
4954 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 5384 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
4955 | if (has_reduced_clock) | 5385 | if (has_reduced_clock) |
@@ -4966,7 +5396,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4966 | } else if (is_sdvo && is_tv) | 5396 | } else if (is_sdvo && is_tv) |
4967 | factor = 20; | 5397 | factor = 20; |
4968 | 5398 | ||
4969 | if (clock.m1 < factor * clock.n) | 5399 | if (clock.m < factor * clock.n) |
4970 | fp |= FP_CB_TUNE; | 5400 | fp |= FP_CB_TUNE; |
4971 | 5401 | ||
4972 | dpll = 0; | 5402 | dpll = 0; |
@@ -5108,14 +5538,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5108 | I915_WRITE(PCH_LVDS, temp); | 5538 | I915_WRITE(PCH_LVDS, temp); |
5109 | } | 5539 | } |
5110 | 5540 | ||
5111 | /* set the dithering flag and clear for anything other than a panel. */ | ||
5112 | pipeconf &= ~PIPECONF_DITHER_EN; | 5541 | pipeconf &= ~PIPECONF_DITHER_EN; |
5113 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; | 5542 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; |
5114 | if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { | 5543 | if ((is_lvds && dev_priv->lvds_dither) || dither) { |
5115 | pipeconf |= PIPECONF_DITHER_EN; | 5544 | pipeconf |= PIPECONF_DITHER_EN; |
5116 | pipeconf |= PIPECONF_DITHER_TYPE_ST1; | 5545 | pipeconf |= PIPECONF_DITHER_TYPE_ST1; |
5117 | } | 5546 | } |
5118 | |||
5119 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5547 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5120 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 5548 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5121 | } else { | 5549 | } else { |
@@ -5246,6 +5674,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5246 | 5674 | ||
5247 | drm_vblank_post_modeset(dev, pipe); | 5675 | drm_vblank_post_modeset(dev, pipe); |
5248 | 5676 | ||
5677 | intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; | ||
5678 | |||
5249 | return ret; | 5679 | return ret; |
5250 | } | 5680 | } |
5251 | 5681 | ||
@@ -5330,6 +5760,31 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
5330 | I915_WRITE(CURBASE(pipe), base); | 5760 | I915_WRITE(CURBASE(pipe), base); |
5331 | } | 5761 | } |
5332 | 5762 | ||
5763 | static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) | ||
5764 | { | ||
5765 | struct drm_device *dev = crtc->dev; | ||
5766 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5767 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5768 | int pipe = intel_crtc->pipe; | ||
5769 | bool visible = base != 0; | ||
5770 | |||
5771 | if (intel_crtc->cursor_visible != visible) { | ||
5772 | uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); | ||
5773 | if (base) { | ||
5774 | cntl &= ~CURSOR_MODE; | ||
5775 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
5776 | } else { | ||
5777 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
5778 | cntl |= CURSOR_MODE_DISABLE; | ||
5779 | } | ||
5780 | I915_WRITE(CURCNTR_IVB(pipe), cntl); | ||
5781 | |||
5782 | intel_crtc->cursor_visible = visible; | ||
5783 | } | ||
5784 | /* and commit changes on next vblank */ | ||
5785 | I915_WRITE(CURBASE_IVB(pipe), base); | ||
5786 | } | ||
5787 | |||
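ivb_update_cursor above rewrites the control register only when visibility actually changes, and relies on the trailing CURBASE write to latch the new state at the next vblank. A compact sketch of that cache-and-commit pattern, with stand-in types and register accessors:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);

struct cursor {
	uint32_t cntl_reg;	/* control register offset */
	uint32_t base_reg;	/* base-address register offset */
	bool visible;		/* cached visibility state */
};

static void cursor_update(struct cursor *c, uint32_t base,
			  uint32_t mode_bits)
{
	bool visible = base != 0;

	if (c->visible != visible) {	/* touch CNTL only on transitions */
		uint32_t cntl = reg_read(c->cntl_reg);

		if (visible)
			cntl |= mode_bits;
		else
			cntl &= ~mode_bits;
		reg_write(c->cntl_reg, cntl);
		c->visible = visible;
	}

	/* The base write is what commits everything at the next vblank. */
	reg_write(c->base_reg, base);
}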
5333 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 5788 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
5334 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, | 5789 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, |
5335 | bool on) | 5790 | bool on) |
@@ -5377,11 +5832,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
5377 | if (!visible && !intel_crtc->cursor_visible) | 5832 | if (!visible && !intel_crtc->cursor_visible) |
5378 | return; | 5833 | return; |
5379 | 5834 | ||
5380 | I915_WRITE(CURPOS(pipe), pos); | 5835 | if (IS_IVYBRIDGE(dev)) { |
5381 | if (IS_845G(dev) || IS_I865G(dev)) | 5836 | I915_WRITE(CURPOS_IVB(pipe), pos); |
5382 | i845_update_cursor(crtc, base); | 5837 | ivb_update_cursor(crtc, base); |
5383 | else | 5838 | } else { |
5384 | i9xx_update_cursor(crtc, base); | 5839 | I915_WRITE(CURPOS(pipe), pos); |
5840 | if (IS_845G(dev) || IS_I865G(dev)) | ||
5841 | i845_update_cursor(crtc, base); | ||
5842 | else | ||
5843 | i9xx_update_cursor(crtc, base); | ||
5844 | } | ||
5385 | 5845 | ||
5386 | if (visible) | 5846 | if (visible) |
5387 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | 5847 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); |
@@ -5435,21 +5895,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
5435 | goto fail_locked; | 5895 | goto fail_locked; |
5436 | } | 5896 | } |
5437 | 5897 | ||
5438 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | 5898 | ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); |
5439 | if (ret) { | ||
5440 | DRM_ERROR("failed to pin cursor bo\n"); | ||
5441 | goto fail_locked; | ||
5442 | } | ||
5443 | |||
5444 | ret = i915_gem_object_set_to_gtt_domain(obj, 0); | ||
5445 | if (ret) { | 5899 | if (ret) { |
5446 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 5900 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
5447 | goto fail_unpin; | 5901 | goto fail_locked; |
5448 | } | 5902 | } |
5449 | 5903 | ||
5450 | ret = i915_gem_object_put_fence(obj); | 5904 | ret = i915_gem_object_put_fence(obj); |
5451 | if (ret) { | 5905 | if (ret) { |
5452 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 5906 | DRM_ERROR("failed to release fence for cursor\n"); |
5453 | goto fail_unpin; | 5907 | goto fail_unpin; |
5454 | } | 5908 | } |
5455 | 5909 | ||
@@ -6152,6 +6606,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
6152 | drm_gem_object_unreference(&work->pending_flip_obj->base); | 6606 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
6153 | drm_gem_object_unreference(&work->old_fb_obj->base); | 6607 | drm_gem_object_unreference(&work->old_fb_obj->base); |
6154 | 6608 | ||
6609 | intel_update_fbc(work->dev); | ||
6155 | mutex_unlock(&work->dev->struct_mutex); | 6610 | mutex_unlock(&work->dev->struct_mutex); |
6156 | kfree(work); | 6611 | kfree(work); |
6157 | } | 6612 | } |
@@ -6516,6 +6971,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
6516 | if (ret) | 6971 | if (ret) |
6517 | goto cleanup_pending; | 6972 | goto cleanup_pending; |
6518 | 6973 | ||
6974 | intel_disable_fbc(dev); | ||
6519 | mutex_unlock(&dev->struct_mutex); | 6975 | mutex_unlock(&dev->struct_mutex); |
6520 | 6976 | ||
6521 | trace_i915_flip_request(intel_crtc->plane, obj); | 6977 | trace_i915_flip_request(intel_crtc->plane, obj); |
@@ -6644,6 +7100,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
6644 | 7100 | ||
6645 | intel_crtc_reset(&intel_crtc->base); | 7101 | intel_crtc_reset(&intel_crtc->base); |
6646 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ | 7102 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
7103 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ | ||
6647 | 7104 | ||
6648 | if (HAS_PCH_SPLIT(dev)) { | 7105 | if (HAS_PCH_SPLIT(dev)) { |
6649 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | 7106 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
@@ -6823,8 +7280,6 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6823 | intel_encoder_clones(dev, encoder->clone_mask); | 7280 | intel_encoder_clones(dev, encoder->clone_mask); |
6824 | } | 7281 | } |
6825 | 7282 | ||
6826 | intel_panel_setup_backlight(dev); | ||
6827 | |||
6828 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 7283 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
6829 | drm_helper_disable_unused_functions(dev); | 7284 | drm_helper_disable_unused_functions(dev); |
6830 | } | 7285 | } |
@@ -6870,6 +7325,11 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
6870 | switch (mode_cmd->bpp) { | 7325 | switch (mode_cmd->bpp) { |
6871 | case 8: | 7326 | case 8: |
6872 | case 16: | 7327 | case 16: |
7328 | /* Only pre-ILK can handle 5:5:5 */ | ||
7329 | if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev)) | ||
7330 | return -EINVAL; | ||
7331 | break; | ||
7332 | |||
6873 | case 24: | 7333 | case 24: |
6874 | case 32: | 7334 | case 32: |
6875 | break; | 7335 | break; |
@@ -7284,6 +7744,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
7284 | mutex_unlock(&dev_priv->dev->struct_mutex); | 7744 | mutex_unlock(&dev_priv->dev->struct_mutex); |
7285 | } | 7745 | } |
7286 | 7746 | ||
7747 | void gen6_update_ring_freq(struct drm_i915_private *dev_priv) | ||
7748 | { | ||
7749 | int min_freq = 15; | ||
7750 | int gpu_freq, ia_freq, max_ia_freq; | ||
7751 | int scaling_factor = 180; | ||
7752 | |||
7753 | max_ia_freq = cpufreq_quick_get_max(0); | ||
7754 | /* | ||
7755 | * Default to the measured freq if none is found; the PCU will | ||
7756 | * ensure we don't go over | ||
7757 | */ | ||
7758 | if (!max_ia_freq) | ||
7759 | max_ia_freq = tsc_khz; | ||
7760 | |||
7761 | /* Convert from kHz to MHz */ | ||
7762 | max_ia_freq /= 1000; | ||
7763 | |||
7764 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
7765 | |||
7766 | /* | ||
7767 | * For each potential GPU frequency, load a ring frequency we'd like | ||
7768 | * to use for memory access. We do this by specifying the IA frequency | ||
7769 | * the PCU should use as a reference to determine the ring frequency. | ||
7770 | */ | ||
7771 | for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; | ||
7772 | gpu_freq--) { | ||
7773 | int diff = dev_priv->max_delay - gpu_freq; | ||
7774 | |||
7775 | /* | ||
7776 | * For GPU frequencies less than 750MHz, just use the lowest | ||
7777 | * ring freq. | ||
7778 | */ | ||
7779 | if (gpu_freq < min_freq) | ||
7780 | ia_freq = 800; | ||
7781 | else | ||
7782 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | ||
7783 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | ||
7784 | |||
7785 | I915_WRITE(GEN6_PCODE_DATA, | ||
7786 | (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | | ||
7787 | gpu_freq); | ||
7788 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | | ||
7789 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
7790 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & | ||
7791 | GEN6_PCODE_READY) == 0, 10)) { | ||
7792 | DRM_ERROR("pcode write of freq table timed out\n"); | ||
7793 | continue; | ||
7794 | } | ||
7795 | } | ||
7796 | |||
7797 | mutex_unlock(&dev_priv->dev->struct_mutex); | ||
7798 | } | ||
7799 | |||
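Each mailbox write above pairs a GPU frequency bin with an IA (CPU) reference frequency: the top bin tracks the CPU maximum, each bin below it drops by scaling_factor/2 = 90 MHz, and the result is encoded in 100 MHz units. A standalone sketch of the table with made-up values; max_ia_freq, max_delay and min_delay here are assumptions, and the PCODE register writes are replaced by a printf:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int min_freq = 15;		/* below this, pin the ring freq */
	int scaling_factor = 180;
	int max_ia_freq = 3400;		/* assumed CPU max, MHz */
	int max_delay = 22, min_delay = 11;	/* assumed GPU bins */
	int gpu_freq;

	for (gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq;

		if (gpu_freq < min_freq)
			ia_freq = 800;	/* lowest ring frequency */
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		/* The driver writes (ia_freq << shift) | gpu_freq to the
		 * PCODE mailbox; printing the pair stands in for that. */
		printf("gpu bin %2d -> ia code %2d\n", gpu_freq, ia_freq);
	}
	return 0;
}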
7287 | static void ironlake_init_clock_gating(struct drm_device *dev) | 7800 | static void ironlake_init_clock_gating(struct drm_device *dev) |
7288 | { | 7801 | { |
7289 | struct drm_i915_private *dev_priv = dev->dev_private; | 7802 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -7369,6 +7882,20 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
7369 | I915_WRITE(WM2_LP_ILK, 0); | 7882 | I915_WRITE(WM2_LP_ILK, 0); |
7370 | I915_WRITE(WM1_LP_ILK, 0); | 7883 | I915_WRITE(WM1_LP_ILK, 0); |
7371 | 7884 | ||
7885 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock | ||
7886 | * gating disable must be set. Failure to set it results in | ||
7887 | * flickering pixels due to Z write ordering failures after | ||
7888 | * some amount of runtime in the Mesa "fire" demo, and Unigine | ||
7889 | * Sanctuary and Tropics, and apparently anything else with | ||
7890 | * alpha test or pixel discard. | ||
7891 | * | ||
7892 | * According to the spec, bit 11 (RCCUNIT) must also be set, | ||
7893 | * but we didn't debug actual testcases to find it out. | ||
7894 | */ | ||
7895 | I915_WRITE(GEN6_UCGCTL2, | ||
7896 | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | ||
7897 | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | ||
7898 | |||
7372 | /* | 7899 | /* |
7373 | * According to the spec the following bits should be | 7900 | * According to the spec the following bits should be |
7374 | * set in order to enable memory self-refresh and fbc: | 7901 | * set in order to enable memory self-refresh and fbc: |
@@ -7389,10 +7916,12 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
7389 | ILK_DPARB_CLK_GATE | | 7916 | ILK_DPARB_CLK_GATE | |
7390 | ILK_DPFD_CLK_GATE); | 7917 | ILK_DPFD_CLK_GATE); |
7391 | 7918 | ||
7392 | for_each_pipe(pipe) | 7919 | for_each_pipe(pipe) { |
7393 | I915_WRITE(DSPCNTR(pipe), | 7920 | I915_WRITE(DSPCNTR(pipe), |
7394 | I915_READ(DSPCNTR(pipe)) | | 7921 | I915_READ(DSPCNTR(pipe)) | |
7395 | DISPPLANE_TRICKLE_FEED_DISABLE); | 7922 | DISPPLANE_TRICKLE_FEED_DISABLE); |
7923 | intel_flush_display_plane(dev_priv, pipe); | ||
7924 | } | ||
7396 | } | 7925 | } |
7397 | 7926 | ||
7398 | static void ivybridge_init_clock_gating(struct drm_device *dev) | 7927 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
@@ -7409,10 +7938,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
7409 | 7938 | ||
7410 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | 7939 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
7411 | 7940 | ||
7412 | for_each_pipe(pipe) | 7941 | for_each_pipe(pipe) { |
7413 | I915_WRITE(DSPCNTR(pipe), | 7942 | I915_WRITE(DSPCNTR(pipe), |
7414 | I915_READ(DSPCNTR(pipe)) | | 7943 | I915_READ(DSPCNTR(pipe)) | |
7415 | DISPPLANE_TRICKLE_FEED_DISABLE); | 7944 | DISPPLANE_TRICKLE_FEED_DISABLE); |
7945 | intel_flush_display_plane(dev_priv, pipe); | ||
7946 | } | ||
7416 | } | 7947 | } |
7417 | 7948 | ||
7418 | static void g4x_init_clock_gating(struct drm_device *dev) | 7949 | static void g4x_init_clock_gating(struct drm_device *dev) |
@@ -7495,6 +8026,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) | |||
7495 | static void cpt_init_clock_gating(struct drm_device *dev) | 8026 | static void cpt_init_clock_gating(struct drm_device *dev) |
7496 | { | 8027 | { |
7497 | struct drm_i915_private *dev_priv = dev->dev_private; | 8028 | struct drm_i915_private *dev_priv = dev->dev_private; |
8029 | int pipe; | ||
7498 | 8030 | ||
7499 | /* | 8031 | /* |
7500 | * On Ibex Peak and Cougar Point, we need to disable clock | 8032 | * On Ibex Peak and Cougar Point, we need to disable clock |
@@ -7504,6 +8036,9 @@ static void cpt_init_clock_gating(struct drm_device *dev) | |||
7504 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | 8036 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); |
7505 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | 8037 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | |
7506 | DPLS_EDP_PPS_FIX_DIS); | 8038 | DPLS_EDP_PPS_FIX_DIS); |
8039 | /* Without this, mode sets may fail silently on FDI */ | ||
8040 | for_each_pipe(pipe) | ||
8041 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
7507 | } | 8042 | } |
7508 | 8043 | ||
7509 | static void ironlake_teardown_rc6(struct drm_device *dev) | 8044 | static void ironlake_teardown_rc6(struct drm_device *dev) |
@@ -7640,9 +8175,11 @@ static void intel_init_display(struct drm_device *dev) | |||
7640 | if (HAS_PCH_SPLIT(dev)) { | 8175 | if (HAS_PCH_SPLIT(dev)) { |
7641 | dev_priv->display.dpms = ironlake_crtc_dpms; | 8176 | dev_priv->display.dpms = ironlake_crtc_dpms; |
7642 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; | 8177 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
8178 | dev_priv->display.update_plane = ironlake_update_plane; | ||
7643 | } else { | 8179 | } else { |
7644 | dev_priv->display.dpms = i9xx_crtc_dpms; | 8180 | dev_priv->display.dpms = i9xx_crtc_dpms; |
7645 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; | 8181 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
8182 | dev_priv->display.update_plane = i9xx_update_plane; | ||
7646 | } | 8183 | } |
7647 | 8184 | ||
7648 | if (I915_HAS_FBC(dev)) { | 8185 | if (I915_HAS_FBC(dev)) { |
@@ -7851,6 +8388,9 @@ struct intel_quirk intel_quirks[] = { | |||
7851 | 8388 | ||
7852 | /* Lenovo U160 cannot use SSC on LVDS */ | 8389 | /* Lenovo U160 cannot use SSC on LVDS */ |
7853 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | 8390 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
8391 | |||
8392 | /* Sony Vaio Y cannot use SSC on LVDS */ | ||
8393 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | ||
7854 | }; | 8394 | }; |
7855 | 8395 | ||
7856 | static void intel_init_quirks(struct drm_device *dev) | 8396 | static void intel_init_quirks(struct drm_device *dev) |
@@ -7939,8 +8479,10 @@ void intel_modeset_init(struct drm_device *dev) | |||
7939 | intel_init_emon(dev); | 8479 | intel_init_emon(dev); |
7940 | } | 8480 | } |
7941 | 8481 | ||
7942 | if (IS_GEN6(dev)) | 8482 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
7943 | gen6_enable_rps(dev_priv); | 8483 | gen6_enable_rps(dev_priv); |
8484 | gen6_update_ring_freq(dev_priv); | ||
8485 | } | ||
7944 | 8486 | ||
7945 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 8487 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
7946 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 8488 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
@@ -7976,12 +8518,11 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
7976 | intel_increase_pllclock(crtc); | 8518 | intel_increase_pllclock(crtc); |
7977 | } | 8519 | } |
7978 | 8520 | ||
7979 | if (dev_priv->display.disable_fbc) | 8521 | intel_disable_fbc(dev); |
7980 | dev_priv->display.disable_fbc(dev); | ||
7981 | 8522 | ||
7982 | if (IS_IRONLAKE_M(dev)) | 8523 | if (IS_IRONLAKE_M(dev)) |
7983 | ironlake_disable_drps(dev); | 8524 | ironlake_disable_drps(dev); |
7984 | if (IS_GEN6(dev)) | 8525 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
7985 | gen6_disable_rps(dev); | 8526 | gen6_disable_rps(dev); |
7986 | 8527 | ||
7987 | if (IS_IRONLAKE_M(dev)) | 8528 | if (IS_IRONLAKE_M(dev)) |
@@ -7994,6 +8535,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
7994 | drm_irq_uninstall(dev); | 8535 | drm_irq_uninstall(dev); |
7995 | cancel_work_sync(&dev_priv->hotplug_work); | 8536 | cancel_work_sync(&dev_priv->hotplug_work); |
7996 | 8537 | ||
8538 | /* flush any delayed tasks or pending work */ | ||
8539 | flush_scheduled_work(); | ||
8540 | |||
7997 | /* Shut off idle work before the crtcs get freed. */ | 8541 | /* Shut off idle work before the crtcs get freed. */ |
7998 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 8542 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
7999 | intel_crtc = to_intel_crtc(crtc); | 8543 | intel_crtc = to_intel_crtc(crtc); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index e2aced6eec4..7c64db48c1c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -50,9 +50,10 @@ struct intel_dp { | |||
50 | bool has_audio; | 50 | bool has_audio; |
51 | int force_audio; | 51 | int force_audio; |
52 | uint32_t color_range; | 52 | uint32_t color_range; |
53 | int dpms_mode; | ||
53 | uint8_t link_bw; | 54 | uint8_t link_bw; |
54 | uint8_t lane_count; | 55 | uint8_t lane_count; |
55 | uint8_t dpcd[4]; | 56 | uint8_t dpcd[8]; |
56 | struct i2c_adapter adapter; | 57 | struct i2c_adapter adapter; |
57 | struct i2c_algo_dp_aux_data algo; | 58 | struct i2c_algo_dp_aux_data algo; |
58 | bool is_pch_edp; | 59 | bool is_pch_edp; |
@@ -178,12 +179,14 @@ intel_dp_link_clock(uint8_t link_bw) | |||
178 | static int | 179 | static int |
179 | intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) | 180 | intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) |
180 | { | 181 | { |
181 | struct drm_i915_private *dev_priv = dev->dev_private; | 182 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
183 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
184 | int bpp = 24; | ||
182 | 185 | ||
183 | if (is_edp(intel_dp)) | 186 | if (intel_crtc) |
184 | return (pixel_clock * dev_priv->edp.bpp + 7) / 8; | 187 | bpp = intel_crtc->bpp; |
185 | else | 188 | |
186 | return pixel_clock * 3; | 189 | return (pixel_clock * bpp + 7) / 8; |
187 | } | 190 | } |
188 | 191 | ||
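intel_dp_link_required now takes the depth from the pipe itself: pixel clock in kHz times bits per pixel, rounded up to bytes. A worked check against an assumed four-lane capacity; the capacity formula follows the driver's usual 8b/10b accounting (in the style of its intel_dp_max_data_rate helper), and all numbers are illustrative:

#include <stdio.h>

/* Required rate, mirroring the function above: kHz * bpp / 8. */
static int dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 7) / 8;
}

/* Capacity in matching units: link clock * lanes, derated 8b/10b. */
static int dp_link_capacity(int link_clock, int lanes)
{
	return link_clock * lanes * 8 / 10;
}

int main(void)
{
	int need = dp_link_required(148500, 24);	/* 1080p, 24 bpp */
	int have = dp_link_capacity(270000, 4);		/* assumed HBR x4 */

	printf("need %d, have %d -> %s\n", need, have,
	       need <= have ? "mode fits" : "mode too big");
	return 0;
}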
189 | static int | 192 | static int |
@@ -314,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
314 | else | 317 | else |
315 | precharge = 5; | 318 | precharge = 5; |
316 | 319 | ||
317 | if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { | 320 | /* Try to wait for any previous AUX channel activity */ |
318 | DRM_ERROR("dp_aux_ch not started status 0x%08x\n", | 321 | for (try = 0; try < 3; try++) { |
319 | I915_READ(ch_ctl)); | 322 | status = I915_READ(ch_ctl); |
323 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) | ||
324 | break; | ||
325 | msleep(1); | ||
326 | } | ||
327 | |||
328 | if (try == 3) { | ||
329 | WARN(1, "dp_aux_ch not started status 0x%08x\n", | ||
330 | I915_READ(ch_ctl)); | ||
320 | return -EBUSY; | 331 | return -EBUSY; |
321 | } | 332 | } |
322 | 333 | ||
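Rather than failing on the first busy read, the AUX path now polls a bounded number of times before giving up. The same bounded-poll idiom, sketched generically with stand-in accessors:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);
extern void sleep_ms(unsigned int ms);

/* Poll until busy_bit clears, giving up after 'tries' attempts;
 * returns true if the channel went idle in time. */
static bool wait_channel_idle(uint32_t ctl_reg, uint32_t busy_bit, int tries)
{
	int try;

	for (try = 0; try < tries; try++) {
		if ((reg_read(ctl_reg) & busy_bit) == 0)
			return true;
		sleep_ms(1);
	}
	return false;
}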
@@ -681,7 +692,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
681 | struct drm_encoder *encoder; | 692 | struct drm_encoder *encoder; |
682 | struct drm_i915_private *dev_priv = dev->dev_private; | 693 | struct drm_i915_private *dev_priv = dev->dev_private; |
683 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 694 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
684 | int lane_count = 4, bpp = 24; | 695 | int lane_count = 4; |
685 | struct intel_dp_m_n m_n; | 696 | struct intel_dp_m_n m_n; |
686 | int pipe = intel_crtc->pipe; | 697 | int pipe = intel_crtc->pipe; |
687 | 698 | ||
@@ -700,7 +711,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
700 | break; | 711 | break; |
701 | } else if (is_edp(intel_dp)) { | 712 | } else if (is_edp(intel_dp)) { |
702 | lane_count = dev_priv->edp.lanes; | 713 | lane_count = dev_priv->edp.lanes; |
703 | bpp = dev_priv->edp.bpp; | ||
704 | break; | 714 | break; |
705 | } | 715 | } |
706 | } | 716 | } |
@@ -710,7 +720,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
710 | * the number of bytes_per_pixel post-LUT, which we always | 720 | * the number of bytes_per_pixel post-LUT, which we always |
711 | * set up for 8-bits of R/G/B, or 3 bytes total. | 721 | * set up for 8-bits of R/G/B, or 3 bytes total. |
712 | */ | 722 | */ |
713 | intel_dp_compute_m_n(bpp, lane_count, | 723 | intel_dp_compute_m_n(intel_crtc->bpp, lane_count, |
714 | mode->clock, adjusted_mode->clock, &m_n); | 724 | mode->clock, adjusted_mode->clock, &m_n); |
715 | 725 | ||
716 | if (HAS_PCH_SPLIT(dev)) { | 726 | if (HAS_PCH_SPLIT(dev)) { |
@@ -769,6 +779,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
769 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 779 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
770 | intel_dp->link_configuration[0] = intel_dp->link_bw; | 780 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
771 | intel_dp->link_configuration[1] = intel_dp->lane_count; | 781 | intel_dp->link_configuration[1] = intel_dp->lane_count; |
782 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; | ||
772 | 783 | ||
773 | /* | 784 | /* |
774 | * Check for DPCD version > 1.1 and enhanced framing support | 785 | * Check for DPCD version > 1.1 and enhanced framing support |
@@ -1010,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder) | |||
1010 | 1021 | ||
1011 | if (is_edp(intel_dp)) | 1022 | if (is_edp(intel_dp)) |
1012 | ironlake_edp_backlight_on(dev); | 1023 | ironlake_edp_backlight_on(dev); |
1024 | |||
1025 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | ||
1013 | } | 1026 | } |
1014 | 1027 | ||
1015 | static void | 1028 | static void |
@@ -1044,6 +1057,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
1044 | if (is_edp(intel_dp)) | 1057 | if (is_edp(intel_dp)) |
1045 | ironlake_edp_backlight_on(dev); | 1058 | ironlake_edp_backlight_on(dev); |
1046 | } | 1059 | } |
1060 | intel_dp->dpms_mode = mode; | ||
1047 | } | 1061 | } |
1048 | 1062 | ||
1049 | /* | 1063 | /* |
@@ -1333,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1333 | u32 reg; | 1347 | u32 reg; |
1334 | uint32_t DP = intel_dp->DP; | 1348 | uint32_t DP = intel_dp->DP; |
1335 | 1349 | ||
1336 | /* Enable output, wait for it to become active */ | 1350 | /* |
1337 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | 1351 | * On CPT we have to enable the port in training pattern 1, which |
1338 | POSTING_READ(intel_dp->output_reg); | 1352 | * will happen below in intel_dp_set_link_train. Otherwise, enable |
1339 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1353 | * the port and wait for it to become active. |
1354 | */ | ||
1355 | if (!HAS_PCH_CPT(dev)) { | ||
1356 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | ||
1357 | POSTING_READ(intel_dp->output_reg); | ||
1358 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1359 | } | ||
1340 | 1360 | ||
1341 | /* Write the link configuration data */ | 1361 | /* Write the link configuration data */ |
1342 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1362 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
@@ -1369,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1369 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1389 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1370 | 1390 | ||
1371 | if (!intel_dp_set_link_train(intel_dp, reg, | 1391 | if (!intel_dp_set_link_train(intel_dp, reg, |
1372 | DP_TRAINING_PATTERN_1)) | 1392 | DP_TRAINING_PATTERN_1 | |
1393 | DP_LINK_SCRAMBLING_DISABLE)) | ||
1373 | break; | 1394 | break; |
1374 | /* Set training pattern 1 */ | 1395 | /* Set training pattern 1 */ |
1375 | 1396 | ||
@@ -1444,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1444 | 1465 | ||
1445 | /* channel eq pattern */ | 1466 | /* channel eq pattern */ |
1446 | if (!intel_dp_set_link_train(intel_dp, reg, | 1467 | if (!intel_dp_set_link_train(intel_dp, reg, |
1447 | DP_TRAINING_PATTERN_2)) | 1468 | DP_TRAINING_PATTERN_2 | |
1469 | DP_LINK_SCRAMBLING_DISABLE)) | ||
1448 | break; | 1470 | break; |
1449 | 1471 | ||
1450 | udelay(400); | 1472 | udelay(400); |
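Both training hunks add DP_LINK_SCRAMBLING_DISABLE, matching the DisplayPort rule that training patterns 1 and 2 run unscrambled. As a phase sketch only (the real code retries each phase while adjusting voltage swing and pre-emphasis from sink feedback):

/* Schematic DP link-training sequence, not the driver's loop. */
enum dp_train_phase {
	DP_PHASE_CLOCK_RECOVERY,	/* pattern 1, scrambling disabled */
	DP_PHASE_CHANNEL_EQ,		/* pattern 2, scrambling disabled */
	DP_PHASE_NORMAL,		/* live video, scrambling back on */
};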
@@ -1558,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1558 | POSTING_READ(intel_dp->output_reg); | 1580 | POSTING_READ(intel_dp->output_reg); |
1559 | } | 1581 | } |
1560 | 1582 | ||
1583 | static bool | ||
1584 | intel_dp_get_dpcd(struct intel_dp *intel_dp) | ||
1585 | { | ||
1586 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, | ||
1587 | sizeof (intel_dp->dpcd)) && | ||
1588 | (intel_dp->dpcd[DP_DPCD_REV] != 0)) { | ||
1589 | return true; | ||
1590 | } | ||
1591 | |||
1592 | return false; | ||
1593 | } | ||
1594 | |||
1561 | /* | 1595 | /* |
1562 | * According to DP spec | 1596 | * According to DP spec |
1563 | * 5.1.2: | 1597 | * 5.1.2: |
@@ -1570,36 +1604,44 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1570 | static void | 1604 | static void |
1571 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 1605 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1572 | { | 1606 | { |
1573 | int ret; | 1607 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) |
1608 | return; | ||
1574 | 1609 | ||
1575 | if (!intel_dp->base.base.crtc) | 1610 | if (!intel_dp->base.base.crtc) |
1576 | return; | 1611 | return; |
1577 | 1612 | ||
1613 | /* Try to read receiver status if the link appears to be up */ | ||
1578 | if (!intel_dp_get_link_status(intel_dp)) { | 1614 | if (!intel_dp_get_link_status(intel_dp)) { |
1579 | intel_dp_link_down(intel_dp); | 1615 | intel_dp_link_down(intel_dp); |
1580 | return; | 1616 | return; |
1581 | } | 1617 | } |
1582 | 1618 | ||
1583 | /* Try to read receiver status if the link appears to be up */ | 1619 | /* Now read the DPCD to see if it's actually running */ |
1584 | ret = intel_dp_aux_native_read(intel_dp, | 1620 | if (!intel_dp_get_dpcd(intel_dp)) { |
1585 | 0x000, intel_dp->dpcd, | ||
1586 | sizeof (intel_dp->dpcd)); | ||
1587 | if (ret != sizeof(intel_dp->dpcd)) { | ||
1588 | intel_dp_link_down(intel_dp); | 1621 | intel_dp_link_down(intel_dp); |
1589 | return; | 1622 | return; |
1590 | } | 1623 | } |
1591 | 1624 | ||
1592 | if (!intel_channel_eq_ok(intel_dp)) { | 1625 | if (!intel_channel_eq_ok(intel_dp)) { |
1626 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | ||
1627 | drm_get_encoder_name(&intel_dp->base.base)); | ||
1593 | intel_dp_start_link_train(intel_dp); | 1628 | intel_dp_start_link_train(intel_dp); |
1594 | intel_dp_complete_link_train(intel_dp); | 1629 | intel_dp_complete_link_train(intel_dp); |
1595 | } | 1630 | } |
1596 | } | 1631 | } |
1597 | 1632 | ||
1598 | static enum drm_connector_status | 1633 | static enum drm_connector_status |
1634 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) | ||
1635 | { | ||
1636 | if (intel_dp_get_dpcd(intel_dp)) | ||
1637 | return connector_status_connected; | ||
1638 | return connector_status_disconnected; | ||
1639 | } | ||
1640 | |||
1641 | static enum drm_connector_status | ||
1599 | ironlake_dp_detect(struct intel_dp *intel_dp) | 1642 | ironlake_dp_detect(struct intel_dp *intel_dp) |
1600 | { | 1643 | { |
1601 | enum drm_connector_status status; | 1644 | enum drm_connector_status status; |
1602 | bool ret; | ||
1603 | 1645 | ||
1604 | /* Can't disconnect eDP, but you can close the lid... */ | 1646 | /* Can't disconnect eDP, but you can close the lid... */ |
1605 | if (is_edp(intel_dp)) { | 1647 | if (is_edp(intel_dp)) { |
@@ -1609,15 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp) | |||
1609 | return status; | 1651 | return status; |
1610 | } | 1652 | } |
1611 | 1653 | ||
1612 | status = connector_status_disconnected; | 1654 | return intel_dp_detect_dpcd(intel_dp); |
1613 | ret = intel_dp_aux_native_read_retry(intel_dp, | ||
1614 | 0x000, intel_dp->dpcd, | ||
1615 | sizeof (intel_dp->dpcd)); | ||
1616 | if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0) | ||
1617 | status = connector_status_connected; | ||
1618 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], | ||
1619 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); | ||
1620 | return status; | ||
1621 | } | 1655 | } |
1622 | 1656 | ||
1623 | static enum drm_connector_status | 1657 | static enum drm_connector_status |
@@ -1625,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1625 | { | 1659 | { |
1626 | struct drm_device *dev = intel_dp->base.base.dev; | 1660 | struct drm_device *dev = intel_dp->base.base.dev; |
1627 | struct drm_i915_private *dev_priv = dev->dev_private; | 1661 | struct drm_i915_private *dev_priv = dev->dev_private; |
1628 | enum drm_connector_status status; | ||
1629 | uint32_t temp, bit; | 1662 | uint32_t temp, bit; |
1630 | 1663 | ||
1631 | switch (intel_dp->output_reg) { | 1664 | switch (intel_dp->output_reg) { |
@@ -1647,17 +1680,34 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1647 | if ((temp & bit) == 0) | 1680 | if ((temp & bit) == 0) |
1648 | return connector_status_disconnected; | 1681 | return connector_status_disconnected; |
1649 | 1682 | ||
1650 | status = connector_status_disconnected; | 1683 | return intel_dp_detect_dpcd(intel_dp); |
1651 | if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, | 1684 | } |
1652 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) | 1685 | |
1653 | { | 1686 | static struct edid * |
1654 | if (intel_dp->dpcd[DP_DPCD_REV] != 0) | 1687 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
1655 | status = connector_status_connected; | 1688 | { |
1656 | } | 1689 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
1690 | struct edid *edid; | ||
1657 | 1691 | ||
1658 | return status; | 1692 | ironlake_edp_panel_vdd_on(intel_dp); |
1693 | edid = drm_get_edid(connector, adapter); | ||
1694 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1695 | return edid; | ||
1659 | } | 1696 | } |
1660 | 1697 | ||
1698 | static int | ||
1699 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) | ||
1700 | { | ||
1701 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1702 | int ret; | ||
1703 | |||
1704 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1705 | ret = intel_ddc_get_modes(connector, adapter); | ||
1706 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1707 | return ret; | ||
1708 | } | ||
1709 | |||
1710 | |||
1661 | /** | 1711 | /** |
1662 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | 1712 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. |
1663 | * | 1713 | * |
@@ -1678,13 +1728,19 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
1678 | status = ironlake_dp_detect(intel_dp); | 1728 | status = ironlake_dp_detect(intel_dp); |
1679 | else | 1729 | else |
1680 | status = g4x_dp_detect(intel_dp); | 1730 | status = g4x_dp_detect(intel_dp); |
1731 | |||
1732 | DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", | ||
1733 | intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], | ||
1734 | intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], | ||
1735 | intel_dp->dpcd[6], intel_dp->dpcd[7]); | ||
1736 | |||
1681 | if (status != connector_status_connected) | 1737 | if (status != connector_status_connected) |
1682 | return status; | 1738 | return status; |
1683 | 1739 | ||
1684 | if (intel_dp->force_audio) { | 1740 | if (intel_dp->force_audio) { |
1685 | intel_dp->has_audio = intel_dp->force_audio > 0; | 1741 | intel_dp->has_audio = intel_dp->force_audio > 0; |
1686 | } else { | 1742 | } else { |
1687 | edid = drm_get_edid(connector, &intel_dp->adapter); | 1743 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
1688 | if (edid) { | 1744 | if (edid) { |
1689 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | 1745 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
1690 | connector->display_info.raw_edid = NULL; | 1746 | connector->display_info.raw_edid = NULL; |
@@ -1705,7 +1761,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1705 | /* We should parse the EDID data and find out if it has an audio sink | 1761 | /* We should parse the EDID data and find out if it has an audio sink |
1706 | */ | 1762 | */ |
1707 | 1763 | ||
1708 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); | 1764 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
1709 | if (ret) { | 1765 | if (ret) { |
1710 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { | 1766 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { |
1711 | struct drm_display_mode *newmode; | 1767 | struct drm_display_mode *newmode; |
@@ -1741,7 +1797,7 @@ intel_dp_detect_audio(struct drm_connector *connector) | |||
1741 | struct edid *edid; | 1797 | struct edid *edid; |
1742 | bool has_audio = false; | 1798 | bool has_audio = false; |
1743 | 1799 | ||
1744 | edid = drm_get_edid(connector, &intel_dp->adapter); | 1800 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
1745 | if (edid) { | 1801 | if (edid) { |
1746 | has_audio = drm_detect_monitor_audio(edid); | 1802 | has_audio = drm_detect_monitor_audio(edid); |
1747 | 1803 | ||
@@ -1810,6 +1866,11 @@ done: | |||
1810 | static void | 1866 | static void |
1811 | intel_dp_destroy (struct drm_connector *connector) | 1867 | intel_dp_destroy (struct drm_connector *connector) |
1812 | { | 1868 | { |
1869 | struct drm_device *dev = connector->dev; | ||
1870 | |||
1871 | if (intel_dpd_is_edp(dev)) | ||
1872 | intel_panel_destroy_backlight(dev); | ||
1873 | |||
1813 | drm_sysfs_connector_remove(connector); | 1874 | drm_sysfs_connector_remove(connector); |
1814 | drm_connector_cleanup(connector); | 1875 | drm_connector_cleanup(connector); |
1815 | kfree(connector); | 1876 | kfree(connector); |
@@ -1923,6 +1984,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1923 | return; | 1984 | return; |
1924 | 1985 | ||
1925 | intel_dp->output_reg = output_reg; | 1986 | intel_dp->output_reg = output_reg; |
1987 | intel_dp->dpms_mode = -1; | ||
1926 | 1988 | ||
1927 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 1989 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1928 | if (!intel_connector) { | 1990 | if (!intel_connector) { |
@@ -1999,7 +2061,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1999 | 2061 | ||
2000 | /* Cache some DPCD data in the eDP case */ | 2062 | /* Cache some DPCD data in the eDP case */ |
2001 | if (is_edp(intel_dp)) { | 2063 | if (is_edp(intel_dp)) { |
2002 | int ret; | 2064 | bool ret; |
2003 | u32 pp_on, pp_div; | 2065 | u32 pp_on, pp_div; |
2004 | 2066 | ||
2005 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | 2067 | pp_on = I915_READ(PCH_PP_ON_DELAYS); |
@@ -2012,11 +2074,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2012 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ | 2074 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ |
2013 | 2075 | ||
2014 | ironlake_edp_panel_vdd_on(intel_dp); | 2076 | ironlake_edp_panel_vdd_on(intel_dp); |
2015 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, | 2077 | ret = intel_dp_get_dpcd(intel_dp); |
2016 | intel_dp->dpcd, | ||
2017 | sizeof(intel_dp->dpcd)); | ||
2018 | ironlake_edp_panel_vdd_off(intel_dp); | 2078 | ironlake_edp_panel_vdd_off(intel_dp); |
2019 | if (ret == sizeof(intel_dp->dpcd)) { | 2079 | if (ret) { |
2020 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 2080 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
2021 | dev_priv->no_aux_handshake = | 2081 | dev_priv->no_aux_handshake = |
2022 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & | 2082 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & |
@@ -2042,6 +2102,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2042 | DRM_MODE_TYPE_PREFERRED; | 2102 | DRM_MODE_TYPE_PREFERRED; |
2043 | } | 2103 | } |
2044 | } | 2104 | } |
2105 | dev_priv->int_edp_connector = connector; | ||
2106 | intel_panel_setup_backlight(dev); | ||
2045 | } | 2107 | } |
2046 | 2108 | ||
2047 | intel_dp_add_properties(intel_dp, connector); | 2109 | intel_dp_add_properties(intel_dp, connector); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9ffa61eb4d7..2480cfa7c0c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -170,6 +170,7 @@ struct intel_crtc { | |||
170 | int16_t cursor_x, cursor_y; | 170 | int16_t cursor_x, cursor_y; |
171 | int16_t cursor_width, cursor_height; | 171 | int16_t cursor_width, cursor_height; |
172 | bool cursor_visible; | 172 | bool cursor_visible; |
173 | unsigned int bpp; | ||
173 | }; | 174 | }; |
174 | 175 | ||
175 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 176 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -177,10 +178,28 @@ struct intel_crtc { | |||
177 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) | 178 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
178 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 179 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
179 | 180 | ||
181 | #define DIP_HEADER_SIZE 5 | ||
182 | |||
180 | #define DIP_TYPE_AVI 0x82 | 183 | #define DIP_TYPE_AVI 0x82 |
181 | #define DIP_VERSION_AVI 0x2 | 184 | #define DIP_VERSION_AVI 0x2 |
182 | #define DIP_LEN_AVI 13 | 185 | #define DIP_LEN_AVI 13 |
183 | 186 | ||
187 | #define DIP_TYPE_SPD 0x83 | ||
188 | #define DIP_VERSION_SPD 0x1 | ||
189 | #define DIP_LEN_SPD 25 | ||
190 | #define DIP_SPD_UNKNOWN 0 | ||
191 | #define DIP_SPD_DSTB 0x1 | ||
192 | #define DIP_SPD_DVDP 0x2 | ||
193 | #define DIP_SPD_DVHS 0x3 | ||
194 | #define DIP_SPD_HDDVR 0x4 | ||
195 | #define DIP_SPD_DVC 0x5 | ||
196 | #define DIP_SPD_DSC 0x6 | ||
197 | #define DIP_SPD_VCD 0x7 | ||
198 | #define DIP_SPD_GAME 0x8 | ||
199 | #define DIP_SPD_PC 0x9 | ||
200 | #define DIP_SPD_BD 0xa | ||
201 | #define DIP_SPD_SCD 0xb | ||
202 | |||
184 | struct dip_infoframe { | 203 | struct dip_infoframe { |
185 | uint8_t type; /* HB0 */ | 204 | uint8_t type; /* HB0 */ |
186 | uint8_t ver; /* HB1 */ | 205 | uint8_t ver; /* HB1 */ |
@@ -205,6 +224,11 @@ struct dip_infoframe { | |||
205 | uint16_t left_bar_end; | 224 | uint16_t left_bar_end; |
206 | uint16_t right_bar_start; | 225 | uint16_t right_bar_start; |
207 | } avi; | 226 | } avi; |
227 | struct { | ||
228 | uint8_t vn[8]; | ||
229 | uint8_t pd[16]; | ||
230 | uint8_t sdi; | ||
231 | } spd; | ||
208 | uint8_t payload[27]; | 232 | uint8_t payload[27]; |
209 | } __attribute__ ((packed)) body; | 233 | } __attribute__ ((packed)) body; |
210 | } __attribute__((packed)); | 234 | } __attribute__((packed)); |
@@ -233,6 +257,13 @@ struct intel_unpin_work { | |||
233 | bool enable_stall_check; | 257 | bool enable_stall_check; |
234 | }; | 258 | }; |
235 | 259 | ||
260 | struct intel_fbc_work { | ||
261 | struct delayed_work work; | ||
262 | struct drm_crtc *crtc; | ||
263 | struct drm_framebuffer *fb; | ||
264 | int interval; | ||
265 | }; | ||
266 | |||
236 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | 267 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
237 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); | 268 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); |
238 | 269 | ||
@@ -266,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, | |||
266 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 297 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
267 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | 298 | extern u32 intel_panel_get_backlight(struct drm_device *dev); |
268 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 299 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
269 | extern void intel_panel_setup_backlight(struct drm_device *dev); | 300 | extern int intel_panel_setup_backlight(struct drm_device *dev); |
270 | extern void intel_panel_enable_backlight(struct drm_device *dev); | 301 | extern void intel_panel_enable_backlight(struct drm_device *dev); |
271 | extern void intel_panel_disable_backlight(struct drm_device *dev); | 302 | extern void intel_panel_disable_backlight(struct drm_device *dev); |
303 | extern void intel_panel_destroy_backlight(struct drm_device *dev); | ||
272 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 304 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
273 | 305 | ||
274 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 306 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
@@ -305,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
305 | struct drm_connector *connector, | 337 | struct drm_connector *connector, |
306 | struct intel_load_detect_pipe *old); | 338 | struct intel_load_detect_pipe *old); |
307 | 339 | ||
308 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | ||
309 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); | ||
310 | extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); | ||
311 | extern void intelfb_restore(void); | 340 | extern void intelfb_restore(void); |
312 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 341 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
313 | u16 blue, int regno); | 342 | u16 blue, int regno); |
@@ -317,6 +346,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev); | |||
317 | extern void ironlake_enable_drps(struct drm_device *dev); | 346 | extern void ironlake_enable_drps(struct drm_device *dev); |
318 | extern void ironlake_disable_drps(struct drm_device *dev); | 347 | extern void ironlake_disable_drps(struct drm_device *dev); |
319 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | 348 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); |
349 | extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); | ||
320 | extern void gen6_disable_rps(struct drm_device *dev); | 350 | extern void gen6_disable_rps(struct drm_device *dev); |
321 | extern void intel_init_emon(struct drm_device *dev); | 351 | extern void intel_init_emon(struct drm_device *dev); |
322 | 352 | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index aa0a8e83142..226ba830f38 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -45,6 +45,8 @@ struct intel_hdmi { | |||
45 | bool has_hdmi_sink; | 45 | bool has_hdmi_sink; |
46 | bool has_audio; | 46 | bool has_audio; |
47 | int force_audio; | 47 | int force_audio; |
48 | void (*write_infoframe)(struct drm_encoder *encoder, | ||
49 | struct dip_infoframe *frame); | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | 52 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
@@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) | |||
58 | struct intel_hdmi, base); | 60 | struct intel_hdmi, base); |
59 | } | 61 | } |
60 | 62 | ||
61 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) | 63 | void intel_dip_infoframe_csum(struct dip_infoframe *frame) |
62 | { | 64 | { |
63 | uint8_t *data = (uint8_t *)avi_if; | 65 | uint8_t *data = (uint8_t *)frame; |
64 | uint8_t sum = 0; | 66 | uint8_t sum = 0; |
65 | unsigned i; | 67 | unsigned i; |
66 | 68 | ||
67 | avi_if->checksum = 0; | 69 | frame->checksum = 0; |
68 | avi_if->ecc = 0; | 70 | frame->ecc = 0; |
69 | 71 | ||
70 | for (i = 0; i < sizeof(*avi_if); i++) | 72 | /* Header isn't part of the checksum */ |
73 | for (i = 5; i < frame->len; i++) | ||
71 | sum += data[i]; | 74 | sum += data[i]; |
72 | 75 | ||
73 | avi_if->checksum = 0x100 - sum; | 76 | frame->checksum = 0x100 - sum; |
74 | } | 77 | } |
75 | 78 | ||
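The checksum byte is chosen so the covered bytes wrap to zero modulo 256. A standalone illustration over a plain byte buffer (simplified: the driver sums fields of the packed struct above, skipping the header bytes):

#include <stdint.h>
#include <stddef.h>

/* Return the byte that makes payload + checksum sum to 0 mod 256. */
static uint8_t dip_checksum(const uint8_t *payload, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += payload[i];

	return (uint8_t)(0x100 - sum);	/* wraps to 0 when sum is 0 */
}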
76 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | 79 | static u32 intel_infoframe_index(struct dip_infoframe *frame) |
77 | { | 80 | { |
78 | struct dip_infoframe avi_if = { | 81 | u32 flags = 0; |
79 | .type = DIP_TYPE_AVI, | 82 | |
80 | .ver = DIP_VERSION_AVI, | 83 | switch (frame->type) { |
81 | .len = DIP_LEN_AVI, | 84 | case DIP_TYPE_AVI: |
82 | }; | 85 | flags |= VIDEO_DIP_SELECT_AVI; |
83 | uint32_t *data = (uint32_t *)&avi_if; | 86 | break; |
87 | case DIP_TYPE_SPD: | ||
88 | flags |= VIDEO_DIP_SELECT_SPD; | ||
89 | break; | ||
90 | default: | ||
91 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); | ||
92 | break; | ||
93 | } | ||
94 | |||
95 | return flags; | ||
96 | } | ||
97 | |||
98 | static u32 intel_infoframe_flags(struct dip_infoframe *frame) | ||
99 | { | ||
100 | u32 flags = 0; | ||
101 | |||
102 | switch (frame->type) { | ||
103 | case DIP_TYPE_AVI: | ||
104 | flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; | ||
105 | break; | ||
106 | case DIP_TYPE_SPD: | ||
107 | flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC; | ||
108 | break; | ||
109 | default: | ||
110 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); | ||
111 | break; | ||
112 | } | ||
113 | |||
114 | return flags; | ||
115 | } | ||
116 | |||
117 | static void i9xx_write_infoframe(struct drm_encoder *encoder, | ||
118 | struct dip_infoframe *frame) | ||
119 | { | ||
120 | uint32_t *data = (uint32_t *)frame; | ||
84 | struct drm_device *dev = encoder->dev; | 121 | struct drm_device *dev = encoder->dev; |
85 | struct drm_i915_private *dev_priv = dev->dev_private; | 122 | struct drm_i915_private *dev_priv = dev->dev_private; |
86 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 123 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
87 | u32 port; | 124 | u32 port, flags, val = I915_READ(VIDEO_DIP_CTL); |
88 | unsigned i; | 125 | unsigned i, len = DIP_HEADER_SIZE + frame->len; |
89 | 126 | ||
90 | if (!intel_hdmi->has_hdmi_sink) | ||
91 | return; | ||
92 | 127 | ||
93 | /* XXX first guess at handling video port, is this correct? */ | 128 | /* XXX first guess at handling video port, is this correct? */ |
94 | if (intel_hdmi->sdvox_reg == SDVOB) | 129 | if (intel_hdmi->sdvox_reg == SDVOB) |
@@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | |||
98 | else | 133 | else |
99 | return; | 134 | return; |
100 | 135 | ||
101 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | 136 | flags = intel_infoframe_index(frame); |
102 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); | 137 | |
138 | val &= ~VIDEO_DIP_SELECT_MASK; | ||
103 | 139 | ||
104 | intel_dip_infoframe_csum(&avi_if); | 140 | I915_WRITE(VIDEO_DIP_CTL, val | port | flags); |
105 | for (i = 0; i < sizeof(avi_if); i += 4) { | 141 | |
142 | for (i = 0; i < len; i += 4) { | ||
106 | I915_WRITE(VIDEO_DIP_DATA, *data); | 143 | I915_WRITE(VIDEO_DIP_DATA, *data); |
107 | data++; | 144 | data++; |
108 | } | 145 | } |
109 | 146 | ||
110 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | 147 | flags |= intel_infoframe_flags(frame); |
111 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | | 148 | |
112 | VIDEO_DIP_ENABLE_AVI); | 149 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); |
150 | } | ||
151 | |||
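i9xx_write_infoframe streams the whole packed frame, header plus payload, through a single 32-bit data register; the Ironlake variant below does the same through per-pipe registers. The core loop, sketched with a stand-in register writer; the buffer is assumed padded to a 4-byte multiple, as the packed struct above is:

#include <stdint.h>
#include <stddef.h>

#define DIP_HEADER_SIZE 5

extern void reg_write(uint32_t reg, uint32_t val);

/* Push header + payload through a data-port register, one 32-bit
 * word per write; buf must be safe to read in 4-byte steps. */
static void write_infoframe_words(uint32_t data_reg, const uint8_t *buf,
				  size_t payload_len)
{
	const uint32_t *data = (const uint32_t *)buf;
	size_t i, len = DIP_HEADER_SIZE + payload_len;

	for (i = 0; i < len; i += 4)
		reg_write(data_reg, *data++);
}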
152 | static void ironlake_write_infoframe(struct drm_encoder *encoder, | ||
153 | struct dip_infoframe *frame) | ||
154 | { | ||
155 | uint32_t *data = (uint32_t *)frame; | ||
156 | struct drm_device *dev = encoder->dev; | ||
157 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
158 | struct drm_crtc *crtc = encoder->crtc; | ||
159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
160 | int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | ||
161 | unsigned i, len = DIP_HEADER_SIZE + frame->len; | ||
162 | u32 flags, val = I915_READ(reg); | ||
163 | |||
164 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
165 | |||
166 | flags = intel_infoframe_index(frame); | ||
167 | |||
168 | val &= ~VIDEO_DIP_SELECT_MASK; | ||
169 | |||
170 | I915_WRITE(reg, val | flags); | ||
171 | |||
172 | for (i = 0; i < len; i += 4) { | ||
173 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | ||
174 | data++; | ||
175 | } | ||
176 | |||
177 | flags |= intel_infoframe_flags(frame); | ||
178 | |||
179 | I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); | ||
180 | } | ||
181 | static void intel_set_infoframe(struct drm_encoder *encoder, | ||
182 | struct dip_infoframe *frame) | ||
183 | { | ||
184 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
185 | |||
186 | if (!intel_hdmi->has_hdmi_sink) | ||
187 | return; | ||
188 | |||
189 | intel_dip_infoframe_csum(frame); | ||
190 | intel_hdmi->write_infoframe(encoder, frame); | ||
191 | } | ||
192 | |||
193 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | ||
194 | { | ||
195 | struct dip_infoframe avi_if = { | ||
196 | .type = DIP_TYPE_AVI, | ||
197 | .ver = DIP_VERSION_AVI, | ||
198 | .len = DIP_LEN_AVI, | ||
199 | }; | ||
200 | |||
201 | intel_set_infoframe(encoder, &avi_if); | ||
202 | } | ||
203 | |||
204 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | ||
205 | { | ||
206 | struct dip_infoframe spd_if; | ||
207 | |||
208 | memset(&spd_if, 0, sizeof(spd_if)); | ||
209 | spd_if.type = DIP_TYPE_SPD; | ||
210 | spd_if.ver = DIP_VERSION_SPD; | ||
211 | spd_if.len = DIP_LEN_SPD; | ||
212 | strcpy(spd_if.body.spd.vn, "Intel"); | ||
213 | strcpy(spd_if.body.spd.pd, "Integrated gfx"); | ||
214 | spd_if.body.spd.sdi = DIP_SPD_PC; | ||
215 | |||
216 | intel_set_infoframe(encoder, &spd_if); | ||
113 | } | 217 | } |
114 | 218 | ||
115 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | 219 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
@@ -124,12 +228,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
124 | u32 sdvox; | 228 | u32 sdvox; |
125 | 229 | ||
126 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; | 230 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; |
127 | sdvox |= intel_hdmi->color_range; | 231 | if (!HAS_PCH_SPLIT(dev)) |
232 | sdvox |= intel_hdmi->color_range; | ||
128 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 233 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
129 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; | 234 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
130 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 235 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
131 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | 236 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; |
132 | 237 | ||
238 | if (intel_crtc->bpp > 24) | ||
239 | sdvox |= COLOR_FORMAT_12bpc; | ||
240 | else | ||
241 | sdvox |= COLOR_FORMAT_8bpc; | ||
242 | |||
133 | /* Required on CPT */ | 243 | /* Required on CPT */ |
134 | if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) | 244 | if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) |
135 | sdvox |= HDMI_MODE_SELECT; | 245 | sdvox |= HDMI_MODE_SELECT; |
@@ -150,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
150 | POSTING_READ(intel_hdmi->sdvox_reg); | 260 | POSTING_READ(intel_hdmi->sdvox_reg); |
151 | 261 | ||
152 | intel_hdmi_set_avi_infoframe(encoder); | 262 | intel_hdmi_set_avi_infoframe(encoder); |
263 | intel_hdmi_set_spd_infoframe(encoder); | ||
153 | } | 264 | } |
154 | 265 | ||
155 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | 266 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) |
@@ -427,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
427 | 538 | ||
428 | intel_hdmi->sdvox_reg = sdvox_reg; | 539 | intel_hdmi->sdvox_reg = sdvox_reg; |
429 | 540 | ||
541 | if (!HAS_PCH_SPLIT(dev)) | ||
542 | intel_hdmi->write_infoframe = i9xx_write_infoframe; | ||
543 | else | ||
544 | intel_hdmi->write_infoframe = ironlake_write_infoframe; | ||
545 | |||
430 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); | 546 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
431 | 547 | ||
432 | intel_hdmi_add_properties(intel_hdmi, connector); | 548 | intel_hdmi_add_properties(intel_hdmi, connector); |
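Note on the infoframe plumbing added above: before intel_hdmi->write_infoframe() streams the packet out through the DIP data register, intel_dip_infoframe_csum() has to make the packet self-checking. HDMI DIP checksums are defined so that all header and payload bytes, checksum included, sum to zero modulo 256. A minimal standalone sketch of that rule (function name hypothetical, not the driver's helper):

#include <stdint.h>
#include <stddef.h>

/* Pick the checksum byte so that every byte of the packet, the
 * checksum itself included, sums to 0 mod 256. The caller zeroes
 * the checksum slot before summing. */
static uint8_t dip_checksum(const uint8_t *pkt, size_t len)
{
        unsigned int sum = 0;
        size_t i;

        for (i = 0; i < len; i++)
                sum += pkt[i];

        return (uint8_t)(0x100 - (sum & 0xff));
}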
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b28f7bd9f88..31da77f5c05 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
72 | { | 72 | { |
73 | struct drm_device *dev = intel_lvds->base.base.dev; | 73 | struct drm_device *dev = intel_lvds->base.base.dev; |
74 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
75 | u32 ctl_reg, lvds_reg; | 75 | u32 ctl_reg, lvds_reg, stat_reg; |
76 | 76 | ||
77 | if (HAS_PCH_SPLIT(dev)) { | 77 | if (HAS_PCH_SPLIT(dev)) { |
78 | ctl_reg = PCH_PP_CONTROL; | 78 | ctl_reg = PCH_PP_CONTROL; |
79 | lvds_reg = PCH_LVDS; | 79 | lvds_reg = PCH_LVDS; |
80 | stat_reg = PCH_PP_STATUS; | ||
80 | } else { | 81 | } else { |
81 | ctl_reg = PP_CONTROL; | 82 | ctl_reg = PP_CONTROL; |
82 | lvds_reg = LVDS; | 83 | lvds_reg = LVDS; |
84 | stat_reg = PP_STATUS; | ||
83 | } | 85 | } |
84 | 86 | ||
85 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | 87 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
94 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | 96 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", |
95 | intel_lvds->pfit_control, | 97 | intel_lvds->pfit_control, |
96 | intel_lvds->pfit_pgm_ratios); | 98 | intel_lvds->pfit_pgm_ratios); |
97 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { | 99 | |
98 | DRM_ERROR("timed out waiting for panel to power off\n"); | 100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); |
99 | } else { | 101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); |
100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | 102 | intel_lvds->pfit_dirty = false; |
101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
102 | intel_lvds->pfit_dirty = false; | ||
103 | } | ||
104 | } | 103 | } |
105 | 104 | ||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | 105 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
107 | POSTING_READ(lvds_reg); | 106 | POSTING_READ(lvds_reg); |
107 | if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) | ||
108 | DRM_ERROR("timed out waiting for panel to power on\n"); | ||
108 | 109 | ||
109 | intel_panel_enable_backlight(dev); | 110 | intel_panel_enable_backlight(dev); |
110 | } | 111 | } |
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds) | |||
113 | { | 114 | { |
114 | struct drm_device *dev = intel_lvds->base.base.dev; | 115 | struct drm_device *dev = intel_lvds->base.base.dev; |
115 | struct drm_i915_private *dev_priv = dev->dev_private; | 116 | struct drm_i915_private *dev_priv = dev->dev_private; |
116 | u32 ctl_reg, lvds_reg; | 117 | u32 ctl_reg, lvds_reg, stat_reg; |
117 | 118 | ||
118 | if (HAS_PCH_SPLIT(dev)) { | 119 | if (HAS_PCH_SPLIT(dev)) { |
119 | ctl_reg = PCH_PP_CONTROL; | 120 | ctl_reg = PCH_PP_CONTROL; |
120 | lvds_reg = PCH_LVDS; | 121 | lvds_reg = PCH_LVDS; |
122 | stat_reg = PCH_PP_STATUS; | ||
121 | } else { | 123 | } else { |
122 | ctl_reg = PP_CONTROL; | 124 | ctl_reg = PP_CONTROL; |
123 | lvds_reg = LVDS; | 125 | lvds_reg = LVDS; |
126 | stat_reg = PP_STATUS; | ||
124 | } | 127 | } |
125 | 128 | ||
126 | intel_panel_disable_backlight(dev); | 129 | intel_panel_disable_backlight(dev); |
127 | 130 | ||
128 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | 131 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); |
132 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) | ||
133 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
129 | 134 | ||
130 | if (intel_lvds->pfit_control) { | 135 | if (intel_lvds->pfit_control) { |
131 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
132 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
133 | |||
134 | I915_WRITE(PFIT_CONTROL, 0); | 136 | I915_WRITE(PFIT_CONTROL, 0); |
135 | intel_lvds->pfit_dirty = true; | 137 | intel_lvds->pfit_dirty = true; |
136 | } | 138 | } |
@@ -398,53 +400,21 @@ out: | |||
398 | 400 | ||
399 | static void intel_lvds_prepare(struct drm_encoder *encoder) | 401 | static void intel_lvds_prepare(struct drm_encoder *encoder) |
400 | { | 402 | { |
401 | struct drm_device *dev = encoder->dev; | ||
402 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
404 | 404 | ||
405 | /* We try to do the minimum that is necessary in order to unlock | 405 | /* |
406 | * the registers for mode setting. | ||
407 | * | ||
408 | * On Ironlake, this is quite simple as we just set the unlock key | ||
409 | * and ignore all subtleties. (This may cause some issues...) | ||
410 | * | ||
411 | * Prior to Ironlake, we must disable the pipe if we want to adjust | 406 | * Prior to Ironlake, we must disable the pipe if we want to adjust |
412 | * the panel fitter. However at all other times we can just reset | 407 | * the panel fitter. However at all other times we can just reset |
413 | * the registers regardless. | 408 | * the registers regardless. |
414 | */ | 409 | */ |
415 | 410 | if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) | |
416 | if (HAS_PCH_SPLIT(dev)) { | 411 | intel_lvds_disable(intel_lvds); |
417 | I915_WRITE(PCH_PP_CONTROL, | ||
418 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
419 | } else if (intel_lvds->pfit_dirty) { | ||
420 | I915_WRITE(PP_CONTROL, | ||
421 | (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) | ||
422 | & ~POWER_TARGET_ON); | ||
423 | } else { | ||
424 | I915_WRITE(PP_CONTROL, | ||
425 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
426 | } | ||
427 | } | 412 | } |
428 | 413 | ||
429 | static void intel_lvds_commit(struct drm_encoder *encoder) | 414 | static void intel_lvds_commit(struct drm_encoder *encoder) |
430 | { | 415 | { |
431 | struct drm_device *dev = encoder->dev; | ||
432 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
433 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 416 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
434 | 417 | ||
435 | /* Undo any unlocking done in prepare to prevent accidental | ||
436 | * adjustment of the registers. | ||
437 | */ | ||
438 | if (HAS_PCH_SPLIT(dev)) { | ||
439 | u32 val = I915_READ(PCH_PP_CONTROL); | ||
440 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
441 | I915_WRITE(PCH_PP_CONTROL, val & 0x3); | ||
442 | } else { | ||
443 | u32 val = I915_READ(PP_CONTROL); | ||
444 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
445 | I915_WRITE(PP_CONTROL, val & 0x3); | ||
446 | } | ||
447 | |||
448 | /* Always do a full power on as we do not know what state | 418 | /* Always do a full power on as we do not know what state |
449 | * we were left in. | 419 | * we were left in. |
450 | */ | 420 | */ |
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector) | |||
582 | struct drm_device *dev = connector->dev; | 552 | struct drm_device *dev = connector->dev; |
583 | struct drm_i915_private *dev_priv = dev->dev_private; | 553 | struct drm_i915_private *dev_priv = dev->dev_private; |
584 | 554 | ||
555 | intel_panel_destroy_backlight(dev); | ||
556 | |||
585 | if (dev_priv->lid_notifier.notifier_call) | 557 | if (dev_priv->lid_notifier.notifier_call) |
586 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 558 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
587 | drm_sysfs_connector_remove(connector); | 559 | drm_sysfs_connector_remove(connector); |
@@ -690,6 +662,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
690 | }, | 662 | }, |
691 | { | 663 | { |
692 | .callback = intel_no_lvds_dmi_callback, | 664 | .callback = intel_no_lvds_dmi_callback, |
665 | .ident = "Dell OptiPlex FX170", | ||
666 | .matches = { | ||
667 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
668 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"), | ||
669 | }, | ||
670 | }, | ||
671 | { | ||
672 | .callback = intel_no_lvds_dmi_callback, | ||
693 | .ident = "AOpen Mini PC", | 673 | .ident = "AOpen Mini PC", |
694 | .matches = { | 674 | .matches = { |
695 | DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), | 675 | DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), |
@@ -1032,6 +1012,19 @@ out: | |||
1032 | pwm = I915_READ(BLC_PWM_PCH_CTL1); | 1012 | pwm = I915_READ(BLC_PWM_PCH_CTL1); |
1033 | pwm |= PWM_PCH_ENABLE; | 1013 | pwm |= PWM_PCH_ENABLE; |
1034 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); | 1014 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); |
1015 | /* | ||
1016 | * Unlock registers and just | ||
1017 | * leave them unlocked | ||
1018 | */ | ||
1019 | I915_WRITE(PCH_PP_CONTROL, | ||
1020 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
1021 | } else { | ||
1022 | /* | ||
1023 | * Unlock registers and just | ||
1024 | * leave them unlocked | ||
1025 | */ | ||
1026 | I915_WRITE(PP_CONTROL, | ||
1027 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
1035 | } | 1028 | } |
1036 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | 1029 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; |
1037 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | 1030 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { |
@@ -1041,6 +1034,9 @@ out: | |||
1041 | /* keep the LVDS connector */ | 1034 | /* keep the LVDS connector */ |
1042 | dev_priv->int_lvds_connector = connector; | 1035 | dev_priv->int_lvds_connector = connector; |
1043 | drm_sysfs_connector_add(connector); | 1036 | drm_sysfs_connector_add(connector); |
1037 | |||
1038 | intel_panel_setup_backlight(dev); | ||
1039 | |||
1044 | return true; | 1040 | return true; |
1045 | 1041 | ||
1046 | failed: | 1042 | failed: |
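The LVDS changes above replace the open-coded power-sequencing logic with wait_for() polls against stat_reg, waiting for PP_ON to assert on power-up and deassert on power-down. For readers unfamiliar with the macro, its behaviour is roughly the following (a user-space analogue under that assumption, not the kernel implementation):

#include <errno.h>
#include <stdbool.h>
#include <time.h>

/* Poll cond() about once per millisecond until it holds or
 * timeout_ms expires; returns 0 on success, -ETIMEDOUT on timeout. */
static int wait_for_cond(bool (*cond)(void *), void *arg,
                         unsigned int timeout_ms)
{
        struct timespec ms = { 0, 1000 * 1000 };
        unsigned int i;

        for (i = 0; i < timeout_ms; i++) {
                if (cond(arg))
                        return 0;
                nanosleep(&ms, NULL);
        }
        return cond(arg) ? 0 : -ETIMEDOUT;
}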
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index d2c71042290..b8e8158bb16 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev) | |||
227 | asle->aslc = asle_stat; | 227 | asle->aslc = asle_stat; |
228 | } | 228 | } |
229 | 229 | ||
230 | /* Only present on Ironlake+ */ | ||
231 | void intel_opregion_gse_intr(struct drm_device *dev) | 230 | void intel_opregion_gse_intr(struct drm_device *dev) |
232 | { | 231 | { |
233 | struct drm_i915_private *dev_priv = dev->dev_private; | 232 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -297,19 +296,26 @@ static int intel_opregion_video_event(struct notifier_block *nb, | |||
297 | /* The only video events relevant to opregion are 0x80. These indicate | 296 | /* The only video events relevant to opregion are 0x80. These indicate |
298 | either a docking event, lid switch or display switch request. In | 297 | either a docking event, lid switch or display switch request. In |
299 | Linux, these are handled by the dock, button and video drivers. | 298 | Linux, these are handled by the dock, button and video drivers. |
300 | We might want to fix the video driver to be opregion-aware in | 299 | */ |
301 | future, but right now we just indicate to the firmware that the | ||
302 | request has been handled */ | ||
303 | 300 | ||
304 | struct opregion_acpi *acpi; | 301 | struct opregion_acpi *acpi; |
302 | struct acpi_bus_event *event = data; | ||
303 | int ret = NOTIFY_OK; | ||
304 | |||
305 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) | ||
306 | return NOTIFY_DONE; | ||
305 | 307 | ||
306 | if (!system_opregion) | 308 | if (!system_opregion) |
307 | return NOTIFY_DONE; | 309 | return NOTIFY_DONE; |
308 | 310 | ||
309 | acpi = system_opregion->acpi; | 311 | acpi = system_opregion->acpi; |
312 | |||
313 | if (event->type == 0x80 && !(acpi->cevt & 0x1)) | ||
314 | ret = NOTIFY_BAD; | ||
315 | |||
310 | acpi->csts = 0; | 316 | acpi->csts = 0; |
311 | 317 | ||
312 | return NOTIFY_OK; | 318 | return ret; |
313 | } | 319 | } |
314 | 320 | ||
315 | static struct notifier_block intel_opregion_notifier = { | 321 | static struct notifier_block intel_opregion_notifier = { |
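The opregion notifier above now filters on the ACPI device class before touching any state, and uses the standard notifier return codes: NOTIFY_DONE for events that are not ours, NOTIFY_OK for handled events, NOTIFY_BAD to report a failure. Reduced to a skeleton (a sketch of the hunk above, not additional driver code):

static int video_event_sketch(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct acpi_bus_event *event = data;

        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
                return NOTIFY_DONE;     /* not a video event */

        if (!system_opregion)
                return NOTIFY_DONE;     /* nothing to acknowledge */

        /* a 0x80 event with CEVT bit 0 clear is one the firmware
         * did not log for us; the hunk reports that as NOTIFY_BAD */
        return NOTIFY_OK;
}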
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 9e2959bc91c..d3603808682 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
773 | if (ret != 0) | 773 | if (ret != 0) |
774 | return ret; | 774 | return ret; |
775 | 775 | ||
776 | ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); | 776 | ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); |
777 | if (ret != 0) | 777 | if (ret != 0) |
778 | return ret; | 778 | return ret; |
779 | 779 | ||
780 | ret = i915_gem_object_set_to_gtt_domain(new_bo, 0); | ||
781 | if (ret != 0) | ||
782 | goto out_unpin; | ||
783 | |||
784 | ret = i915_gem_object_put_fence(new_bo); | 780 | ret = i915_gem_object_put_fence(new_bo); |
785 | if (ret) | 781 | if (ret) |
786 | goto out_unpin; | 782 | goto out_unpin; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a06ff07a4d3..007f6ca309d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev, | |||
83 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | 83 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; |
84 | if (scaled_width > scaled_height) { /* pillar */ | 84 | if (scaled_width > scaled_height) { /* pillar */ |
85 | width = scaled_height / mode->vdisplay; | 85 | width = scaled_height / mode->vdisplay; |
86 | if (width & 1) | ||
87 | width++; | ||
86 | x = (adjusted_mode->hdisplay - width + 1) / 2; | 88 | x = (adjusted_mode->hdisplay - width + 1) / 2; |
87 | y = 0; | 89 | y = 0; |
88 | height = adjusted_mode->vdisplay; | 90 | height = adjusted_mode->vdisplay; |
89 | } else if (scaled_width < scaled_height) { /* letter */ | 91 | } else if (scaled_width < scaled_height) { /* letter */ |
90 | height = scaled_width / mode->hdisplay; | 92 | height = scaled_width / mode->hdisplay; |
93 | if (height & 1) | ||
94 | height++; | ||
91 | y = (adjusted_mode->vdisplay - height + 1) / 2; | 95 | y = (adjusted_mode->vdisplay - height + 1) / 2; |
92 | x = 0; | 96 | x = 0; |
93 | width = adjusted_mode->hdisplay; | 97 | width = adjusted_mode->hdisplay; |
@@ -222,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) | |||
222 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); | 226 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); |
223 | } | 227 | } |
224 | 228 | ||
225 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) | 229 | static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) |
226 | { | 230 | { |
227 | struct drm_i915_private *dev_priv = dev->dev_private; | 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
228 | u32 tmp; | 232 | u32 tmp; |
@@ -250,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
250 | I915_WRITE(BLC_PWM_CTL, tmp | level); | 254 | I915_WRITE(BLC_PWM_CTL, tmp | level); |
251 | } | 255 | } |
252 | 256 | ||
253 | void intel_panel_disable_backlight(struct drm_device *dev) | 257 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) |
254 | { | 258 | { |
255 | struct drm_i915_private *dev_priv = dev->dev_private; | 259 | struct drm_i915_private *dev_priv = dev->dev_private; |
256 | 260 | ||
257 | if (dev_priv->backlight_enabled) { | 261 | dev_priv->backlight_level = level; |
258 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | 262 | if (dev_priv->backlight_enabled) |
259 | dev_priv->backlight_enabled = false; | 263 | intel_panel_actually_set_backlight(dev, level); |
260 | } | 264 | } |
265 | |||
266 | void intel_panel_disable_backlight(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
261 | 269 | ||
262 | intel_panel_set_backlight(dev, 0); | 270 | dev_priv->backlight_enabled = false; |
271 | intel_panel_actually_set_backlight(dev, 0); | ||
263 | } | 272 | } |
264 | 273 | ||
265 | void intel_panel_enable_backlight(struct drm_device *dev) | 274 | void intel_panel_enable_backlight(struct drm_device *dev) |
@@ -269,11 +278,11 @@ void intel_panel_enable_backlight(struct drm_device *dev) | |||
269 | if (dev_priv->backlight_level == 0) | 278 | if (dev_priv->backlight_level == 0) |
270 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | 279 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); |
271 | 280 | ||
272 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
273 | dev_priv->backlight_enabled = true; | 281 | dev_priv->backlight_enabled = true; |
282 | intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); | ||
274 | } | 283 | } |
275 | 284 | ||
276 | void intel_panel_setup_backlight(struct drm_device *dev) | 285 | static void intel_panel_init_backlight(struct drm_device *dev) |
277 | { | 286 | { |
278 | struct drm_i915_private *dev_priv = dev->dev_private; | 287 | struct drm_i915_private *dev_priv = dev->dev_private; |
279 | 288 | ||
@@ -305,3 +314,74 @@ intel_panel_detect(struct drm_device *dev) | |||
305 | 314 | ||
306 | return connector_status_unknown; | 315 | return connector_status_unknown; |
307 | } | 316 | } |
317 | |||
318 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
319 | static int intel_panel_update_status(struct backlight_device *bd) | ||
320 | { | ||
321 | struct drm_device *dev = bl_get_data(bd); | ||
322 | intel_panel_set_backlight(dev, bd->props.brightness); | ||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | static int intel_panel_get_brightness(struct backlight_device *bd) | ||
327 | { | ||
328 | struct drm_device *dev = bl_get_data(bd); | ||
329 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
330 | return dev_priv->backlight_level; | ||
331 | } | ||
332 | |||
333 | static const struct backlight_ops intel_panel_bl_ops = { | ||
334 | .update_status = intel_panel_update_status, | ||
335 | .get_brightness = intel_panel_get_brightness, | ||
336 | }; | ||
337 | |||
338 | int intel_panel_setup_backlight(struct drm_device *dev) | ||
339 | { | ||
340 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
341 | struct backlight_properties props; | ||
342 | struct drm_connector *connector; | ||
343 | |||
344 | intel_panel_init_backlight(dev); | ||
345 | |||
346 | if (dev_priv->int_lvds_connector) | ||
347 | connector = dev_priv->int_lvds_connector; | ||
348 | else if (dev_priv->int_edp_connector) | ||
349 | connector = dev_priv->int_edp_connector; | ||
350 | else | ||
351 | return -ENODEV; | ||
352 | |||
353 | props.type = BACKLIGHT_RAW; | ||
354 | props.max_brightness = intel_panel_get_max_backlight(dev); | ||
355 | dev_priv->backlight = | ||
356 | backlight_device_register("intel_backlight", | ||
357 | &connector->kdev, dev, | ||
358 | &intel_panel_bl_ops, &props); | ||
359 | |||
360 | if (IS_ERR(dev_priv->backlight)) { | ||
361 | DRM_ERROR("Failed to register backlight: %ld\n", | ||
362 | PTR_ERR(dev_priv->backlight)); | ||
363 | dev_priv->backlight = NULL; | ||
364 | return -ENODEV; | ||
365 | } | ||
366 | dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev); | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | void intel_panel_destroy_backlight(struct drm_device *dev) | ||
371 | { | ||
372 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
373 | if (dev_priv->backlight) | ||
374 | backlight_device_unregister(dev_priv->backlight); | ||
375 | } | ||
376 | #else | ||
377 | int intel_panel_setup_backlight(struct drm_device *dev) | ||
378 | { | ||
379 | intel_panel_init_backlight(dev); | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | void intel_panel_destroy_backlight(struct drm_device *dev) | ||
384 | { | ||
385 | return; | ||
386 | } | ||
387 | #endif | ||
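The panel rework above splits the backlight path in two: intel_panel_set_backlight() only records the requested level unless the backlight is enabled, and intel_panel_actually_set_backlight() does the register write, so a level programmed while the panel is off is replayed on the next enable. A minimal sketch of that pattern, with a hypothetical hw_write_level() standing in for the PWM register write:

#include <stdbool.h>

struct bl_state {
        unsigned int level;     /* last requested level */
        bool enabled;
};

static void hw_write_level(unsigned int level)
{
        (void)level;            /* PWM register write elided */
}

static void bl_set(struct bl_state *bl, unsigned int level)
{
        bl->level = level;
        if (bl->enabled)
                hw_write_level(level);  /* apply only while enabled */
}

static void bl_enable(struct bl_state *bl)
{
        bl->enabled = true;
        hw_write_level(bl->level);      /* replay the cached level */
}

static void bl_disable(struct bl_state *bl)
{
        bl->enabled = false;
        hw_write_level(0);              /* off, but keep the cache */
}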
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 95c4b142993..c30626ea9f9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring) | |||
236 | ret = -ENOMEM; | 236 | ret = -ENOMEM; |
237 | goto err; | 237 | goto err; |
238 | } | 238 | } |
239 | obj->cache_level = I915_CACHE_LLC; | 239 | |
240 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | ||
240 | 241 | ||
241 | ret = i915_gem_object_pin(obj, 4096, true); | 242 | ret = i915_gem_object_pin(obj, 4096, true); |
242 | if (ret) | 243 | if (ret) |
@@ -289,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
289 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 290 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
290 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | 291 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; |
291 | I915_WRITE(MI_MODE, mode); | 292 | I915_WRITE(MI_MODE, mode); |
293 | if (IS_GEN7(dev)) | ||
294 | I915_WRITE(GFX_MODE_GEN7, | ||
295 | GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | | ||
296 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); | ||
292 | } | 297 | } |
293 | 298 | ||
294 | if (INTEL_INFO(dev)->gen >= 6) { | 299 | if (INTEL_INFO(dev)->gen >= 6) { |
@@ -776,7 +781,8 @@ static int init_status_page(struct intel_ring_buffer *ring) | |||
776 | ret = -ENOMEM; | 781 | ret = -ENOMEM; |
777 | goto err; | 782 | goto err; |
778 | } | 783 | } |
779 | obj->cache_level = I915_CACHE_LLC; | 784 | |
785 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | ||
780 | 786 | ||
781 | ret = i915_gem_object_pin(obj, 4096, true); | 787 | ret = i915_gem_object_pin(obj, 4096, true); |
782 | if (ret != 0) { | 788 | if (ret != 0) { |
@@ -1319,6 +1325,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
1319 | ring->get_seqno = pc_render_get_seqno; | 1325 | ring->get_seqno = pc_render_get_seqno; |
1320 | } | 1326 | } |
1321 | 1327 | ||
1328 | if (!I915_NEED_GFX_HWS(dev)) | ||
1329 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | ||
1330 | |||
1322 | ring->dev = dev; | 1331 | ring->dev = dev; |
1323 | INIT_LIST_HEAD(&ring->active_list); | 1332 | INIT_LIST_HEAD(&ring->active_list); |
1324 | INIT_LIST_HEAD(&ring->request_list); | 1333 | INIT_LIST_HEAD(&ring->request_list); |
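MI_MODE and GFX_MODE_GEN7 above are masked registers: the upper 16 bits of a write select which of the lower 16 bits actually change, so independent fields can be updated without a read-modify-write. That is why the context line ors in MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE. Assuming GFX_MODE_ENABLE()/GFX_MODE_DISABLE() follow the same convention, they expand to something like:

#define MASKED_BIT_ENABLE(a)    (((a) << 16) | (a))     /* write 1 */
#define MASKED_BIT_DISABLE(a)   ((a) << 16)             /* write 0 */

/* One write flips both fields and leaves every other bit alone:
 *
 *      I915_WRITE(GFX_MODE_GEN7,
 *                 MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
 *                 MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 */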
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d893..66e47a0100c 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -92,6 +92,11 @@ struct intel_sdvo { | |||
92 | */ | 92 | */ |
93 | uint16_t attached_output; | 93 | uint16_t attached_output; |
94 | 94 | ||
95 | /* | ||
96 | * Hotplug activation bits for this device | ||
97 | */ | ||
98 | uint8_t hotplug_active[2]; | ||
99 | |||
95 | /** | 100 | /** |
96 | * This is used to select the color range of RGB outputs in HDMI mode. | 101 | * This is used to select the color range of RGB outputs in HDMI mode. |
97 | * It is only valid when using TMDS encoding and 8 bit per color mode. | 102 | * It is only valid when using TMDS encoding and 8 bit per color mode. |
@@ -1059,15 +1064,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1059 | 1064 | ||
1060 | /* Set the SDVO control regs. */ | 1065 | /* Set the SDVO control regs. */ |
1061 | if (INTEL_INFO(dev)->gen >= 4) { | 1066 | if (INTEL_INFO(dev)->gen >= 4) { |
1062 | sdvox = 0; | 1067 | /* The real mode polarity is set by the SDVO commands, using |
1068 | * struct intel_sdvo_dtd. */ | ||
1069 | sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; | ||
1063 | if (intel_sdvo->is_hdmi) | 1070 | if (intel_sdvo->is_hdmi) |
1064 | sdvox |= intel_sdvo->color_range; | 1071 | sdvox |= intel_sdvo->color_range; |
1065 | if (INTEL_INFO(dev)->gen < 5) | 1072 | if (INTEL_INFO(dev)->gen < 5) |
1066 | sdvox |= SDVO_BORDER_ENABLE; | 1073 | sdvox |= SDVO_BORDER_ENABLE; |
1067 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
1068 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; | ||
1069 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
1070 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | ||
1071 | } else { | 1074 | } else { |
1072 | sdvox = I915_READ(intel_sdvo->sdvo_reg); | 1075 | sdvox = I915_READ(intel_sdvo->sdvo_reg); |
1073 | switch (intel_sdvo->sdvo_reg) { | 1076 | switch (intel_sdvo->sdvo_reg) { |
@@ -1080,8 +1083,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1080 | } | 1083 | } |
1081 | sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; | 1084 | sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; |
1082 | } | 1085 | } |
1083 | if (intel_crtc->pipe == 1) | 1086 | |
1084 | sdvox |= SDVO_PIPE_B_SELECT; | 1087 | if (INTEL_PCH_TYPE(dev) >= PCH_CPT) |
1088 | sdvox |= TRANSCODER_CPT(intel_crtc->pipe); | ||
1089 | else | ||
1090 | sdvox |= TRANSCODER(intel_crtc->pipe); | ||
1091 | |||
1085 | if (intel_sdvo->has_hdmi_audio) | 1092 | if (intel_sdvo->has_hdmi_audio) |
1086 | sdvox |= SDVO_AUDIO_ENABLE; | 1093 | sdvox |= SDVO_AUDIO_ENABLE; |
1087 | 1094 | ||
@@ -1208,74 +1215,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in | |||
1208 | return true; | 1215 | return true; |
1209 | } | 1216 | } |
1210 | 1217 | ||
1211 | /* No use! */ | 1218 | static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) |
1212 | #if 0 | ||
1213 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | ||
1214 | { | ||
1215 | struct drm_connector *connector = NULL; | ||
1216 | struct intel_sdvo *iout = NULL; | ||
1217 | struct intel_sdvo *sdvo; | ||
1218 | |||
1219 | /* find the sdvo connector */ | ||
1220 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1221 | iout = to_intel_sdvo(connector); | ||
1222 | |||
1223 | if (iout->type != INTEL_OUTPUT_SDVO) | ||
1224 | continue; | ||
1225 | |||
1226 | sdvo = iout->dev_priv; | ||
1227 | |||
1228 | if (sdvo->sdvo_reg == SDVOB && sdvoB) | ||
1229 | return connector; | ||
1230 | |||
1231 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) | ||
1232 | return connector; | ||
1233 | |||
1234 | } | ||
1235 | |||
1236 | return NULL; | ||
1237 | } | ||
1238 | |||
1239 | int intel_sdvo_supports_hotplug(struct drm_connector *connector) | ||
1240 | { | 1219 | { |
1241 | u8 response[2]; | 1220 | u8 response[2]; |
1242 | u8 status; | ||
1243 | struct intel_sdvo *intel_sdvo; | ||
1244 | DRM_DEBUG_KMS("\n"); | ||
1245 | |||
1246 | if (!connector) | ||
1247 | return 0; | ||
1248 | |||
1249 | intel_sdvo = to_intel_sdvo(connector); | ||
1250 | 1221 | ||
1251 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, | 1222 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, |
1252 | &response, 2) && response[0]; | 1223 | &response, 2) && response[0]; |
1253 | } | 1224 | } |
1254 | 1225 | ||
1255 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | 1226 | static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) |
1256 | { | 1227 | { |
1257 | u8 response[2]; | 1228 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); |
1258 | u8 status; | ||
1259 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); | ||
1260 | |||
1261 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1262 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1263 | 1229 | ||
1264 | if (on) { | 1230 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); |
1265 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1266 | status = intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1267 | |||
1268 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1269 | } else { | ||
1270 | response[0] = 0; | ||
1271 | response[1] = 0; | ||
1272 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1273 | } | ||
1274 | |||
1275 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1276 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1277 | } | 1231 | } |
1278 | #endif | ||
1279 | 1232 | ||
1280 | static bool | 1233 | static bool |
1281 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1234 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
@@ -2045,6 +1998,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2045 | { | 1998 | { |
2046 | struct drm_encoder *encoder = &intel_sdvo->base.base; | 1999 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2047 | struct drm_connector *connector; | 2000 | struct drm_connector *connector; |
2001 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
2048 | struct intel_connector *intel_connector; | 2002 | struct intel_connector *intel_connector; |
2049 | struct intel_sdvo_connector *intel_sdvo_connector; | 2003 | struct intel_sdvo_connector *intel_sdvo_connector; |
2050 | 2004 | ||
@@ -2062,7 +2016,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2062 | 2016 | ||
2063 | intel_connector = &intel_sdvo_connector->base; | 2017 | intel_connector = &intel_sdvo_connector->base; |
2064 | connector = &intel_connector->base; | 2018 | connector = &intel_connector->base; |
2065 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | 2019 | if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { |
2020 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
2021 | intel_sdvo->hotplug_active[0] |= 1 << device; | ||
2022 | /* Some SDVO devices have one-shot hotplug interrupts. | ||
2023 | * Ensure that they get re-enabled when an interrupt happens. | ||
2024 | */ | ||
2025 | intel_encoder->hot_plug = intel_sdvo_enable_hotplug; | ||
2026 | intel_sdvo_enable_hotplug(intel_encoder); | ||
2027 | } | ||
2028 | else | ||
2029 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
2066 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2030 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2067 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2031 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2068 | 2032 | ||
@@ -2569,6 +2533,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2569 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) | 2533 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) |
2570 | goto err; | 2534 | goto err; |
2571 | 2535 | ||
2536 | /* Set up hotplug command - note paranoia about contents of reply. | ||
2537 | * We assume that the hardware is in a sane state, and only touch | ||
2538 | * the bits we think we understand. | ||
2539 | */ | ||
2540 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, | ||
2541 | &intel_sdvo->hotplug_active, 2); | ||
2542 | intel_sdvo->hotplug_active[0] &= ~0x3; | ||
2543 | |||
2572 | if (intel_sdvo_output_setup(intel_sdvo, | 2544 | if (intel_sdvo_output_setup(intel_sdvo, |
2573 | intel_sdvo->caps.output_flags) != true) { | 2545 | intel_sdvo->caps.output_flags) != true) { |
2574 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2546 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
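The shape of the new SDVO hotplug setup, in outline: GET_HOT_PLUG_SUPPORT returns a per-output bitmask, each DVI output that advertises support gets its bit copied into hotplug_active[], and SET_ACTIVE_HOT_PLUG arms it, once at init and again from the hot_plug hook because some devices only fire the interrupt one-shot. A hedged sketch (the sdvo_* helpers here are hypothetical stand-ins for the command writes in the hunk):

static void sdvo_setup_hotplug(int device, uint8_t hotplug_active[2])
{
        uint8_t supported = sdvo_get_hotplug_support(); /* response[0] */

        if (supported & (1u << device)) {
                hotplug_active[0] |= 1u << device;
                sdvo_set_active_hotplug(hotplug_active); /* arm now... */
                /* ...and re-arm from encoder->hot_plug() on each IRQ */
        } else {
                /* no hotplug: fall back to connect/disconnect polling */
        }
}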
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 113e4e7264c..210d570fd51 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1236,6 +1236,8 @@ intel_tv_detect_type (struct intel_tv *intel_tv, | |||
1236 | struct drm_connector *connector) | 1236 | struct drm_connector *connector) |
1237 | { | 1237 | { |
1238 | struct drm_encoder *encoder = &intel_tv->base.base; | 1238 | struct drm_encoder *encoder = &intel_tv->base.base; |
1239 | struct drm_crtc *crtc = encoder->crtc; | ||
1240 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1239 | struct drm_device *dev = encoder->dev; | 1241 | struct drm_device *dev = encoder->dev; |
1240 | struct drm_i915_private *dev_priv = dev->dev_private; | 1242 | struct drm_i915_private *dev_priv = dev->dev_private; |
1241 | unsigned long irqflags; | 1243 | unsigned long irqflags; |
@@ -1258,6 +1260,10 @@ intel_tv_detect_type (struct intel_tv *intel_tv, | |||
1258 | /* Poll for TV detection */ | 1260 | /* Poll for TV detection */ |
1259 | tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); | 1261 | tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); |
1260 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; | 1262 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; |
1263 | if (intel_crtc->pipe == 1) | ||
1264 | tv_ctl |= TV_ENC_PIPEB_SELECT; | ||
1265 | else | ||
1266 | tv_ctl &= ~TV_ENC_PIPEB_SELECT; | ||
1261 | 1267 | ||
1262 | tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); | 1268 | tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); |
1263 | tv_dac |= (TVDAC_STATE_CHG_EN | | 1269 | tv_dac |= (TVDAC_STATE_CHG_EN | |
@@ -1277,26 +1283,26 @@ intel_tv_detect_type (struct intel_tv *intel_tv, | |||
1277 | to_intel_crtc(intel_tv->base.base.crtc)->pipe); | 1283 | to_intel_crtc(intel_tv->base.base.crtc)->pipe); |
1278 | 1284 | ||
1279 | type = -1; | 1285 | type = -1; |
1280 | if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { | 1286 | tv_dac = I915_READ(TV_DAC); |
1281 | DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); | 1287 | DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); |
1282 | /* | 1288 | /* |
1283 | * A B C | 1289 | * A B C |
1284 | * 0 1 1 Composite | 1290 | * 0 1 1 Composite |
1285 | * 1 0 X svideo | 1291 | * 1 0 X svideo |
1286 | * 0 0 0 Component | 1292 | * 0 0 0 Component |
1287 | */ | 1293 | */ |
1288 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { | 1294 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { |
1289 | DRM_DEBUG_KMS("Detected Composite TV connection\n"); | 1295 | DRM_DEBUG_KMS("Detected Composite TV connection\n"); |
1290 | type = DRM_MODE_CONNECTOR_Composite; | 1296 | type = DRM_MODE_CONNECTOR_Composite; |
1291 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { | 1297 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { |
1292 | DRM_DEBUG_KMS("Detected S-Video TV connection\n"); | 1298 | DRM_DEBUG_KMS("Detected S-Video TV connection\n"); |
1293 | type = DRM_MODE_CONNECTOR_SVIDEO; | 1299 | type = DRM_MODE_CONNECTOR_SVIDEO; |
1294 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { | 1300 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { |
1295 | DRM_DEBUG_KMS("Detected Component TV connection\n"); | 1301 | DRM_DEBUG_KMS("Detected Component TV connection\n"); |
1296 | type = DRM_MODE_CONNECTOR_Component; | 1302 | type = DRM_MODE_CONNECTOR_Component; |
1297 | } else { | 1303 | } else { |
1298 | DRM_DEBUG_KMS("Unrecognised TV connection\n"); | 1304 | DRM_DEBUG_KMS("Unrecognised TV connection\n"); |
1299 | } | 1305 | type = -1; |
1300 | } | 1306 | } |
1301 | 1307 | ||
1302 | I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); | 1308 | I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); |
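The sense-line truth table in the comment above decodes mechanically. As a standalone function, with the bit meanings taken straight from that comment:

enum tv_conn { TV_NONE = -1, TV_COMPOSITE, TV_SVIDEO, TV_COMPONENT };

/* A B C -> 0 1 1 Composite, 1 0 X S-Video, 0 0 0 Component */
static enum tv_conn decode_tv_sense(int a, int b, int c)
{
        if (!a && b && c)
                return TV_COMPOSITE;
        if (a && !b)
                return TV_SVIDEO;
        if (!a && !b && !c)
                return TV_COMPONENT;
        return TV_NONE;         /* unrecognised combination */
}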
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 729d5fd7c88..b311faba34f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -135,13 +135,14 @@ static void load_vbios_pramin(struct drm_device *dev, uint8_t *data) | |||
135 | int i; | 135 | int i; |
136 | 136 | ||
137 | if (dev_priv->card_type >= NV_50) { | 137 | if (dev_priv->card_type >= NV_50) { |
138 | uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8; | 138 | u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8; |
139 | 139 | if (!addr) { | |
140 | if (!vbios_vram) | 140 | addr = (u64)nv_rd32(dev, 0x1700) << 16; |
141 | vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000; | 141 | addr += 0xf0000; |
142 | } | ||
142 | 143 | ||
143 | old_bar0_pramin = nv_rd32(dev, 0x1700); | 144 | old_bar0_pramin = nv_rd32(dev, 0x1700); |
144 | nv_wr32(dev, 0x1700, vbios_vram >> 16); | 145 | nv_wr32(dev, 0x1700, addr >> 16); |
145 | } | 146 | } |
146 | 147 | ||
147 | /* bail if no rom signature */ | 148 | /* bail if no rom signature */ |
@@ -5186,7 +5187,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st | |||
5186 | load_table_ptr = ROM16(bios->data[bitentry->offset]); | 5187 | load_table_ptr = ROM16(bios->data[bitentry->offset]); |
5187 | 5188 | ||
5188 | if (load_table_ptr == 0x0) { | 5189 | if (load_table_ptr == 0x0) { |
5189 | NV_ERROR(dev, "Pointer to BIT loadval table invalid\n"); | 5190 | NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n"); |
5190 | return -EINVAL; | 5191 | return -EINVAL; |
5191 | } | 5192 | } |
5192 | 5193 | ||
@@ -5965,6 +5966,12 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx) | |||
5965 | if (cte->type == DCB_CONNECTOR_HDMI_1) | 5966 | if (cte->type == DCB_CONNECTOR_HDMI_1) |
5966 | cte->type = DCB_CONNECTOR_DVI_I; | 5967 | cte->type = DCB_CONNECTOR_DVI_I; |
5967 | } | 5968 | } |
5969 | |||
5970 | /* Gigabyte GV-NX86T512H */ | ||
5971 | if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) { | ||
5972 | if (cte->type == DCB_CONNECTOR_HDMI_1) | ||
5973 | cte->type = DCB_CONNECTOR_DVI_I; | ||
5974 | } | ||
5968 | } | 5975 | } |
5969 | 5976 | ||
5970 | static const u8 hpd_gpio[16] = { | 5977 | static const u8 hpd_gpio[16] = { |
@@ -6377,6 +6384,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) | |||
6377 | } | 6384 | } |
6378 | } | 6385 | } |
6379 | 6386 | ||
6387 | /* Some other twisted XFX board (rhbz#694914) | ||
6388 | * | ||
6389 | * The DVI/VGA encoder combo that's supposed to represent the | ||
6390 | * DVI-I connector actually points at two different ones, and | ||
6391 | * the HDMI connector ends up paired with the VGA instead. | ||
6392 | * | ||
6393 | * The connector table has no entry for VGA at all, pointing it | ||
6394 | * at an invalid conntab entry 2, so we figure it out ourselves. | ||
6395 | */ | ||
6396 | if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) { | ||
6397 | if (idx == 0) { | ||
6398 | *conn = 0x02002300; /* VGA, connector 2 */ | ||
6399 | *conf = 0x00000028; | ||
6400 | } else | ||
6401 | if (idx == 1) { | ||
6402 | *conn = 0x01010312; /* DVI, connector 0 */ | ||
6403 | *conf = 0x00020030; | ||
6404 | } else | ||
6405 | if (idx == 2) { | ||
6406 | *conn = 0x04020310; /* VGA, connector 0 */ | ||
6407 | *conf = 0x00000028; | ||
6408 | } else | ||
6409 | if (idx == 3) { | ||
6410 | *conn = 0x02021322; /* HDMI, connector 1 */ | ||
6411 | *conf = 0x00020010; | ||
6412 | } else { | ||
6413 | *conn = 0x0000000e; /* EOL */ | ||
6414 | *conf = 0x00000000; | ||
6415 | } | ||
6416 | } | ||
6417 | |||
6380 | return true; | 6418 | return true; |
6381 | } | 6419 | } |
6382 | 6420 | ||
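The substance of the load_vbios_pramin() change above is the width of the arithmetic: the masked value read from 0x619f04, shifted left by 8, is a byte address that can exceed 32 bits, so computing it in a u32 truncates once the VBIOS image sits above 4 GiB of VRAM. The fixed math in isolation (register layout as assumed by the hunk):

#include <stdint.h>

/* Must be computed in 64 bits: 0xffffff00 << 8 is ~1 TiB, well
 * past what a u32 can hold. */
static uint64_t pramin_vbios_addr(uint32_t reg)
{
        return (uint64_t)(reg & 0xffffff00) << 8;
}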
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2ad49cbf7c8..890d50e4d68 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -49,16 +49,12 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
50 | 50 | ||
51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); | 51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); |
52 | if (nvbo->vma.node) { | ||
53 | nouveau_vm_unmap(&nvbo->vma); | ||
54 | nouveau_vm_put(&nvbo->vma); | ||
55 | } | ||
56 | kfree(nvbo); | 52 | kfree(nvbo); |
57 | } | 53 | } |
58 | 54 | ||
59 | static void | 55 | static void |
60 | nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, | 56 | nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, |
61 | int *align, int *size, int *page_shift) | 57 | int *align, int *size) |
62 | { | 58 | { |
63 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 59 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
64 | 60 | ||
@@ -82,67 +78,51 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, | |||
82 | } | 78 | } |
83 | } | 79 | } |
84 | } else { | 80 | } else { |
85 | if (likely(dev_priv->chan_vm)) { | 81 | *size = roundup(*size, (1 << nvbo->page_shift)); |
86 | if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024) | 82 | *align = max((1 << nvbo->page_shift), *align); |
87 | *page_shift = dev_priv->chan_vm->lpg_shift; | ||
88 | else | ||
89 | *page_shift = dev_priv->chan_vm->spg_shift; | ||
90 | } else { | ||
91 | *page_shift = 12; | ||
92 | } | ||
93 | |||
94 | *size = roundup(*size, (1 << *page_shift)); | ||
95 | *align = max((1 << *page_shift), *align); | ||
96 | } | 83 | } |
97 | 84 | ||
98 | *size = roundup(*size, PAGE_SIZE); | 85 | *size = roundup(*size, PAGE_SIZE); |
99 | } | 86 | } |
100 | 87 | ||
101 | int | 88 | int |
102 | nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | 89 | nouveau_bo_new(struct drm_device *dev, int size, int align, |
103 | int size, int align, uint32_t flags, uint32_t tile_mode, | 90 | uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, |
104 | uint32_t tile_flags, struct nouveau_bo **pnvbo) | 91 | struct nouveau_bo **pnvbo) |
105 | { | 92 | { |
106 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 93 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
107 | struct nouveau_bo *nvbo; | 94 | struct nouveau_bo *nvbo; |
108 | int ret = 0, page_shift = 0; | 95 | int ret; |
109 | 96 | ||
110 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); | 97 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); |
111 | if (!nvbo) | 98 | if (!nvbo) |
112 | return -ENOMEM; | 99 | return -ENOMEM; |
113 | INIT_LIST_HEAD(&nvbo->head); | 100 | INIT_LIST_HEAD(&nvbo->head); |
114 | INIT_LIST_HEAD(&nvbo->entry); | 101 | INIT_LIST_HEAD(&nvbo->entry); |
102 | INIT_LIST_HEAD(&nvbo->vma_list); | ||
115 | nvbo->tile_mode = tile_mode; | 103 | nvbo->tile_mode = tile_mode; |
116 | nvbo->tile_flags = tile_flags; | 104 | nvbo->tile_flags = tile_flags; |
117 | nvbo->bo.bdev = &dev_priv->ttm.bdev; | 105 | nvbo->bo.bdev = &dev_priv->ttm.bdev; |
118 | 106 | ||
119 | nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift); | 107 | nvbo->page_shift = 12; |
120 | align >>= PAGE_SHIFT; | 108 | if (dev_priv->bar1_vm) { |
121 | 109 | if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) | |
122 | if (dev_priv->chan_vm) { | 110 | nvbo->page_shift = dev_priv->bar1_vm->lpg_shift; |
123 | ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift, | ||
124 | NV_MEM_ACCESS_RW, &nvbo->vma); | ||
125 | if (ret) { | ||
126 | kfree(nvbo); | ||
127 | return ret; | ||
128 | } | ||
129 | } | 111 | } |
130 | 112 | ||
113 | nouveau_bo_fixup_align(nvbo, flags, &align, &size); | ||
131 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; | 114 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; |
132 | nouveau_bo_placement_set(nvbo, flags, 0); | 115 | nouveau_bo_placement_set(nvbo, flags, 0); |
133 | 116 | ||
134 | nvbo->channel = chan; | ||
135 | ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, | 117 | ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, |
136 | ttm_bo_type_device, &nvbo->placement, align, 0, | 118 | ttm_bo_type_device, &nvbo->placement, |
137 | false, NULL, size, nouveau_bo_del_ttm); | 119 | align >> PAGE_SHIFT, 0, false, NULL, size, |
120 | nouveau_bo_del_ttm); | ||
138 | if (ret) { | 121 | if (ret) { |
139 | /* ttm will call nouveau_bo_del_ttm if it fails.. */ | 122 | /* ttm will call nouveau_bo_del_ttm if it fails.. */ |
140 | return ret; | 123 | return ret; |
141 | } | 124 | } |
142 | nvbo->channel = NULL; | ||
143 | 125 | ||
144 | if (nvbo->vma.node) | ||
145 | nvbo->bo.offset = nvbo->vma.offset; | ||
146 | *pnvbo = nvbo; | 126 | *pnvbo = nvbo; |
147 | return 0; | 127 | return 0; |
148 | } | 128 | } |
@@ -312,8 +292,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, | |||
312 | if (ret) | 292 | if (ret) |
313 | return ret; | 293 | return ret; |
314 | 294 | ||
315 | if (nvbo->vma.node) | ||
316 | nvbo->bo.offset = nvbo->vma.offset; | ||
317 | return 0; | 295 | return 0; |
318 | } | 296 | } |
319 | 297 | ||
@@ -440,7 +418,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
440 | TTM_MEMTYPE_FLAG_CMA; | 418 | TTM_MEMTYPE_FLAG_CMA; |
441 | man->available_caching = TTM_PL_MASK_CACHING; | 419 | man->available_caching = TTM_PL_MASK_CACHING; |
442 | man->default_caching = TTM_PL_FLAG_CACHED; | 420 | man->default_caching = TTM_PL_FLAG_CACHED; |
443 | man->gpu_offset = dev_priv->gart_info.aper_base; | ||
444 | break; | 421 | break; |
445 | default: | 422 | default: |
446 | NV_ERROR(dev, "Unknown GART type: %d\n", | 423 | NV_ERROR(dev, "Unknown GART type: %d\n", |
@@ -501,19 +478,12 @@ static int | |||
501 | nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 478 | nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
502 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 479 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
503 | { | 480 | { |
504 | struct nouveau_mem *old_node = old_mem->mm_node; | 481 | struct nouveau_mem *node = old_mem->mm_node; |
505 | struct nouveau_mem *new_node = new_mem->mm_node; | 482 | u64 src_offset = node->vma[0].offset; |
506 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 483 | u64 dst_offset = node->vma[1].offset; |
507 | u32 page_count = new_mem->num_pages; | 484 | u32 page_count = new_mem->num_pages; |
508 | u64 src_offset, dst_offset; | ||
509 | int ret; | 485 | int ret; |
510 | 486 | ||
511 | src_offset = old_node->tmp_vma.offset; | ||
512 | if (new_node->tmp_vma.node) | ||
513 | dst_offset = new_node->tmp_vma.offset; | ||
514 | else | ||
515 | dst_offset = nvbo->vma.offset; | ||
516 | |||
517 | page_count = new_mem->num_pages; | 487 | page_count = new_mem->num_pages; |
518 | while (page_count) { | 488 | while (page_count) { |
519 | int line_count = (page_count > 2047) ? 2047 : page_count; | 489 | int line_count = (page_count > 2047) ? 2047 : page_count; |
@@ -547,19 +517,13 @@ static int | |||
547 | nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | 517 | nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
548 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 518 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
549 | { | 519 | { |
550 | struct nouveau_mem *old_node = old_mem->mm_node; | 520 | struct nouveau_mem *node = old_mem->mm_node; |
551 | struct nouveau_mem *new_node = new_mem->mm_node; | ||
552 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 521 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
553 | u64 length = (new_mem->num_pages << PAGE_SHIFT); | 522 | u64 length = (new_mem->num_pages << PAGE_SHIFT); |
554 | u64 src_offset, dst_offset; | 523 | u64 src_offset = node->vma[0].offset; |
524 | u64 dst_offset = node->vma[1].offset; | ||
555 | int ret; | 525 | int ret; |
556 | 526 | ||
557 | src_offset = old_node->tmp_vma.offset; | ||
558 | if (new_node->tmp_vma.node) | ||
559 | dst_offset = new_node->tmp_vma.offset; | ||
560 | else | ||
561 | dst_offset = nvbo->vma.offset; | ||
562 | |||
563 | while (length) { | 527 | while (length) { |
564 | u32 amount, stride, height; | 528 | u32 amount, stride, height; |
565 | 529 | ||
@@ -695,6 +659,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, | |||
695 | } | 659 | } |
696 | 660 | ||
697 | static int | 661 | static int |
662 | nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo, | ||
663 | struct ttm_mem_reg *mem, struct nouveau_vma *vma) | ||
664 | { | ||
665 | struct nouveau_mem *node = mem->mm_node; | ||
666 | int ret; | ||
667 | |||
668 | ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT, | ||
669 | node->page_shift, NV_MEM_ACCESS_RO, vma); | ||
670 | if (ret) | ||
671 | return ret; | ||
672 | |||
673 | if (mem->mem_type == TTM_PL_VRAM) | ||
674 | nouveau_vm_map(vma, node); | ||
675 | else | ||
676 | nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, | ||
677 | node, node->pages); | ||
678 | |||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int | ||
698 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | 683 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, |
699 | bool no_wait_reserve, bool no_wait_gpu, | 684 | bool no_wait_reserve, bool no_wait_gpu, |
700 | struct ttm_mem_reg *new_mem) | 685 | struct ttm_mem_reg *new_mem) |
@@ -711,31 +696,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
711 | mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); | 696 | mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); |
712 | } | 697 | } |
713 | 698 | ||
714 | /* create temporary vma for old memory, this will get cleaned | 699 | /* create temporary vmas for the transfer and attach them to the |
715 | * up after ttm destroys the ttm_mem_reg | 700 | * old nouveau_mem node; these will get cleaned up after ttm has |
701 | * destroyed the ttm_mem_reg | ||
716 | */ | 702 | */ |
717 | if (dev_priv->card_type >= NV_50) { | 703 | if (dev_priv->card_type >= NV_50) { |
718 | struct nouveau_mem *node = old_mem->mm_node; | 704 | struct nouveau_mem *node = old_mem->mm_node; |
719 | if (!node->tmp_vma.node) { | ||
720 | u32 page_shift = nvbo->vma.node->type; | ||
721 | if (old_mem->mem_type == TTM_PL_TT) | ||
722 | page_shift = nvbo->vma.vm->spg_shift; | ||
723 | |||
724 | ret = nouveau_vm_get(chan->vm, | ||
725 | old_mem->num_pages << PAGE_SHIFT, | ||
726 | page_shift, NV_MEM_ACCESS_RO, | ||
727 | &node->tmp_vma); | ||
728 | if (ret) | ||
729 | goto out; | ||
730 | } | ||
731 | 705 | ||
732 | if (old_mem->mem_type == TTM_PL_VRAM) | 706 | ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); |
733 | nouveau_vm_map(&node->tmp_vma, node); | 707 | if (ret) |
734 | else { | 708 | goto out; |
735 | nouveau_vm_map_sg(&node->tmp_vma, 0, | 709 | |
736 | old_mem->num_pages << PAGE_SHIFT, | 710 | ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]); |
737 | node, node->pages); | 711 | if (ret) |
738 | } | 712 | goto out; |
739 | } | 713 | } |
740 | 714 | ||
741 | if (dev_priv->card_type < NV_50) | 715 | if (dev_priv->card_type < NV_50) |
@@ -762,7 +736,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
762 | bool no_wait_reserve, bool no_wait_gpu, | 736 | bool no_wait_reserve, bool no_wait_gpu, |
763 | struct ttm_mem_reg *new_mem) | 737 | struct ttm_mem_reg *new_mem) |
764 | { | 738 | { |
765 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | ||
766 | u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; | 739 | u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; |
767 | struct ttm_placement placement; | 740 | struct ttm_placement placement; |
768 | struct ttm_mem_reg tmp_mem; | 741 | struct ttm_mem_reg tmp_mem; |
@@ -782,23 +755,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
782 | if (ret) | 755 | if (ret) |
783 | goto out; | 756 | goto out; |
784 | 757 | ||
785 | if (dev_priv->card_type >= NV_50) { | ||
786 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
787 | struct nouveau_mem *node = tmp_mem.mm_node; | ||
788 | struct nouveau_vma *vma = &nvbo->vma; | ||
789 | if (vma->node->type != vma->vm->spg_shift) | ||
790 | vma = &node->tmp_vma; | ||
791 | nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT, | ||
792 | node, node->pages); | ||
793 | } | ||
794 | |||
795 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); | 758 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); |
796 | |||
797 | if (dev_priv->card_type >= NV_50) { | ||
798 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
799 | nouveau_vm_unmap(&nvbo->vma); | ||
800 | } | ||
801 | |||
802 | if (ret) | 759 | if (ret) |
803 | goto out; | 760 | goto out; |
804 | 761 | ||
@@ -844,30 +801,22 @@ out: | |||
844 | static void | 801 | static void |
845 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) | 802 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) |
846 | { | 803 | { |
847 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | ||
848 | struct nouveau_mem *node = new_mem->mm_node; | 804 | struct nouveau_mem *node = new_mem->mm_node; |
849 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 805 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
850 | struct nouveau_vma *vma = &nvbo->vma; | 806 | struct nouveau_vma *vma; |
851 | struct nouveau_vm *vm = vma->vm; | 807 | |
852 | 808 | list_for_each_entry(vma, &nvbo->vma_list, head) { | |
853 | if (dev_priv->card_type < NV_50) | 809 | if (new_mem->mem_type == TTM_PL_VRAM) { |
854 | return; | 810 | nouveau_vm_map(vma, new_mem->mm_node); |
855 | 811 | } else | |
856 | switch (new_mem->mem_type) { | 812 | if (new_mem->mem_type == TTM_PL_TT && |
857 | case TTM_PL_VRAM: | 813 | nvbo->page_shift == vma->vm->spg_shift) { |
858 | nouveau_vm_map(vma, node); | 814 | nouveau_vm_map_sg(vma, 0, new_mem-> |
859 | break; | 815 | num_pages << PAGE_SHIFT, |
860 | case TTM_PL_TT: | 816 | node, node->pages); |
861 | if (vma->node->type != vm->spg_shift) { | 817 | } else { |
862 | nouveau_vm_unmap(vma); | 818 | nouveau_vm_unmap(vma); |
863 | vma = &node->tmp_vma; | ||
864 | } | 819 | } |
865 | nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT, | ||
866 | node, node->pages); | ||
867 | break; | ||
868 | default: | ||
869 | nouveau_vm_unmap(&nvbo->vma); | ||
870 | break; | ||
871 | } | 820 | } |
872 | } | 821 | } |
873 | 822 | ||
@@ -1113,3 +1062,54 @@ struct ttm_bo_driver nouveau_bo_driver = { | |||
1113 | .io_mem_free = &nouveau_ttm_io_mem_free, | 1062 | .io_mem_free = &nouveau_ttm_io_mem_free, |
1114 | }; | 1063 | }; |
1115 | 1064 | ||
1065 | struct nouveau_vma * | ||
1066 | nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm) | ||
1067 | { | ||
1068 | struct nouveau_vma *vma; | ||
1069 | list_for_each_entry(vma, &nvbo->vma_list, head) { | ||
1070 | if (vma->vm == vm) | ||
1071 | return vma; | ||
1072 | } | ||
1073 | |||
1074 | return NULL; | ||
1075 | } | ||
1076 | |||
1077 | int | ||
1078 | nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, | ||
1079 | struct nouveau_vma *vma) | ||
1080 | { | ||
1081 | const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; | ||
1082 | struct nouveau_mem *node = nvbo->bo.mem.mm_node; | ||
1083 | int ret; | ||
1084 | |||
1085 | ret = nouveau_vm_get(vm, size, nvbo->page_shift, | ||
1086 | NV_MEM_ACCESS_RW, vma); | ||
1087 | if (ret) | ||
1088 | return ret; | ||
1089 | |||
1090 | if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) | ||
1091 | nouveau_vm_map(vma, nvbo->bo.mem.mm_node); | ||
1092 | else | ||
1093 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) | ||
1094 | nouveau_vm_map_sg(vma, 0, size, node, node->pages); | ||
1095 | |||
1096 | list_add_tail(&vma->head, &nvbo->vma_list); | ||
1097 | vma->refcount = 1; | ||
1098 | return 0; | ||
1099 | } | ||
1100 | |||
1101 | void | ||
1102 | nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma) | ||
1103 | { | ||
1104 | if (vma->node) { | ||
1105 | if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) { | ||
1106 | spin_lock(&nvbo->bo.bdev->fence_lock); | ||
1107 | ttm_bo_wait(&nvbo->bo, false, false, false); | ||
1108 | spin_unlock(&nvbo->bo.bdev->fence_lock); | ||
1109 | nouveau_vm_unmap(vma); | ||
1110 | } | ||
1111 | |||
1112 | nouveau_vm_put(vma); | ||
1113 | list_del(&vma->head); | ||
1114 | } | ||
1115 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a7583a8ddb0..0e3241c39b8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -27,40 +27,63 @@ | |||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_drm.h" | 28 | #include "nouveau_drm.h" |
29 | #include "nouveau_dma.h" | 29 | #include "nouveau_dma.h" |
30 | #include "nouveau_ramht.h" | ||
30 | 31 | ||
31 | static int | 32 | static int |
32 | nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) | 33 | nouveau_channel_pushbuf_init(struct nouveau_channel *chan) |
33 | { | 34 | { |
35 | u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT; | ||
34 | struct drm_device *dev = chan->dev; | 36 | struct drm_device *dev = chan->dev; |
35 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 37 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
36 | struct nouveau_bo *pb = chan->pushbuf_bo; | 38 | int ret; |
37 | struct nouveau_gpuobj *pushbuf = NULL; | 39 | |
38 | int ret = 0; | 40 | /* allocate buffer object */ |
41 | ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo); | ||
42 | if (ret) | ||
43 | goto out; | ||
44 | |||
45 | ret = nouveau_bo_pin(chan->pushbuf_bo, mem); | ||
46 | if (ret) | ||
47 | goto out; | ||
48 | |||
49 | ret = nouveau_bo_map(chan->pushbuf_bo); | ||
50 | if (ret) | ||
51 | goto out; | ||
39 | 52 | ||
53 | /* create DMA object covering the entire memtype where the push | ||
54 | * buffer resides, userspace can submit its own push buffers from | ||
55 | * anywhere within the same memtype. | ||
56 | */ | ||
57 | chan->pushbuf_base = chan->pushbuf_bo->bo.offset; | ||
40 | if (dev_priv->card_type >= NV_50) { | 58 | if (dev_priv->card_type >= NV_50) { |
59 | ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm, | ||
60 | &chan->pushbuf_vma); | ||
61 | if (ret) | ||
62 | goto out; | ||
63 | |||
41 | if (dev_priv->card_type < NV_C0) { | 64 | if (dev_priv->card_type < NV_C0) { |
42 | ret = nouveau_gpuobj_dma_new(chan, | 65 | ret = nouveau_gpuobj_dma_new(chan, |
43 | NV_CLASS_DMA_IN_MEMORY, 0, | 66 | NV_CLASS_DMA_IN_MEMORY, 0, |
44 | (1ULL << 40), | 67 | (1ULL << 40), |
45 | NV_MEM_ACCESS_RO, | 68 | NV_MEM_ACCESS_RO, |
46 | NV_MEM_TARGET_VM, | 69 | NV_MEM_TARGET_VM, |
47 | &pushbuf); | 70 | &chan->pushbuf); |
48 | } | 71 | } |
49 | chan->pushbuf_base = pb->bo.offset; | 72 | chan->pushbuf_base = chan->pushbuf_vma.offset; |
50 | } else | 73 | } else |
51 | if (pb->bo.mem.mem_type == TTM_PL_TT) { | 74 | if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { |
52 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | 75 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
53 | dev_priv->gart_info.aper_size, | 76 | dev_priv->gart_info.aper_size, |
54 | NV_MEM_ACCESS_RO, | 77 | NV_MEM_ACCESS_RO, |
55 | NV_MEM_TARGET_GART, &pushbuf); | 78 | NV_MEM_TARGET_GART, |
56 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; | 79 | &chan->pushbuf); |
57 | } else | 80 | } else |
58 | if (dev_priv->card_type != NV_04) { | 81 | if (dev_priv->card_type != NV_04) { |
59 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | 82 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
60 | dev_priv->fb_available_size, | 83 | dev_priv->fb_available_size, |
61 | NV_MEM_ACCESS_RO, | 84 | NV_MEM_ACCESS_RO, |
62 | NV_MEM_TARGET_VRAM, &pushbuf); | 85 | NV_MEM_TARGET_VRAM, |
63 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; | 86 | &chan->pushbuf); |
64 | } else { | 87 | } else { |
65 | /* NV04 cmdbuf hack, from original ddx.. not sure of it's | 88 | /* NV04 cmdbuf hack, from original ddx.. not sure of it's |
66 | * exact reason for existing :) PCI access to cmdbuf in | 89 | * exact reason for existing :) PCI access to cmdbuf in |
@@ -70,47 +93,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) | |||
70 | pci_resource_start(dev->pdev, 1), | 93 | pci_resource_start(dev->pdev, 1), |
71 | dev_priv->fb_available_size, | 94 | dev_priv->fb_available_size, |
72 | NV_MEM_ACCESS_RO, | 95 | NV_MEM_ACCESS_RO, |
73 | NV_MEM_TARGET_PCI, &pushbuf); | 96 | NV_MEM_TARGET_PCI, |
74 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; | 97 | &chan->pushbuf); |
75 | } | 98 | } |
76 | 99 | ||
77 | nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); | 100 | out: |
78 | nouveau_gpuobj_ref(NULL, &pushbuf); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | static struct nouveau_bo * | ||
83 | nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) | ||
84 | { | ||
85 | struct nouveau_bo *pushbuf = NULL; | ||
86 | int location, ret; | ||
87 | |||
88 | if (nouveau_vram_pushbuf) | ||
89 | location = TTM_PL_FLAG_VRAM; | ||
90 | else | ||
91 | location = TTM_PL_FLAG_TT; | ||
92 | |||
93 | ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf); | ||
94 | if (ret) { | ||
95 | NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret); | ||
96 | return NULL; | ||
97 | } | ||
98 | |||
99 | ret = nouveau_bo_pin(pushbuf, location); | ||
100 | if (ret) { | ||
101 | NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret); | ||
102 | nouveau_bo_ref(NULL, &pushbuf); | ||
103 | return NULL; | ||
104 | } | ||
105 | |||
106 | ret = nouveau_bo_map(pushbuf); | ||
107 | if (ret) { | 101 | if (ret) { |
108 | nouveau_bo_unpin(pushbuf); | 102 | NV_ERROR(dev, "error initialising pushbuf: %d\n", ret); |
109 | nouveau_bo_ref(NULL, &pushbuf); | 103 | nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); |
110 | return NULL; | 104 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
105 | if (chan->pushbuf_bo) { | ||
106 | nouveau_bo_unmap(chan->pushbuf_bo); | ||
107 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | ||
108 | } | ||
111 | } | 109 | } |
112 | 110 | ||
113 | return pushbuf; | 111 | return 0; |
114 | } | 112 | } |
115 | 113 | ||
116 | /* allocates and initializes a fifo for user space consumption */ | 114 | /* allocates and initializes a fifo for user space consumption */ |
@@ -121,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
121 | { | 119 | { |
122 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 120 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
123 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 121 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; |
122 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
124 | struct nouveau_channel *chan; | 123 | struct nouveau_channel *chan; |
125 | unsigned long flags; | 124 | unsigned long flags; |
126 | int ret; | 125 | int ret; |
@@ -159,20 +158,16 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
159 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); | 158 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); |
160 | INIT_LIST_HEAD(&chan->nvsw.flip); | 159 | INIT_LIST_HEAD(&chan->nvsw.flip); |
161 | INIT_LIST_HEAD(&chan->fence.pending); | 160 | INIT_LIST_HEAD(&chan->fence.pending); |
161 | spin_lock_init(&chan->fence.lock); | ||
162 | 162 | ||
163 | /* Allocate DMA push buffer */ | 163 | /* setup channel's memory and vm */ |
164 | chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); | 164 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); |
165 | if (!chan->pushbuf_bo) { | 165 | if (ret) { |
166 | ret = -ENOMEM; | 166 | NV_ERROR(dev, "gpuobj %d\n", ret); |
167 | NV_ERROR(dev, "pushbuf %d\n", ret); | ||
168 | nouveau_channel_put(&chan); | 167 | nouveau_channel_put(&chan); |
169 | return ret; | 168 | return ret; |
170 | } | 169 | } |
171 | 170 | ||
172 | nouveau_dma_pre_init(chan); | ||
173 | chan->user_put = 0x40; | ||
174 | chan->user_get = 0x44; | ||
175 | |||
176 | /* Allocate space for per-channel fixed notifier memory */ | 171 | /* Allocate space for per-channel fixed notifier memory */ |
177 | ret = nouveau_notifier_init_channel(chan); | 172 | ret = nouveau_notifier_init_channel(chan); |
178 | if (ret) { | 173 | if (ret) { |
@@ -181,21 +176,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
181 | return ret; | 176 | return ret; |
182 | } | 177 | } |
183 | 178 | ||
184 | /* Setup channel's default objects */ | 179 | /* Allocate DMA push buffer */ |
185 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); | 180 | ret = nouveau_channel_pushbuf_init(chan); |
186 | if (ret) { | 181 | if (ret) { |
187 | NV_ERROR(dev, "gpuobj %d\n", ret); | 182 | NV_ERROR(dev, "pushbuf %d\n", ret); |
188 | nouveau_channel_put(&chan); | 183 | nouveau_channel_put(&chan); |
189 | return ret; | 184 | return ret; |
190 | } | 185 | } |
191 | 186 | ||
192 | /* Create a dma object for the push buffer */ | 187 | nouveau_dma_pre_init(chan); |
193 | ret = nouveau_channel_pushbuf_ctxdma_init(chan); | 188 | chan->user_put = 0x40; |
194 | if (ret) { | 189 | chan->user_get = 0x44; |
195 | NV_ERROR(dev, "pbctxdma %d\n", ret); | ||
196 | nouveau_channel_put(&chan); | ||
197 | return ret; | ||
198 | } | ||
199 | 190 | ||
200 | /* disable the fifo caches */ | 191 | /* disable the fifo caches */ |
201 | pfifo->reassign(dev, false); | 192 | pfifo->reassign(dev, false); |
@@ -220,6 +211,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
220 | nouveau_debugfs_channel_init(chan); | 211 | nouveau_debugfs_channel_init(chan); |
221 | 212 | ||
222 | NV_DEBUG(dev, "channel %d initialised\n", chan->id); | 213 | NV_DEBUG(dev, "channel %d initialised\n", chan->id); |
214 | if (fpriv) { | ||
215 | spin_lock(&fpriv->lock); | ||
216 | list_add(&chan->list, &fpriv->channels); | ||
217 | spin_unlock(&fpriv->lock); | ||
218 | } | ||
223 | *chan_ret = chan; | 219 | *chan_ret = chan; |
224 | return 0; | 220 | return 0; |
225 | } | 221 | } |
@@ -236,29 +232,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref) | |||
236 | } | 232 | } |
237 | 233 | ||
238 | struct nouveau_channel * | 234 | struct nouveau_channel * |
239 | nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) | 235 | nouveau_channel_get(struct drm_file *file_priv, int id) |
240 | { | 236 | { |
241 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 237 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); |
242 | struct nouveau_channel *chan; | 238 | struct nouveau_channel *chan; |
243 | unsigned long flags; | ||
244 | |||
245 | if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR)) | ||
246 | return ERR_PTR(-EINVAL); | ||
247 | |||
248 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | ||
249 | chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); | ||
250 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | ||
251 | |||
252 | if (unlikely(!chan)) | ||
253 | return ERR_PTR(-EINVAL); | ||
254 | 239 | ||
255 | if (unlikely(file_priv && chan->file_priv != file_priv)) { | 240 | spin_lock(&fpriv->lock); |
256 | nouveau_channel_put_unlocked(&chan); | 241 | list_for_each_entry(chan, &fpriv->channels, list) { |
257 | return ERR_PTR(-EINVAL); | 242 | if (chan->id == id) { |
243 | chan = nouveau_channel_get_unlocked(chan); | ||
244 | spin_unlock(&fpriv->lock); | ||
245 | mutex_lock(&chan->mutex); | ||
246 | return chan; | ||
247 | } | ||
258 | } | 248 | } |
249 | spin_unlock(&fpriv->lock); | ||
259 | 250 | ||
260 | mutex_lock(&chan->mutex); | 251 | return ERR_PTR(-EINVAL); |
261 | return chan; | ||
262 | } | 252 | } |
263 | 253 | ||
264 | void | 254 | void |
@@ -312,12 +302,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) | |||
312 | /* destroy any resources the channel owned */ | 302 | /* destroy any resources the channel owned */ |
313 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); | 303 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
314 | if (chan->pushbuf_bo) { | 304 | if (chan->pushbuf_bo) { |
305 | nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); | ||
315 | nouveau_bo_unmap(chan->pushbuf_bo); | 306 | nouveau_bo_unmap(chan->pushbuf_bo); |
316 | nouveau_bo_unpin(chan->pushbuf_bo); | 307 | nouveau_bo_unpin(chan->pushbuf_bo); |
317 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | 308 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); |
318 | } | 309 | } |
319 | nouveau_gpuobj_channel_takedown(chan); | 310 | nouveau_ramht_ref(NULL, &chan->ramht, chan); |
320 | nouveau_notifier_takedown_channel(chan); | 311 | nouveau_notifier_takedown_channel(chan); |
312 | nouveau_gpuobj_channel_takedown(chan); | ||
321 | 313 | ||
322 | nouveau_channel_ref(NULL, pchan); | 314 | nouveau_channel_ref(NULL, pchan); |
323 | } | 315 | } |
@@ -383,10 +375,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) | |||
383 | 375 | ||
384 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); | 376 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); |
385 | for (i = 0; i < engine->fifo.channels; i++) { | 377 | for (i = 0; i < engine->fifo.channels; i++) { |
386 | chan = nouveau_channel_get(dev, file_priv, i); | 378 | chan = nouveau_channel_get(file_priv, i); |
387 | if (IS_ERR(chan)) | 379 | if (IS_ERR(chan)) |
388 | continue; | 380 | continue; |
389 | 381 | ||
382 | list_del(&chan->list); | ||
390 | atomic_dec(&chan->users); | 383 | atomic_dec(&chan->users); |
391 | nouveau_channel_put(&chan); | 384 | nouveau_channel_put(&chan); |
392 | } | 385 | } |
@@ -459,10 +452,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | |||
459 | struct drm_nouveau_channel_free *req = data; | 452 | struct drm_nouveau_channel_free *req = data; |
460 | struct nouveau_channel *chan; | 453 | struct nouveau_channel *chan; |
461 | 454 | ||
462 | chan = nouveau_channel_get(dev, file_priv, req->channel); | 455 | chan = nouveau_channel_get(file_priv, req->channel); |
463 | if (IS_ERR(chan)) | 456 | if (IS_ERR(chan)) |
464 | return PTR_ERR(chan); | 457 | return PTR_ERR(chan); |
465 | 458 | ||
459 | list_del(&chan->list); | ||
466 | atomic_dec(&chan->users); | 460 | atomic_dec(&chan->users); |
467 | nouveau_channel_put(&chan); | 461 | nouveau_channel_put(&chan); |
468 | return 0; | 462 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 1595d0b6e81..939d4df0777 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -40,7 +40,7 @@ | |||
40 | static void nouveau_connector_hotplug(void *, int); | 40 | static void nouveau_connector_hotplug(void *, int); |
41 | 41 | ||
42 | static struct nouveau_encoder * | 42 | static struct nouveau_encoder * |
43 | find_encoder_by_type(struct drm_connector *connector, int type) | 43 | find_encoder(struct drm_connector *connector, int type) |
44 | { | 44 | { |
45 | struct drm_device *dev = connector->dev; | 45 | struct drm_device *dev = connector->dev; |
46 | struct nouveau_encoder *nv_encoder; | 46 | struct nouveau_encoder *nv_encoder; |
@@ -170,8 +170,8 @@ nouveau_connector_of_detect(struct drm_connector *connector) | |||
170 | struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); | 170 | struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); |
171 | 171 | ||
172 | if (!dn || | 172 | if (!dn || |
173 | !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) || | 173 | !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) || |
174 | (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG)))) | 174 | (nv_encoder = find_encoder(connector, OUTPUT_ANALOG)))) |
175 | return NULL; | 175 | return NULL; |
176 | 176 | ||
177 | for_each_child_of_node(dn, cn) { | 177 | for_each_child_of_node(dn, cn) { |
@@ -233,6 +233,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) | |||
233 | struct drm_device *dev = connector->dev; | 233 | struct drm_device *dev = connector->dev; |
234 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 234 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
235 | struct nouveau_encoder *nv_encoder = NULL; | 235 | struct nouveau_encoder *nv_encoder = NULL; |
236 | struct nouveau_encoder *nv_partner; | ||
236 | struct nouveau_i2c_chan *i2c; | 237 | struct nouveau_i2c_chan *i2c; |
237 | int type; | 238 | int type; |
238 | 239 | ||
@@ -266,19 +267,22 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) | |||
266 | * same i2c channel so the value returned from ddc_detect | 267 | * same i2c channel so the value returned from ddc_detect |
267 | * isn't necessarily correct. | 268 | * isn't necessarily correct. |
268 | */ | 269 | */ |
269 | if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { | 270 | nv_partner = NULL; |
271 | if (nv_encoder->dcb->type == OUTPUT_TMDS) | ||
272 | nv_partner = find_encoder(connector, OUTPUT_ANALOG); | ||
273 | if (nv_encoder->dcb->type == OUTPUT_ANALOG) | ||
274 | nv_partner = find_encoder(connector, OUTPUT_TMDS); | ||
275 | |||
276 | if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG && | ||
277 | nv_partner->dcb->type == OUTPUT_TMDS) || | ||
278 | (nv_encoder->dcb->type == OUTPUT_TMDS && | ||
279 | nv_partner->dcb->type == OUTPUT_ANALOG))) { | ||
270 | if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) | 280 | if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) |
271 | type = OUTPUT_TMDS; | 281 | type = OUTPUT_TMDS; |
272 | else | 282 | else |
273 | type = OUTPUT_ANALOG; | 283 | type = OUTPUT_ANALOG; |
274 | 284 | ||
275 | nv_encoder = find_encoder_by_type(connector, type); | 285 | nv_encoder = find_encoder(connector, type); |
276 | if (!nv_encoder) { | ||
277 | NV_ERROR(dev, "Detected %d encoder on %s, " | ||
278 | "but no object!\n", type, | ||
279 | drm_get_connector_name(connector)); | ||
280 | return connector_status_disconnected; | ||
281 | } | ||
282 | } | 286 | } |
283 | 287 | ||
284 | nouveau_connector_set_encoder(connector, nv_encoder); | 288 | nouveau_connector_set_encoder(connector, nv_encoder); |
@@ -292,9 +296,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) | |||
292 | } | 296 | } |
293 | 297 | ||
294 | detect_analog: | 298 | detect_analog: |
295 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); | 299 | nv_encoder = find_encoder(connector, OUTPUT_ANALOG); |
296 | if (!nv_encoder && !nouveau_tv_disable) | 300 | if (!nv_encoder && !nouveau_tv_disable) |
297 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); | 301 | nv_encoder = find_encoder(connector, OUTPUT_TV); |
298 | if (nv_encoder && force) { | 302 | if (nv_encoder && force) { |
299 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); | 303 | struct drm_encoder *encoder = to_drm_encoder(nv_encoder); |
300 | struct drm_encoder_helper_funcs *helper = | 304 | struct drm_encoder_helper_funcs *helper = |
@@ -327,7 +331,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) | |||
327 | nv_connector->edid = NULL; | 331 | nv_connector->edid = NULL; |
328 | } | 332 | } |
329 | 333 | ||
330 | nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); | 334 | nv_encoder = find_encoder(connector, OUTPUT_LVDS); |
331 | if (!nv_encoder) | 335 | if (!nv_encoder) |
332 | return connector_status_disconnected; | 336 | return connector_status_disconnected; |
333 | 337 | ||
@@ -405,7 +409,7 @@ nouveau_connector_force(struct drm_connector *connector) | |||
405 | } else | 409 | } else |
406 | type = OUTPUT_ANY; | 410 | type = OUTPUT_ANY; |
407 | 411 | ||
408 | nv_encoder = find_encoder_by_type(connector, type); | 412 | nv_encoder = find_encoder(connector, type); |
409 | if (!nv_encoder) { | 413 | if (!nv_encoder) { |
410 | NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", | 414 | NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", |
411 | drm_get_connector_name(connector)); | 415 | drm_get_connector_name(connector)); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 568caedd721..00bc6eaad55 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, | |||
167 | int delta, int length) | 167 | int delta, int length) |
168 | { | 168 | { |
169 | struct nouveau_bo *pb = chan->pushbuf_bo; | 169 | struct nouveau_bo *pb = chan->pushbuf_bo; |
170 | uint64_t offset = bo->bo.offset + delta; | 170 | struct nouveau_vma *vma; |
171 | int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; | 171 | int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; |
172 | u64 offset; | ||
173 | |||
174 | vma = nouveau_bo_vma_find(bo, chan->vm); | ||
175 | BUG_ON(!vma); | ||
176 | offset = vma->offset + delta; | ||
172 | 177 | ||
173 | BUG_ON(chan->dma.ib_free < 1); | 178 | BUG_ON(chan->dma.ib_free < 1); |
174 | nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); | 179 | nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 02c6f37d8bd..b30ddd8d2e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -73,7 +73,7 @@ int nouveau_ignorelid = 0; | |||
73 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); | 73 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); |
74 | 74 | ||
75 | MODULE_PARM_DESC(noaccel, "Disable all acceleration"); | 75 | MODULE_PARM_DESC(noaccel, "Disable all acceleration"); |
76 | int nouveau_noaccel = 0; | 76 | int nouveau_noaccel = -1; |
77 | module_param_named(noaccel, nouveau_noaccel, int, 0400); | 77 | module_param_named(noaccel, nouveau_noaccel, int, 0400); |
78 | 78 | ||
79 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | 79 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); |
@@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); | |||
119 | int nouveau_msi; | 119 | int nouveau_msi; |
120 | module_param_named(msi, nouveau_msi, int, 0400); | 120 | module_param_named(msi, nouveau_msi, int, 0400); |
121 | 121 | ||
122 | MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n"); | ||
123 | int nouveau_ctxfw; | ||
124 | module_param_named(ctxfw, nouveau_ctxfw, int, 0400); | ||
125 | |||
122 | int nouveau_fbpercrtc; | 126 | int nouveau_fbpercrtc; |
123 | #if 0 | 127 | #if 0 |
124 | module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); | 128 | module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); |
@@ -210,10 +214,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
210 | pfifo->unload_context(dev); | 214 | pfifo->unload_context(dev); |
211 | 215 | ||
212 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { | 216 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { |
213 | if (dev_priv->eng[e]) { | 217 | if (!dev_priv->eng[e]) |
214 | ret = dev_priv->eng[e]->fini(dev, e); | 218 | continue; |
215 | if (ret) | 219 | |
216 | goto out_abort; | 220 | ret = dev_priv->eng[e]->fini(dev, e, true); |
221 | if (ret) { | ||
222 | NV_ERROR(dev, "... engine %d failed: %d\n", i, ret); | ||
223 | goto out_abort; | ||
217 | } | 224 | } |
218 | } | 225 | } |
219 | 226 | ||
@@ -354,7 +361,7 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
354 | 361 | ||
355 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 362 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
356 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 363 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
357 | u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT; | 364 | u32 offset = nv_crtc->cursor.nvbo->bo.offset; |
358 | 365 | ||
359 | nv_crtc->cursor.set_offset(nv_crtc, offset); | 366 | nv_crtc->cursor.set_offset(nv_crtc, offset); |
360 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, | 367 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, |
@@ -389,7 +396,9 @@ static struct drm_driver driver = { | |||
389 | .firstopen = nouveau_firstopen, | 396 | .firstopen = nouveau_firstopen, |
390 | .lastclose = nouveau_lastclose, | 397 | .lastclose = nouveau_lastclose, |
391 | .unload = nouveau_unload, | 398 | .unload = nouveau_unload, |
399 | .open = nouveau_open, | ||
392 | .preclose = nouveau_preclose, | 400 | .preclose = nouveau_preclose, |
401 | .postclose = nouveau_postclose, | ||
393 | #if defined(CONFIG_DRM_NOUVEAU_DEBUG) | 402 | #if defined(CONFIG_DRM_NOUVEAU_DEBUG) |
394 | .debugfs_init = nouveau_debugfs_init, | 403 | .debugfs_init = nouveau_debugfs_init, |
395 | .debugfs_cleanup = nouveau_debugfs_takedown, | 404 | .debugfs_cleanup = nouveau_debugfs_takedown, |
@@ -420,6 +429,8 @@ static struct drm_driver driver = { | |||
420 | 429 | ||
421 | .gem_init_object = nouveau_gem_object_new, | 430 | .gem_init_object = nouveau_gem_object_new, |
422 | .gem_free_object = nouveau_gem_object_del, | 431 | .gem_free_object = nouveau_gem_object_del, |
432 | .gem_open_object = nouveau_gem_object_open, | ||
433 | .gem_close_object = nouveau_gem_object_close, | ||
423 | 434 | ||
424 | .name = DRIVER_NAME, | 435 | .name = DRIVER_NAME, |
425 | .desc = DRIVER_DESC, | 436 | .desc = DRIVER_DESC, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9c56331941e..d7d51deb34b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -46,9 +46,17 @@ | |||
46 | #include "ttm/ttm_module.h" | 46 | #include "ttm/ttm_module.h" |
47 | 47 | ||
48 | struct nouveau_fpriv { | 48 | struct nouveau_fpriv { |
49 | struct ttm_object_file *tfile; | 49 | spinlock_t lock; |
50 | struct list_head channels; | ||
51 | struct nouveau_vm *vm; | ||
50 | }; | 52 | }; |
51 | 53 | ||
54 | static inline struct nouveau_fpriv * | ||
55 | nouveau_fpriv(struct drm_file *file_priv) | ||
56 | { | ||
57 | return file_priv ? file_priv->driver_priv : NULL; | ||
58 | } | ||
59 | |||
52 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | 60 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
53 | 61 | ||
54 | #include "nouveau_drm.h" | 62 | #include "nouveau_drm.h" |
@@ -69,7 +77,7 @@ struct nouveau_mem { | |||
69 | struct drm_device *dev; | 77 | struct drm_device *dev; |
70 | 78 | ||
71 | struct nouveau_vma bar_vma; | 79 | struct nouveau_vma bar_vma; |
72 | struct nouveau_vma tmp_vma; | 80 | struct nouveau_vma vma[2]; |
73 | u8 page_shift; | 81 | u8 page_shift; |
74 | 82 | ||
75 | struct drm_mm_node *tag; | 83 | struct drm_mm_node *tag; |
@@ -107,7 +115,8 @@ struct nouveau_bo { | |||
107 | 115 | ||
108 | struct nouveau_channel *channel; | 116 | struct nouveau_channel *channel; |
109 | 117 | ||
110 | struct nouveau_vma vma; | 118 | struct list_head vma_list; |
119 | unsigned page_shift; | ||
111 | 120 | ||
112 | uint32_t tile_mode; | 121 | uint32_t tile_mode; |
113 | uint32_t tile_flags; | 122 | uint32_t tile_flags; |
@@ -176,9 +185,10 @@ struct nouveau_gpuobj { | |||
176 | uint32_t flags; | 185 | uint32_t flags; |
177 | 186 | ||
178 | u32 size; | 187 | u32 size; |
179 | u32 pinst; | 188 | u32 pinst; /* PRAMIN BAR offset */ |
180 | u32 cinst; | 189 | u32 cinst; /* Channel offset */ |
181 | u64 vinst; | 190 | u64 vinst; /* VRAM address */ |
191 | u64 linst; /* VM address */ | ||
182 | 192 | ||
183 | uint32_t engine; | 193 | uint32_t engine; |
184 | uint32_t class; | 194 | uint32_t class; |
@@ -201,6 +211,7 @@ enum nouveau_channel_mutex_class { | |||
201 | 211 | ||
202 | struct nouveau_channel { | 212 | struct nouveau_channel { |
203 | struct drm_device *dev; | 213 | struct drm_device *dev; |
214 | struct list_head list; | ||
204 | int id; | 215 | int id; |
205 | 216 | ||
206 | /* references to the channel data structure */ | 217 | /* references to the channel data structure */ |
@@ -228,15 +239,18 @@ struct nouveau_channel { | |||
228 | uint32_t sequence; | 239 | uint32_t sequence; |
229 | uint32_t sequence_ack; | 240 | uint32_t sequence_ack; |
230 | atomic_t last_sequence_irq; | 241 | atomic_t last_sequence_irq; |
242 | struct nouveau_vma vma; | ||
231 | } fence; | 243 | } fence; |
232 | 244 | ||
233 | /* DMA push buffer */ | 245 | /* DMA push buffer */ |
234 | struct nouveau_gpuobj *pushbuf; | 246 | struct nouveau_gpuobj *pushbuf; |
235 | struct nouveau_bo *pushbuf_bo; | 247 | struct nouveau_bo *pushbuf_bo; |
248 | struct nouveau_vma pushbuf_vma; | ||
236 | uint32_t pushbuf_base; | 249 | uint32_t pushbuf_base; |
237 | 250 | ||
238 | /* Notifier memory */ | 251 | /* Notifier memory */ |
239 | struct nouveau_bo *notifier_bo; | 252 | struct nouveau_bo *notifier_bo; |
253 | struct nouveau_vma notifier_vma; | ||
240 | struct drm_mm notifier_heap; | 254 | struct drm_mm notifier_heap; |
241 | 255 | ||
242 | /* PFIFO context */ | 256 | /* PFIFO context */ |
@@ -278,6 +292,7 @@ struct nouveau_channel { | |||
278 | 292 | ||
279 | uint32_t sw_subchannel[8]; | 293 | uint32_t sw_subchannel[8]; |
280 | 294 | ||
295 | struct nouveau_vma dispc_vma[2]; | ||
281 | struct { | 296 | struct { |
282 | struct nouveau_gpuobj *vblsem; | 297 | struct nouveau_gpuobj *vblsem; |
283 | uint32_t vblsem_head; | 298 | uint32_t vblsem_head; |
@@ -297,7 +312,7 @@ struct nouveau_channel { | |||
297 | struct nouveau_exec_engine { | 312 | struct nouveau_exec_engine { |
298 | void (*destroy)(struct drm_device *, int engine); | 313 | void (*destroy)(struct drm_device *, int engine); |
299 | int (*init)(struct drm_device *, int engine); | 314 | int (*init)(struct drm_device *, int engine); |
300 | int (*fini)(struct drm_device *, int engine); | 315 | int (*fini)(struct drm_device *, int engine, bool suspend); |
301 | int (*context_new)(struct nouveau_channel *, int engine); | 316 | int (*context_new)(struct nouveau_channel *, int engine); |
302 | void (*context_del)(struct nouveau_channel *, int engine); | 317 | void (*context_del)(struct nouveau_channel *, int engine); |
303 | int (*object_new)(struct nouveau_channel *, int engine, | 318 | int (*object_new)(struct nouveau_channel *, int engine, |
@@ -314,7 +329,8 @@ struct nouveau_instmem_engine { | |||
314 | int (*suspend)(struct drm_device *dev); | 329 | int (*suspend)(struct drm_device *dev); |
315 | void (*resume)(struct drm_device *dev); | 330 | void (*resume)(struct drm_device *dev); |
316 | 331 | ||
317 | int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); | 332 | int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *, |
333 | u32 size, u32 align); | ||
318 | void (*put)(struct nouveau_gpuobj *); | 334 | void (*put)(struct nouveau_gpuobj *); |
319 | int (*map)(struct nouveau_gpuobj *); | 335 | int (*map)(struct nouveau_gpuobj *); |
320 | void (*unmap)(struct nouveau_gpuobj *); | 336 | void (*unmap)(struct nouveau_gpuobj *); |
@@ -445,9 +461,9 @@ struct nouveau_pm_level { | |||
445 | struct nouveau_pm_temp_sensor_constants { | 461 | struct nouveau_pm_temp_sensor_constants { |
446 | u16 offset_constant; | 462 | u16 offset_constant; |
447 | s16 offset_mult; | 463 | s16 offset_mult; |
448 | u16 offset_div; | 464 | s16 offset_div; |
449 | u16 slope_mult; | 465 | s16 slope_mult; |
450 | u16 slope_div; | 466 | s16 slope_div; |
451 | }; | 467 | }; |
452 | 468 | ||
453 | struct nouveau_pm_threshold_temp { | 469 | struct nouveau_pm_threshold_temp { |
@@ -488,7 +504,10 @@ struct nouveau_pm_engine { | |||
488 | }; | 504 | }; |
489 | 505 | ||
490 | struct nouveau_vram_engine { | 506 | struct nouveau_vram_engine { |
507 | struct nouveau_mm *mm; | ||
508 | |||
491 | int (*init)(struct drm_device *); | 509 | int (*init)(struct drm_device *); |
510 | void (*takedown)(struct drm_device *dev); | ||
492 | int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, | 511 | int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, |
493 | u32 type, struct nouveau_mem **); | 512 | u32 type, struct nouveau_mem **); |
494 | void (*put)(struct drm_device *, struct nouveau_mem **); | 513 | void (*put)(struct drm_device *, struct nouveau_mem **); |
@@ -608,6 +627,7 @@ enum nouveau_card_type { | |||
608 | 627 | ||
609 | struct drm_nouveau_private { | 628 | struct drm_nouveau_private { |
610 | struct drm_device *dev; | 629 | struct drm_device *dev; |
630 | bool noaccel; | ||
611 | 631 | ||
612 | /* the card type, takes NV_* as values */ | 632 | /* the card type, takes NV_* as values */ |
613 | enum nouveau_card_type card_type; | 633 | enum nouveau_card_type card_type; |
@@ -700,7 +720,6 @@ struct drm_nouveau_private { | |||
700 | /* VRAM/fb configuration */ | 720 | /* VRAM/fb configuration */ |
701 | uint64_t vram_size; | 721 | uint64_t vram_size; |
702 | uint64_t vram_sys_base; | 722 | uint64_t vram_sys_base; |
703 | u32 vram_rblock_size; | ||
704 | 723 | ||
705 | uint64_t fb_phys; | 724 | uint64_t fb_phys; |
706 | uint64_t fb_available_size; | 725 | uint64_t fb_available_size; |
@@ -784,12 +803,15 @@ extern int nouveau_override_conntype; | |||
784 | extern char *nouveau_perflvl; | 803 | extern char *nouveau_perflvl; |
785 | extern int nouveau_perflvl_wr; | 804 | extern int nouveau_perflvl_wr; |
786 | extern int nouveau_msi; | 805 | extern int nouveau_msi; |
806 | extern int nouveau_ctxfw; | ||
787 | 807 | ||
788 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); | 808 | extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); |
789 | extern int nouveau_pci_resume(struct pci_dev *pdev); | 809 | extern int nouveau_pci_resume(struct pci_dev *pdev); |
790 | 810 | ||
791 | /* nouveau_state.c */ | 811 | /* nouveau_state.c */ |
812 | extern int nouveau_open(struct drm_device *, struct drm_file *); | ||
792 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | 813 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); |
814 | extern void nouveau_postclose(struct drm_device *, struct drm_file *); | ||
793 | extern int nouveau_load(struct drm_device *, unsigned long flags); | 815 | extern int nouveau_load(struct drm_device *, unsigned long flags); |
794 | extern int nouveau_firstopen(struct drm_device *); | 816 | extern int nouveau_firstopen(struct drm_device *); |
795 | extern void nouveau_lastclose(struct drm_device *); | 817 | extern void nouveau_lastclose(struct drm_device *); |
@@ -847,7 +869,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev, | |||
847 | extern struct nouveau_channel * | 869 | extern struct nouveau_channel * |
848 | nouveau_channel_get_unlocked(struct nouveau_channel *); | 870 | nouveau_channel_get_unlocked(struct nouveau_channel *); |
849 | extern struct nouveau_channel * | 871 | extern struct nouveau_channel * |
850 | nouveau_channel_get(struct drm_device *, struct drm_file *, int id); | 872 | nouveau_channel_get(struct drm_file *, int id); |
851 | extern void nouveau_channel_put_unlocked(struct nouveau_channel **); | 873 | extern void nouveau_channel_put_unlocked(struct nouveau_channel **); |
852 | extern void nouveau_channel_put(struct nouveau_channel **); | 874 | extern void nouveau_channel_put(struct nouveau_channel **); |
853 | extern void nouveau_channel_ref(struct nouveau_channel *chan, | 875 | extern void nouveau_channel_ref(struct nouveau_channel *chan, |
@@ -1120,7 +1142,6 @@ extern int nvc0_fifo_unload_context(struct drm_device *); | |||
1120 | 1142 | ||
1121 | /* nv04_graph.c */ | 1143 | /* nv04_graph.c */ |
1122 | extern int nv04_graph_create(struct drm_device *); | 1144 | extern int nv04_graph_create(struct drm_device *); |
1123 | extern void nv04_graph_fifo_access(struct drm_device *, bool); | ||
1124 | extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); | 1145 | extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); |
1125 | extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, | 1146 | extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, |
1126 | u32 class, u32 mthd, u32 data); | 1147 | u32 class, u32 mthd, u32 data); |
@@ -1169,7 +1190,8 @@ extern int nv04_instmem_init(struct drm_device *); | |||
1169 | extern void nv04_instmem_takedown(struct drm_device *); | 1190 | extern void nv04_instmem_takedown(struct drm_device *); |
1170 | extern int nv04_instmem_suspend(struct drm_device *); | 1191 | extern int nv04_instmem_suspend(struct drm_device *); |
1171 | extern void nv04_instmem_resume(struct drm_device *); | 1192 | extern void nv04_instmem_resume(struct drm_device *); |
1172 | extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); | 1193 | extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *, |
1194 | u32 size, u32 align); | ||
1173 | extern void nv04_instmem_put(struct nouveau_gpuobj *); | 1195 | extern void nv04_instmem_put(struct nouveau_gpuobj *); |
1174 | extern int nv04_instmem_map(struct nouveau_gpuobj *); | 1196 | extern int nv04_instmem_map(struct nouveau_gpuobj *); |
1175 | extern void nv04_instmem_unmap(struct nouveau_gpuobj *); | 1197 | extern void nv04_instmem_unmap(struct nouveau_gpuobj *); |
@@ -1180,7 +1202,8 @@ extern int nv50_instmem_init(struct drm_device *); | |||
1180 | extern void nv50_instmem_takedown(struct drm_device *); | 1202 | extern void nv50_instmem_takedown(struct drm_device *); |
1181 | extern int nv50_instmem_suspend(struct drm_device *); | 1203 | extern int nv50_instmem_suspend(struct drm_device *); |
1182 | extern void nv50_instmem_resume(struct drm_device *); | 1204 | extern void nv50_instmem_resume(struct drm_device *); |
1183 | extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); | 1205 | extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *, |
1206 | u32 size, u32 align); | ||
1184 | extern void nv50_instmem_put(struct nouveau_gpuobj *); | 1207 | extern void nv50_instmem_put(struct nouveau_gpuobj *); |
1185 | extern int nv50_instmem_map(struct nouveau_gpuobj *); | 1208 | extern int nv50_instmem_map(struct nouveau_gpuobj *); |
1186 | extern void nv50_instmem_unmap(struct nouveau_gpuobj *); | 1209 | extern void nv50_instmem_unmap(struct nouveau_gpuobj *); |
@@ -1247,10 +1270,9 @@ extern int nv04_crtc_create(struct drm_device *, int index); | |||
1247 | 1270 | ||
1248 | /* nouveau_bo.c */ | 1271 | /* nouveau_bo.c */ |
1249 | extern struct ttm_bo_driver nouveau_bo_driver; | 1272 | extern struct ttm_bo_driver nouveau_bo_driver; |
1250 | extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *, | 1273 | extern int nouveau_bo_new(struct drm_device *, int size, int align, |
1251 | int size, int align, uint32_t flags, | 1274 | uint32_t flags, uint32_t tile_mode, |
1252 | uint32_t tile_mode, uint32_t tile_flags, | 1275 | uint32_t tile_flags, struct nouveau_bo **); |
1253 | struct nouveau_bo **); | ||
1254 | extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); | 1276 | extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); |
1255 | extern int nouveau_bo_unpin(struct nouveau_bo *); | 1277 | extern int nouveau_bo_unpin(struct nouveau_bo *); |
1256 | extern int nouveau_bo_map(struct nouveau_bo *); | 1278 | extern int nouveau_bo_map(struct nouveau_bo *); |
@@ -1265,6 +1287,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); | |||
1265 | extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, | 1287 | extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, |
1266 | bool no_wait_reserve, bool no_wait_gpu); | 1288 | bool no_wait_reserve, bool no_wait_gpu); |
1267 | 1289 | ||
1290 | extern struct nouveau_vma * | ||
1291 | nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); | ||
1292 | extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *, | ||
1293 | struct nouveau_vma *); | ||
1294 | extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); | ||
1295 | |||
1268 | /* nouveau_fence.c */ | 1296 | /* nouveau_fence.c */ |
1269 | struct nouveau_fence; | 1297 | struct nouveau_fence; |
1270 | extern int nouveau_fence_init(struct drm_device *); | 1298 | extern int nouveau_fence_init(struct drm_device *); |
@@ -1310,12 +1338,14 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj) | |||
1310 | } | 1338 | } |
1311 | 1339 | ||
1312 | /* nouveau_gem.c */ | 1340 | /* nouveau_gem.c */ |
1313 | extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, | 1341 | extern int nouveau_gem_new(struct drm_device *, int size, int align, |
1314 | int size, int align, uint32_t domain, | 1342 | uint32_t domain, uint32_t tile_mode, |
1315 | uint32_t tile_mode, uint32_t tile_flags, | 1343 | uint32_t tile_flags, struct nouveau_bo **); |
1316 | struct nouveau_bo **); | ||
1317 | extern int nouveau_gem_object_new(struct drm_gem_object *); | 1344 | extern int nouveau_gem_object_new(struct drm_gem_object *); |
1318 | extern void nouveau_gem_object_del(struct drm_gem_object *); | 1345 | extern void nouveau_gem_object_del(struct drm_gem_object *); |
1346 | extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); | ||
1347 | extern void nouveau_gem_object_close(struct drm_gem_object *, | ||
1348 | struct drm_file *); | ||
1319 | extern int nouveau_gem_ioctl_new(struct drm_device *, void *, | 1349 | extern int nouveau_gem_ioctl_new(struct drm_device *, void *, |
1320 | struct drm_file *); | 1350 | struct drm_file *); |
1321 | extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, | 1351 | extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h index a3a88ad00f8..95c843e684b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fb.h +++ b/drivers/gpu/drm/nouveau/nouveau_fb.h | |||
@@ -30,6 +30,7 @@ | |||
30 | struct nouveau_framebuffer { | 30 | struct nouveau_framebuffer { |
31 | struct drm_framebuffer base; | 31 | struct drm_framebuffer base; |
32 | struct nouveau_bo *nvbo; | 32 | struct nouveau_bo *nvbo; |
33 | struct nouveau_vma vma; | ||
33 | u32 r_dma; | 34 | u32 r_dma; |
34 | u32 r_format; | 35 | u32 r_format; |
35 | u32 r_pitch; | 36 | u32 r_pitch; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 39aee6d4daf..14a8627efe4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
279 | struct fb_info *info; | 279 | struct fb_info *info; |
280 | struct drm_framebuffer *fb; | 280 | struct drm_framebuffer *fb; |
281 | struct nouveau_framebuffer *nouveau_fb; | 281 | struct nouveau_framebuffer *nouveau_fb; |
282 | struct nouveau_channel *chan; | ||
282 | struct nouveau_bo *nvbo; | 283 | struct nouveau_bo *nvbo; |
283 | struct drm_mode_fb_cmd mode_cmd; | 284 | struct drm_mode_fb_cmd mode_cmd; |
284 | struct pci_dev *pdev = dev->pdev; | 285 | struct pci_dev *pdev = dev->pdev; |
@@ -296,8 +297,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
296 | size = mode_cmd.pitch * mode_cmd.height; | 297 | size = mode_cmd.pitch * mode_cmd.height; |
297 | size = roundup(size, PAGE_SIZE); | 298 | size = roundup(size, PAGE_SIZE); |
298 | 299 | ||
299 | ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, | 300 | ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM, |
300 | NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); | 301 | 0, 0x0000, &nvbo); |
301 | if (ret) { | 302 | if (ret) { |
302 | NV_ERROR(dev, "failed to allocate framebuffer\n"); | 303 | NV_ERROR(dev, "failed to allocate framebuffer\n"); |
303 | goto out; | 304 | goto out; |
@@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
318 | goto out; | 319 | goto out; |
319 | } | 320 | } |
320 | 321 | ||
322 | chan = nouveau_nofbaccel ? NULL : dev_priv->channel; | ||
323 | if (chan && dev_priv->card_type >= NV_50) { | ||
324 | ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma); | ||
325 | if (ret) { | ||
326 | NV_ERROR(dev, "failed to map fb into chan: %d\n", ret); | ||
327 | chan = NULL; | ||
328 | } | ||
329 | } | ||
330 | |||
321 | mutex_lock(&dev->struct_mutex); | 331 | mutex_lock(&dev->struct_mutex); |
322 | 332 | ||
323 | info = framebuffer_alloc(0, device); | 333 | info = framebuffer_alloc(0, device); |
@@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) | |||
448 | 458 | ||
449 | if (nouveau_fb->nvbo) { | 459 | if (nouveau_fb->nvbo) { |
450 | nouveau_bo_unmap(nouveau_fb->nvbo); | 460 | nouveau_bo_unmap(nouveau_fb->nvbo); |
461 | nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); | ||
451 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); | 462 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); |
452 | nouveau_fb->nvbo = NULL; | 463 | nouveau_fb->nvbo = NULL; |
453 | } | 464 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 7347075ca5b..ae22dfaa2a0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema) | |||
336 | { | 336 | { |
337 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 337 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; |
338 | struct nouveau_fence *fence = NULL; | 338 | struct nouveau_fence *fence = NULL; |
339 | u64 offset = chan->fence.vma.offset + sema->mem->start; | ||
339 | int ret; | 340 | int ret; |
340 | 341 | ||
341 | if (dev_priv->chipset < 0x84) { | 342 | if (dev_priv->chipset < 0x84) { |
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema) | |||
345 | 346 | ||
346 | BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3); | 347 | BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3); |
347 | OUT_RING (chan, NvSema); | 348 | OUT_RING (chan, NvSema); |
348 | OUT_RING (chan, sema->mem->start); | 349 | OUT_RING (chan, offset); |
349 | OUT_RING (chan, 1); | 350 | OUT_RING (chan, 1); |
350 | } else | 351 | } else |
351 | if (dev_priv->chipset < 0xc0) { | 352 | if (dev_priv->chipset < 0xc0) { |
352 | struct nouveau_vma *vma = &dev_priv->fence.bo->vma; | ||
353 | u64 offset = vma->offset + sema->mem->start; | ||
354 | |||
355 | ret = RING_SPACE(chan, 7); | 353 | ret = RING_SPACE(chan, 7); |
356 | if (ret) | 354 | if (ret) |
357 | return ret; | 355 | return ret; |
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema) | |||
364 | OUT_RING (chan, 1); | 362 | OUT_RING (chan, 1); |
365 | OUT_RING (chan, 1); /* ACQUIRE_EQ */ | 363 | OUT_RING (chan, 1); /* ACQUIRE_EQ */ |
366 | } else { | 364 | } else { |
367 | struct nouveau_vma *vma = &dev_priv->fence.bo->vma; | ||
368 | u64 offset = vma->offset + sema->mem->start; | ||
369 | |||
370 | ret = RING_SPACE(chan, 5); | 365 | ret = RING_SPACE(chan, 5); |
371 | if (ret) | 366 | if (ret) |
372 | return ret; | 367 | return ret; |
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema) | |||
394 | { | 389 | { |
395 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 390 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; |
396 | struct nouveau_fence *fence = NULL; | 391 | struct nouveau_fence *fence = NULL; |
392 | u64 offset = chan->fence.vma.offset + sema->mem->start; | ||
397 | int ret; | 393 | int ret; |
398 | 394 | ||
399 | if (dev_priv->chipset < 0x84) { | 395 | if (dev_priv->chipset < 0x84) { |
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema) | |||
403 | 399 | ||
404 | BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2); | 400 | BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2); |
405 | OUT_RING (chan, NvSema); | 401 | OUT_RING (chan, NvSema); |
406 | OUT_RING (chan, sema->mem->start); | 402 | OUT_RING (chan, offset); |
407 | BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); | 403 | BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); |
408 | OUT_RING (chan, 1); | 404 | OUT_RING (chan, 1); |
409 | } else | 405 | } else |
410 | if (dev_priv->chipset < 0xc0) { | 406 | if (dev_priv->chipset < 0xc0) { |
411 | struct nouveau_vma *vma = &dev_priv->fence.bo->vma; | ||
412 | u64 offset = vma->offset + sema->mem->start; | ||
413 | |||
414 | ret = RING_SPACE(chan, 7); | 407 | ret = RING_SPACE(chan, 7); |
415 | if (ret) | 408 | if (ret) |
416 | return ret; | 409 | return ret; |
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema) | |||
423 | OUT_RING (chan, 1); | 416 | OUT_RING (chan, 1); |
424 | OUT_RING (chan, 2); /* RELEASE */ | 417 | OUT_RING (chan, 2); /* RELEASE */ |
425 | } else { | 418 | } else { |
426 | struct nouveau_vma *vma = &dev_priv->fence.bo->vma; | ||
427 | u64 offset = vma->offset + sema->mem->start; | ||
428 | |||
429 | ret = RING_SPACE(chan, 5); | 419 | ret = RING_SPACE(chan, 5); |
430 | if (ret) | 420 | if (ret) |
431 | return ret; | 421 | return ret; |
@@ -540,10 +530,15 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
540 | nouveau_gpuobj_ref(NULL, &obj); | 530 | nouveau_gpuobj_ref(NULL, &obj); |
541 | if (ret) | 531 | if (ret) |
542 | return ret; | 532 | return ret; |
533 | } else | ||
534 | if (USE_SEMA(dev)) { | ||
535 | /* map fence bo into channel's vm */ | ||
536 | ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, | ||
537 | &chan->fence.vma); | ||
538 | if (ret) | ||
539 | return ret; | ||
543 | } | 540 | } |
544 | 541 | ||
545 | INIT_LIST_HEAD(&chan->fence.pending); | ||
546 | spin_lock_init(&chan->fence.lock); | ||
547 | atomic_set(&chan->fence.last_sequence_irq, 0); | 542 | atomic_set(&chan->fence.last_sequence_irq, 0); |
548 | return 0; | 543 | return 0; |
549 | } | 544 | } |
@@ -551,10 +546,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
551 | void | 546 | void |
552 | nouveau_fence_channel_fini(struct nouveau_channel *chan) | 547 | nouveau_fence_channel_fini(struct nouveau_channel *chan) |
553 | { | 548 | { |
549 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
554 | struct nouveau_fence *tmp, *fence; | 550 | struct nouveau_fence *tmp, *fence; |
555 | 551 | ||
556 | spin_lock(&chan->fence.lock); | 552 | spin_lock(&chan->fence.lock); |
557 | |||
558 | list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { | 553 | list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { |
559 | fence->signalled = true; | 554 | fence->signalled = true; |
560 | list_del(&fence->entry); | 555 | list_del(&fence->entry); |
@@ -564,8 +559,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) | |||
564 | 559 | ||
565 | kref_put(&fence->refcount, nouveau_fence_del); | 560 | kref_put(&fence->refcount, nouveau_fence_del); |
566 | } | 561 | } |
567 | |||
568 | spin_unlock(&chan->fence.lock); | 562 | spin_unlock(&chan->fence.lock); |
563 | |||
564 | nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma); | ||
569 | } | 565 | } |
570 | 566 | ||
571 | int | 567 | int |
@@ -577,7 +573,7 @@ nouveau_fence_init(struct drm_device *dev) | |||
577 | 573 | ||
578 | /* Create a shared VRAM heap for cross-channel sync. */ | 574 | /* Create a shared VRAM heap for cross-channel sync. */ |
579 | if (USE_SEMA(dev)) { | 575 | if (USE_SEMA(dev)) { |
580 | ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM, | 576 | ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM, |
581 | 0, 0, &dev_priv->fence.bo); | 577 | 0, 0, &dev_priv->fence.bo); |
582 | if (ret) | 578 | if (ret) |
583 | return ret; | 579 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index b52e4601824..5f0bc57fdaa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -60,9 +60,71 @@ nouveau_gem_object_del(struct drm_gem_object *gem) | |||
60 | } | 60 | } |
61 | 61 | ||
62 | int | 62 | int |
63 | nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, | 63 | nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) |
64 | int size, int align, uint32_t domain, uint32_t tile_mode, | 64 | { |
65 | uint32_t tile_flags, struct nouveau_bo **pnvbo) | 65 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); |
66 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); | ||
67 | struct nouveau_vma *vma; | ||
68 | int ret; | ||
69 | |||
70 | if (!fpriv->vm) | ||
71 | return 0; | ||
72 | |||
73 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); | ||
74 | if (ret) | ||
75 | return ret; | ||
76 | |||
77 | vma = nouveau_bo_vma_find(nvbo, fpriv->vm); | ||
78 | if (!vma) { | ||
79 | vma = kzalloc(sizeof(*vma), GFP_KERNEL); | ||
80 | if (!vma) { | ||
81 | ret = -ENOMEM; | ||
82 | goto out; | ||
83 | } | ||
84 | |||
85 | ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma); | ||
86 | if (ret) { | ||
87 | kfree(vma); | ||
88 | goto out; | ||
89 | } | ||
90 | } else { | ||
91 | vma->refcount++; | ||
92 | } | ||
93 | |||
94 | out: | ||
95 | ttm_bo_unreserve(&nvbo->bo); | ||
96 | return ret; | ||
97 | } | ||
98 | |||
99 | void | ||
100 | nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) | ||
101 | { | ||
102 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
103 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); | ||
104 | struct nouveau_vma *vma; | ||
105 | int ret; | ||
106 | |||
107 | if (!fpriv->vm) | ||
108 | return; | ||
109 | |||
110 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); | ||
111 | if (ret) | ||
112 | return; | ||
113 | |||
114 | vma = nouveau_bo_vma_find(nvbo, fpriv->vm); | ||
115 | if (vma) { | ||
116 | if (--vma->refcount == 0) { | ||
117 | nouveau_bo_vma_del(nvbo, vma); | ||
118 | kfree(vma); | ||
119 | } | ||
120 | } | ||
121 | ttm_bo_unreserve(&nvbo->bo); | ||
122 | } | ||
123 | |||
124 | int | ||
125 | nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, | ||
126 | uint32_t tile_mode, uint32_t tile_flags, | ||
127 | struct nouveau_bo **pnvbo) | ||
66 | { | 128 | { |
67 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 129 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
68 | struct nouveau_bo *nvbo; | 130 | struct nouveau_bo *nvbo; |
@@ -76,7 +138,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
76 | if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) | 138 | if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) |
77 | flags |= TTM_PL_FLAG_SYSTEM; | 139 | flags |= TTM_PL_FLAG_SYSTEM; |
78 | 140 | ||
79 | ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, | 141 | ret = nouveau_bo_new(dev, size, align, flags, tile_mode, |
80 | tile_flags, pnvbo); | 142 | tile_flags, pnvbo); |
81 | if (ret) | 143 | if (ret) |
82 | return ret; | 144 | return ret; |
@@ -103,17 +165,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
103 | } | 165 | } |
104 | 166 | ||
105 | static int | 167 | static int |
106 | nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) | 168 | nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, |
169 | struct drm_nouveau_gem_info *rep) | ||
107 | { | 170 | { |
171 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
108 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); | 172 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
173 | struct nouveau_vma *vma; | ||
109 | 174 | ||
110 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) | 175 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) |
111 | rep->domain = NOUVEAU_GEM_DOMAIN_GART; | 176 | rep->domain = NOUVEAU_GEM_DOMAIN_GART; |
112 | else | 177 | else |
113 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; | 178 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; |
114 | 179 | ||
115 | rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; | ||
116 | rep->offset = nvbo->bo.offset; | 180 | rep->offset = nvbo->bo.offset; |
181 | if (fpriv->vm) { | ||
182 | vma = nouveau_bo_vma_find(nvbo, fpriv->vm); | ||
183 | if (!vma) | ||
184 | return -EINVAL; | ||
185 | |||
186 | rep->offset = vma->offset; | ||
187 | } | ||
188 | |||
189 | rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; | ||
117 | rep->map_handle = nvbo->bo.addr_space_offset; | 190 | rep->map_handle = nvbo->bo.addr_space_offset; |
118 | rep->tile_mode = nvbo->tile_mode; | 191 | rep->tile_mode = nvbo->tile_mode; |
119 | rep->tile_flags = nvbo->tile_flags; | 192 | rep->tile_flags = nvbo->tile_flags; |
@@ -127,7 +200,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
127 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 200 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
128 | struct drm_nouveau_gem_new *req = data; | 201 | struct drm_nouveau_gem_new *req = data; |
129 | struct nouveau_bo *nvbo = NULL; | 202 | struct nouveau_bo *nvbo = NULL; |
130 | struct nouveau_channel *chan = NULL; | ||
131 | int ret = 0; | 203 | int ret = 0; |
132 | 204 | ||
133 | if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) | 205 | if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) |
@@ -138,28 +210,21 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
138 | return -EINVAL; | 210 | return -EINVAL; |
139 | } | 211 | } |
140 | 212 | ||
141 | if (req->channel_hint) { | 213 | ret = nouveau_gem_new(dev, req->info.size, req->align, |
142 | chan = nouveau_channel_get(dev, file_priv, req->channel_hint); | ||
143 | if (IS_ERR(chan)) | ||
144 | return PTR_ERR(chan); | ||
145 | } | ||
146 | |||
147 | ret = nouveau_gem_new(dev, chan, req->info.size, req->align, | ||
148 | req->info.domain, req->info.tile_mode, | 214 | req->info.domain, req->info.tile_mode, |
149 | req->info.tile_flags, &nvbo); | 215 | req->info.tile_flags, &nvbo); |
150 | if (chan) | ||
151 | nouveau_channel_put(&chan); | ||
152 | if (ret) | 216 | if (ret) |
153 | return ret; | 217 | return ret; |
154 | 218 | ||
155 | ret = nouveau_gem_info(nvbo->gem, &req->info); | ||
156 | if (ret) | ||
157 | goto out; | ||
158 | |||
159 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 219 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
220 | if (ret == 0) { | ||
221 | ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); | ||
222 | if (ret) | ||
223 | drm_gem_handle_delete(file_priv, req->info.handle); | ||
224 | } | ||
225 | |||
160 | /* drop reference from allocate - handle holds it now */ | 226 | /* drop reference from allocate - handle holds it now */ |
161 | drm_gem_object_unreference_unlocked(nvbo->gem); | 227 | drm_gem_object_unreference_unlocked(nvbo->gem); |
162 | out: | ||
163 | return ret; | 228 | return ret; |
164 | } | 229 | } |
165 | 230 | ||
@@ -318,6 +383,7 @@ static int | |||
318 | validate_list(struct nouveau_channel *chan, struct list_head *list, | 383 | validate_list(struct nouveau_channel *chan, struct list_head *list, |
319 | struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) | 384 | struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) |
320 | { | 385 | { |
386 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
321 | struct drm_nouveau_gem_pushbuf_bo __user *upbbo = | 387 | struct drm_nouveau_gem_pushbuf_bo __user *upbbo = |
322 | (void __force __user *)(uintptr_t)user_pbbo_ptr; | 388 | (void __force __user *)(uintptr_t)user_pbbo_ptr; |
323 | struct drm_device *dev = chan->dev; | 389 | struct drm_device *dev = chan->dev; |
@@ -356,24 +422,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
356 | return ret; | 422 | return ret; |
357 | } | 423 | } |
358 | 424 | ||
359 | if (nvbo->bo.offset == b->presumed.offset && | 425 | if (dev_priv->card_type < NV_50) { |
360 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && | 426 | if (nvbo->bo.offset == b->presumed.offset && |
361 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || | 427 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && |
362 | (nvbo->bo.mem.mem_type == TTM_PL_TT && | 428 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || |
363 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) | 429 | (nvbo->bo.mem.mem_type == TTM_PL_TT && |
364 | continue; | 430 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) |
431 | continue; | ||
365 | 432 | ||
366 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) | 433 | if (nvbo->bo.mem.mem_type == TTM_PL_TT) |
367 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; | 434 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; |
368 | else | 435 | else |
369 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; | 436 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; |
370 | b->presumed.offset = nvbo->bo.offset; | 437 | b->presumed.offset = nvbo->bo.offset; |
371 | b->presumed.valid = 0; | 438 | b->presumed.valid = 0; |
372 | relocs++; | 439 | relocs++; |
373 | 440 | ||
374 | if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, | 441 | if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, |
375 | &b->presumed, sizeof(b->presumed))) | 442 | &b->presumed, sizeof(b->presumed))) |
376 | return -EFAULT; | 443 | return -EFAULT; |
444 | } | ||
377 | } | 445 | } |
378 | 446 | ||
379 | return relocs; | 447 | return relocs; |
@@ -548,7 +616,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
548 | struct nouveau_fence *fence = NULL; | 616 | struct nouveau_fence *fence = NULL; |
549 | int i, j, ret = 0, do_reloc = 0; | 617 | int i, j, ret = 0, do_reloc = 0; |
550 | 618 | ||
551 | chan = nouveau_channel_get(dev, file_priv, req->channel); | 619 | chan = nouveau_channel_get(file_priv, req->channel); |
552 | if (IS_ERR(chan)) | 620 | if (IS_ERR(chan)) |
553 | return PTR_ERR(chan); | 621 | return PTR_ERR(chan); |
554 | 622 | ||
@@ -782,7 +850,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, | |||
782 | if (!gem) | 850 | if (!gem) |
783 | return -ENOENT; | 851 | return -ENOENT; |
784 | 852 | ||
785 | ret = nouveau_gem_info(gem, req); | 853 | ret = nouveau_gem_info(file_priv, gem, req); |
786 | drm_gem_object_unreference_unlocked(gem); | 854 | drm_gem_object_unreference_unlocked(gem); |
787 | return ret; | 855 | return ret; |
788 | } | 856 | } |
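The nouveau_gem.c hunks above move GEM objects to per-client address spaces: a buffer may be mapped into several VMs at once, so each per-VM mapping carries a refcount and is only torn down on the last put. The reworked ioctl also creates the GEM handle first and deletes it again if the info query fails, so a failed query no longer leaks the object. A minimal standalone sketch of the refcount discipline follows — the types and names are illustrative stand-ins, not the driver's API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative mock of a per-VM mapping; simplified stand-in
     * for the driver's struct nouveau_vma. */
    struct vma {
        int refcount;
        unsigned long offset;
    };

    /* First user creates the mapping; later users just take a ref. */
    static struct vma *vma_get(struct vma **slot, unsigned long offset)
    {
        if (*slot) {
            (*slot)->refcount++;
            return *slot;
        }
        *slot = calloc(1, sizeof(**slot));
        if (!*slot)
            return NULL;
        (*slot)->refcount = 1;
        (*slot)->offset = offset;
        return *slot;
    }

    /* Mirror of the unmap path above: only the last put frees. */
    static void vma_put(struct vma **slot)
    {
        if (*slot && --(*slot)->refcount == 0) {
            free(*slot);
            *slot = NULL;
        }
    }

    int main(void)
    {
        struct vma *v = NULL;
        vma_get(&v, 0x1000);    /* first open: refcount = 1 */
        vma_get(&v, 0x1000);    /* second open: refcount = 2 */
        vma_put(&v);            /* mapping survives */
        printf("alive: %s\n", v ? "yes" : "no");
        vma_put(&v);            /* last put: freed */
        printf("alive: %s\n", v ? "yes" : "no");
        return 0;
    }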
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 2ba7265bc96..868c7fd7485 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -79,7 +79,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS) | |||
79 | int i; | 79 | int i; |
80 | 80 | ||
81 | stat = nv_rd32(dev, NV03_PMC_INTR_0); | 81 | stat = nv_rd32(dev, NV03_PMC_INTR_0); |
82 | if (!stat) | 82 | if (stat == 0 || stat == ~0) |
83 | return IRQ_NONE; | 83 | return IRQ_NONE; |
84 | 84 | ||
85 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 85 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
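The widened IRQ check above guards against a status read of all-ones, which on PCI usually means the device has fallen off the bus or is powered down, not that every interrupt source fired at once. A tiny compilable sketch of the guard, with a hypothetical helper name:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* 0 means "not our interrupt"; all-ones typically means the
     * MMIO read hit a dead or unplugged device. */
    static bool pmc_intr_valid(uint32_t stat)
    {
        return stat != 0 && stat != 0xffffffffu;
    }

    int main(void)
    {
        printf("%d %d %d\n", pmc_intr_valid(0),
               pmc_intr_valid(0xffffffffu), pmc_intr_valid(0x100));
        return 0;
    }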
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 5ee14d216ce..f9ae2fc3d6f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
397 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) | 397 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) |
398 | dma_bits = 40; | 398 | dma_bits = 40; |
399 | } else | 399 | } else |
400 | if (0 && drm_pci_device_is_pcie(dev) && | 400 | if (0 && pci_is_pcie(dev->pdev) && |
401 | dev_priv->chipset > 0x40 && | 401 | dev_priv->chipset > 0x40 && |
402 | dev_priv->chipset != 0x45) { | 402 | dev_priv->chipset != 0x45) { |
403 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) | 403 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) |
@@ -423,38 +423,6 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
423 | return ret; | 423 | return ret; |
424 | } | 424 | } |
425 | 425 | ||
426 | /* reserve space at end of VRAM for PRAMIN */ | ||
427 | if (dev_priv->card_type >= NV_50) { | ||
428 | dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024; | ||
429 | } else | ||
430 | if (dev_priv->card_type >= NV_40) { | ||
431 | u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8); | ||
432 | u32 rsvd; | ||
433 | |||
434 | /* estimate grctx size, the magics come from nv40_grctx.c */ | ||
435 | if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs; | ||
436 | else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs; | ||
437 | else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; | ||
438 | else rsvd = 0x4a40 * vs; | ||
439 | rsvd += 16 * 1024; | ||
440 | rsvd *= dev_priv->engine.fifo.channels; | ||
441 | |||
442 | /* pciegart table */ | ||
443 | if (drm_pci_device_is_pcie(dev)) | ||
444 | rsvd += 512 * 1024; | ||
445 | |||
446 | /* object storage */ | ||
447 | rsvd += 512 * 1024; | ||
448 | |||
449 | dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); | ||
450 | } else { | ||
451 | dev_priv->ramin_rsvd_vram = 512 * 1024; | ||
452 | } | ||
453 | |||
454 | ret = dev_priv->engine.vram.init(dev); | ||
455 | if (ret) | ||
456 | return ret; | ||
457 | |||
458 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | 426 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); |
459 | if (dev_priv->vram_sys_base) { | 427 | if (dev_priv->vram_sys_base) { |
460 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | 428 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", |
@@ -479,7 +447,7 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
479 | } | 447 | } |
480 | 448 | ||
481 | if (dev_priv->card_type < NV_50) { | 449 | if (dev_priv->card_type < NV_50) { |
482 | ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, | 450 | ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM, |
483 | 0, 0, &dev_priv->vga_ram); | 451 | 0, 0, &dev_priv->vga_ram); |
484 | if (ret == 0) | 452 | if (ret == 0) |
485 | ret = nouveau_bo_pin(dev_priv->vga_ram, | 453 | ret = nouveau_bo_pin(dev_priv->vga_ram, |
@@ -729,37 +697,31 @@ nouveau_mem_timing_fini(struct drm_device *dev) | |||
729 | } | 697 | } |
730 | 698 | ||
731 | static int | 699 | static int |
732 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) | 700 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) |
733 | { | 701 | { |
734 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | 702 | /* nothing to do */ |
735 | struct nouveau_mm *mm; | ||
736 | u64 size, block, rsvd; | ||
737 | int ret; | ||
738 | |||
739 | rsvd = (256 * 1024); /* vga memory */ | ||
740 | size = (p_size << PAGE_SHIFT) - rsvd; | ||
741 | block = dev_priv->vram_rblock_size; | ||
742 | |||
743 | ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12); | ||
744 | if (ret) | ||
745 | return ret; | ||
746 | |||
747 | man->priv = mm; | ||
748 | return 0; | 703 | return 0; |
749 | } | 704 | } |
750 | 705 | ||
751 | static int | 706 | static int |
752 | nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) | 707 | nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) |
753 | { | 708 | { |
754 | struct nouveau_mm *mm = man->priv; | 709 | /* nothing to do */ |
755 | int ret; | 710 | return 0; |
711 | } | ||
756 | 712 | ||
757 | ret = nouveau_mm_fini(&mm); | 713 | static inline void |
758 | if (ret) | 714 | nouveau_mem_node_cleanup(struct nouveau_mem *node) |
759 | return ret; | 715 | { |
716 | if (node->vma[0].node) { | ||
717 | nouveau_vm_unmap(&node->vma[0]); | ||
718 | nouveau_vm_put(&node->vma[0]); | ||
719 | } | ||
760 | 720 | ||
761 | man->priv = NULL; | 721 | if (node->vma[1].node) { |
762 | return 0; | 722 | nouveau_vm_unmap(&node->vma[1]); |
723 | nouveau_vm_put(&node->vma[1]); | ||
724 | } | ||
763 | } | 725 | } |
764 | 726 | ||
765 | static void | 727 | static void |
@@ -768,14 +730,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, | |||
768 | { | 730 | { |
769 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); | 731 | struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); |
770 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | 732 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; |
771 | struct nouveau_mem *node = mem->mm_node; | ||
772 | struct drm_device *dev = dev_priv->dev; | 733 | struct drm_device *dev = dev_priv->dev; |
773 | 734 | ||
774 | if (node->tmp_vma.node) { | 735 | nouveau_mem_node_cleanup(mem->mm_node); |
775 | nouveau_vm_unmap(&node->tmp_vma); | ||
776 | nouveau_vm_put(&node->tmp_vma); | ||
777 | } | ||
778 | |||
779 | vram->put(dev, (struct nouveau_mem **)&mem->mm_node); | 736 | vram->put(dev, (struct nouveau_mem **)&mem->mm_node); |
780 | } | 737 | } |
781 | 738 | ||
@@ -794,7 +751,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
794 | int ret; | 751 | int ret; |
795 | 752 | ||
796 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) | 753 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) |
797 | size_nc = 1 << nvbo->vma.node->type; | 754 | size_nc = 1 << nvbo->page_shift; |
798 | 755 | ||
799 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, | 756 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, |
800 | mem->page_alignment << PAGE_SHIFT, size_nc, | 757 | mem->page_alignment << PAGE_SHIFT, size_nc, |
@@ -804,9 +761,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
804 | return (ret == -ENOSPC) ? 0 : ret; | 761 | return (ret == -ENOSPC) ? 0 : ret; |
805 | } | 762 | } |
806 | 763 | ||
807 | node->page_shift = 12; | 764 | node->page_shift = nvbo->page_shift; |
808 | if (nvbo->vma.node) | ||
809 | node->page_shift = nvbo->vma.node->type; | ||
810 | 765 | ||
811 | mem->mm_node = node; | 766 | mem->mm_node = node; |
812 | mem->start = node->offset >> PAGE_SHIFT; | 767 | mem->start = node->offset >> PAGE_SHIFT; |
@@ -862,15 +817,9 @@ static void | |||
862 | nouveau_gart_manager_del(struct ttm_mem_type_manager *man, | 817 | nouveau_gart_manager_del(struct ttm_mem_type_manager *man, |
863 | struct ttm_mem_reg *mem) | 818 | struct ttm_mem_reg *mem) |
864 | { | 819 | { |
865 | struct nouveau_mem *node = mem->mm_node; | 820 | nouveau_mem_node_cleanup(mem->mm_node); |
866 | 821 | kfree(mem->mm_node); | |
867 | if (node->tmp_vma.node) { | ||
868 | nouveau_vm_unmap(&node->tmp_vma); | ||
869 | nouveau_vm_put(&node->tmp_vma); | ||
870 | } | ||
871 | |||
872 | mem->mm_node = NULL; | 822 | mem->mm_node = NULL; |
873 | kfree(node); | ||
874 | } | 823 | } |
875 | 824 | ||
876 | static int | 825 | static int |
@@ -880,11 +829,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
880 | struct ttm_mem_reg *mem) | 829 | struct ttm_mem_reg *mem) |
881 | { | 830 | { |
882 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | 831 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); |
883 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
884 | struct nouveau_vma *vma = &nvbo->vma; | ||
885 | struct nouveau_vm *vm = vma->vm; | ||
886 | struct nouveau_mem *node; | 832 | struct nouveau_mem *node; |
887 | int ret; | ||
888 | 833 | ||
889 | if (unlikely((mem->num_pages << PAGE_SHIFT) >= | 834 | if (unlikely((mem->num_pages << PAGE_SHIFT) >= |
890 | dev_priv->gart_info.aper_size)) | 835 | dev_priv->gart_info.aper_size)) |
@@ -893,24 +838,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
893 | node = kzalloc(sizeof(*node), GFP_KERNEL); | 838 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
894 | if (!node) | 839 | if (!node) |
895 | return -ENOMEM; | 840 | return -ENOMEM; |
841 | node->page_shift = 12; | ||
896 | 842 | ||
897 | /* This node must be for evicting large-paged VRAM | ||
898 | * to system memory. Due to a nv50 limitation of | ||
899 | * not being able to mix large/small pages within | ||
900 | * the same PDE, we need to create a temporary | ||
901 | * small-paged VMA for the eviction. | ||
902 | */ | ||
903 | if (vma->node->type != vm->spg_shift) { | ||
904 | ret = nouveau_vm_get(vm, (u64)vma->node->length << 12, | ||
905 | vm->spg_shift, NV_MEM_ACCESS_RW, | ||
906 | &node->tmp_vma); | ||
907 | if (ret) { | ||
908 | kfree(node); | ||
909 | return ret; | ||
910 | } | ||
911 | } | ||
912 | |||
913 | node->page_shift = nvbo->vma.node->type; | ||
914 | mem->mm_node = node; | 843 | mem->mm_node = node; |
915 | mem->start = 0; | 844 | mem->start = 0; |
916 | return 0; | 845 | return 0; |
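nouveau_mem_node_cleanup factors out the unmap-then-release teardown for both VMA slots a memory node may now carry, replacing the old single tmp_vma case; the GART manager reuses the same helper. A hedged mock of the pattern — the types below are invented for illustration:

    #include <stdio.h>

    struct mock_vma { int mapped; };

    static void vma_unmap(struct mock_vma *v) { v->mapped = 0; }
    static void vma_put(struct mock_vma *v)   { (void)v; /* return VA range */ }

    /* Both slots are optional; tear down only what was set up. */
    static void mem_node_cleanup(struct mock_vma vma[2])
    {
        for (int i = 0; i < 2; i++) {
            if (vma[i].mapped) {
                vma_unmap(&vma[i]);
                vma_put(&vma[i]);
            }
        }
    }

    int main(void)
    {
        struct mock_vma vma[2] = { { 1 }, { 0 } };
        mem_node_cleanup(vma);
        printf("slot0 mapped: %d\n", vma[0].mapped);
        return 0;
    }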
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 7609756b6fa..1640dec3b82 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
@@ -158,11 +158,18 @@ int | |||
158 | nouveau_mm_fini(struct nouveau_mm **prmm) | 158 | nouveau_mm_fini(struct nouveau_mm **prmm) |
159 | { | 159 | { |
160 | struct nouveau_mm *rmm = *prmm; | 160 | struct nouveau_mm *rmm = *prmm; |
161 | struct nouveau_mm_node *heap = | 161 | struct nouveau_mm_node *node, *heap = |
162 | list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); | 162 | list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); |
163 | 163 | ||
164 | if (!list_is_singular(&rmm->nodes)) | 164 | if (!list_is_singular(&rmm->nodes)) { |
165 | printk(KERN_ERR "nouveau_mm not empty at destroy time!\n"); | ||
166 | list_for_each_entry(node, &rmm->nodes, nl_entry) { | ||
167 | printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n", | ||
168 | node->type, node->offset, node->length); | ||
169 | } | ||
170 | WARN_ON(1); | ||
165 | return -EBUSY; | 171 | return -EBUSY; |
172 | } | ||
166 | 173 | ||
167 | kfree(heap); | 174 | kfree(heap); |
168 | kfree(rmm); | 175 | kfree(rmm); |
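nouveau_mm_fini no longer fails silently: it prints each node still outstanding and trips WARN_ON so leaks arrive with a backtrace attached. The same diagnose-on-destroy idea as a standalone C program, with assumed field names:

    #include <stdio.h>

    struct node { int type; unsigned offset, length; struct node *next; };

    /* Refuse to tear down a non-empty allocator, but say *what*
     * leaked first -- a bare error code gives nothing to act on. */
    static int allocator_fini(struct node *leaked)
    {
        if (leaked) {
            fprintf(stderr, "allocator not empty at destroy time!\n");
            for (struct node *n = leaked; n; n = n->next)
                fprintf(stderr, "0x%02x: 0x%08x 0x%08x\n",
                        n->type, n->offset, n->length);
            return -1;  /* the driver returns -EBUSY here */
        }
        return 0;
    }

    int main(void)
    {
        struct node leak = { 0x01, 0x1000, 0x2000, 0 };
        return allocator_fini(&leak) ? 1 : 0;
    }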
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h index 1f7483aae9a..b9c016d2155 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.h +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h | |||
@@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, | |||
52 | void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); | 52 | void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); |
53 | 53 | ||
54 | int nv50_vram_init(struct drm_device *); | 54 | int nv50_vram_init(struct drm_device *); |
55 | void nv50_vram_fini(struct drm_device *); | ||
55 | int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, | 56 | int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, |
56 | u32 memtype, struct nouveau_mem **); | 57 | u32 memtype, struct nouveau_mem **); |
57 | void nv50_vram_del(struct drm_device *, struct nouveau_mem **); | 58 | void nv50_vram_del(struct drm_device *, struct nouveau_mem **); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 5b39718ae1f..6abdbe6530a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -34,6 +34,7 @@ int | |||
34 | nouveau_notifier_init_channel(struct nouveau_channel *chan) | 34 | nouveau_notifier_init_channel(struct nouveau_channel *chan) |
35 | { | 35 | { |
36 | struct drm_device *dev = chan->dev; | 36 | struct drm_device *dev = chan->dev; |
37 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
37 | struct nouveau_bo *ntfy = NULL; | 38 | struct nouveau_bo *ntfy = NULL; |
38 | uint32_t flags, ttmpl; | 39 | uint32_t flags, ttmpl; |
39 | int ret; | 40 | int ret; |
@@ -46,7 +47,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
46 | ttmpl = TTM_PL_FLAG_TT; | 47 | ttmpl = TTM_PL_FLAG_TT; |
47 | } | 48 | } |
48 | 49 | ||
49 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); | 50 | ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy); |
50 | if (ret) | 51 | if (ret) |
51 | return ret; | 52 | return ret; |
52 | 53 | ||
@@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
58 | if (ret) | 59 | if (ret) |
59 | goto out_err; | 60 | goto out_err; |
60 | 61 | ||
62 | if (dev_priv->card_type >= NV_50) { | ||
63 | ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma); | ||
64 | if (ret) | ||
65 | goto out_err; | ||
66 | } | ||
67 | |||
61 | ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); | 68 | ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); |
62 | if (ret) | 69 | if (ret) |
63 | goto out_err; | 70 | goto out_err; |
64 | 71 | ||
65 | chan->notifier_bo = ntfy; | 72 | chan->notifier_bo = ntfy; |
66 | out_err: | 73 | out_err: |
67 | if (ret) | 74 | if (ret) { |
75 | nouveau_bo_vma_del(ntfy, &chan->notifier_vma); | ||
68 | drm_gem_object_unreference_unlocked(ntfy->gem); | 76 | drm_gem_object_unreference_unlocked(ntfy->gem); |
77 | } | ||
69 | 78 | ||
70 | return ret; | 79 | return ret; |
71 | } | 80 | } |
@@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) | |||
78 | if (!chan->notifier_bo) | 87 | if (!chan->notifier_bo) |
79 | return; | 88 | return; |
80 | 89 | ||
90 | nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma); | ||
81 | nouveau_bo_unmap(chan->notifier_bo); | 91 | nouveau_bo_unmap(chan->notifier_bo); |
82 | mutex_lock(&dev->struct_mutex); | 92 | mutex_lock(&dev->struct_mutex); |
83 | nouveau_bo_unpin(chan->notifier_bo); | 93 | nouveau_bo_unpin(chan->notifier_bo); |
@@ -122,10 +132,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
122 | target = NV_MEM_TARGET_VRAM; | 132 | target = NV_MEM_TARGET_VRAM; |
123 | else | 133 | else |
124 | target = NV_MEM_TARGET_GART; | 134 | target = NV_MEM_TARGET_GART; |
125 | offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; | 135 | offset = chan->notifier_bo->bo.offset; |
126 | } else { | 136 | } else { |
127 | target = NV_MEM_TARGET_VM; | 137 | target = NV_MEM_TARGET_VM; |
128 | offset = chan->notifier_bo->vma.offset; | 138 | offset = chan->notifier_vma.offset; |
129 | } | 139 | } |
130 | offset += mem->start; | 140 | offset += mem->start; |
131 | 141 | ||
@@ -183,7 +193,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | |||
183 | if (unlikely(dev_priv->card_type >= NV_C0)) | 193 | if (unlikely(dev_priv->card_type >= NV_C0)) |
184 | return -EINVAL; | 194 | return -EINVAL; |
185 | 195 | ||
186 | chan = nouveau_channel_get(dev, file_priv, na->channel); | 196 | chan = nouveau_channel_get(file_priv, na->channel); |
187 | if (IS_ERR(chan)) | 197 | if (IS_ERR(chan)) |
188 | return PTR_ERR(chan); | 198 | return PTR_ERR(chan); |
189 | 199 | ||
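With per-channel VMs on NV50+, the notifier buffer is now mapped into the channel's own address space, and the DMA object base becomes that per-channel virtual offset rather than the BO's global placement; note the error path also unwinds the new mapping. A compilable sketch of the init/unwind shape, using stub helpers that stand in for the real BO calls:

    /* Stub helpers standing in for BO alloc/map/heap init. */
    static int alloc_buffer(void)            { return 0; }
    static int map_into_channel_vm(void)     { return 0; }
    static int init_heap(void)               { return 0; }
    static void unmap_from_channel_vm(void)  { }
    static void free_buffer(void)            { }

    static int notifier_init(void)
    {
        int ret;

        ret = alloc_buffer();
        if (ret)
            return ret;

        ret = map_into_channel_vm();    /* new on NV50+ */
        if (ret)
            goto out_err;

        ret = init_heap();
        if (ret)
            goto out_err;
        return 0;

    out_err:
        unmap_from_channel_vm();    /* must tolerate a never-made map */
        free_buffer();
        return ret;
    }

    int main(void) { return notifier_init(); }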
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 8f97016f5b2..159b7c437d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, | |||
125 | int ret = -EINVAL; | 125 | int ret = -EINVAL; |
126 | 126 | ||
127 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 127 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
128 | if (chid > 0 && chid < dev_priv->engine.fifo.channels) | 128 | if (chid >= 0 && chid < dev_priv->engine.fifo.channels) |
129 | chan = dev_priv->channels.ptr[chid]; | 129 | chan = dev_priv->channels.ptr[chid]; |
130 | if (chan) | 130 | if (chan) |
131 | ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); | 131 | ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); |
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
191 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 191 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
192 | spin_unlock(&dev_priv->ramin_lock); | 192 | spin_unlock(&dev_priv->ramin_lock); |
193 | 193 | ||
194 | if (chan) { | 194 | if (!(flags & NVOBJ_FLAG_VM) && chan) { |
195 | ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); | 195 | ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); |
196 | if (ramin) | 196 | if (ramin) |
197 | ramin = drm_mm_get_block(ramin, size, align); | 197 | ramin = drm_mm_get_block(ramin, size, align); |
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
208 | gpuobj->vinst = ramin->start + chan->ramin->vinst; | 208 | gpuobj->vinst = ramin->start + chan->ramin->vinst; |
209 | gpuobj->node = ramin; | 209 | gpuobj->node = ramin; |
210 | } else { | 210 | } else { |
211 | ret = instmem->get(gpuobj, size, align); | 211 | ret = instmem->get(gpuobj, chan, size, align); |
212 | if (ret) { | 212 | if (ret) { |
213 | nouveau_gpuobj_ref(NULL, &gpuobj); | 213 | nouveau_gpuobj_ref(NULL, &gpuobj); |
214 | return ret; | 214 | return ret; |
@@ -690,35 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
690 | return 0; | 690 | return 0; |
691 | } | 691 | } |
692 | 692 | ||
693 | static int | ||
694 | nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) | ||
695 | { | ||
696 | struct drm_device *dev = chan->dev; | ||
697 | struct nouveau_gpuobj *pgd = NULL; | ||
698 | struct nouveau_vm_pgd *vpgd; | ||
699 | int ret, i; | ||
700 | |||
701 | ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin); | ||
702 | if (ret) | ||
703 | return ret; | ||
704 | |||
705 | /* create page directory for this vm if none currently exists, | ||
706 | * will be destroyed automagically when last reference to the | ||
707 | * vm is removed | ||
708 | */ | ||
709 | if (list_empty(&vm->pgd_list)) { | ||
710 | ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd); | ||
711 | if (ret) | ||
712 | return ret; | ||
713 | } | ||
714 | nouveau_vm_ref(vm, &chan->vm, pgd); | ||
715 | nouveau_gpuobj_ref(NULL, &pgd); | ||
716 | |||
717 | /* point channel at vm's page directory */ | ||
718 | vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); | ||
719 | nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); | ||
720 | nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); | ||
721 | nv_wo32(chan->ramin, 0x0208, 0xffffffff); | ||
722 | nv_wo32(chan->ramin, 0x020c, 0x000000ff); | ||
723 | |||
724 | /* map display semaphore buffers into channel's vm */ | ||
725 | for (i = 0; i < 2; i++) { | ||
726 | struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; | ||
727 | |||
728 | ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm, | ||
729 | &chan->dispc_vma[i]); | ||
730 | if (ret) | ||
731 | return ret; | ||
732 | } | ||
733 | |||
734 | return 0; | ||
735 | } | ||
736 | |||
693 | int | 737 | int |
694 | nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | 738 | nouveau_gpuobj_channel_init(struct nouveau_channel *chan, |
695 | uint32_t vram_h, uint32_t tt_h) | 739 | uint32_t vram_h, uint32_t tt_h) |
696 | { | 740 | { |
697 | struct drm_device *dev = chan->dev; | 741 | struct drm_device *dev = chan->dev; |
698 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 742 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
743 | struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); | ||
744 | struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; | ||
699 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; | 745 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; |
700 | int ret, i; | 746 | int ret, i; |
701 | 747 | ||
702 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | 748 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); |
703 | 749 | if (dev_priv->card_type == NV_C0) | |
704 | if (dev_priv->card_type == NV_C0) { | 750 | return nvc0_gpuobj_channel_init(chan, vm); |
705 | struct nouveau_vm *vm = dev_priv->chan_vm; | ||
706 | struct nouveau_vm_pgd *vpgd; | ||
707 | |||
708 | ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, | ||
709 | &chan->ramin); | ||
710 | if (ret) | ||
711 | return ret; | ||
712 | |||
713 | nouveau_vm_ref(vm, &chan->vm, NULL); | ||
714 | |||
715 | vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); | ||
716 | nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); | ||
717 | nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); | ||
718 | nv_wo32(chan->ramin, 0x0208, 0xffffffff); | ||
719 | nv_wo32(chan->ramin, 0x020c, 0x000000ff); | ||
720 | return 0; | ||
721 | } | ||
722 | 751 | ||
723 | /* Allocate a chunk of memory for per-channel object storage */ | 752 | /* Allocate a chunk of memory for per-channel object storage */ |
724 | ret = nouveau_gpuobj_channel_init_pramin(chan); | 753 | ret = nouveau_gpuobj_channel_init_pramin(chan); |
@@ -731,7 +760,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
731 | * - Allocate per-channel page-directory | 760 | * - Allocate per-channel page-directory |
732 | * - Link with shared channel VM | 761 | * - Link with shared channel VM |
733 | */ | 762 | */ |
734 | if (dev_priv->chan_vm) { | 763 | if (vm) { |
735 | u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; | 764 | u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; |
736 | u64 vm_vinst = chan->ramin->vinst + pgd_offs; | 765 | u64 vm_vinst = chan->ramin->vinst + pgd_offs; |
737 | u32 vm_pinst = chan->ramin->pinst; | 766 | u32 vm_pinst = chan->ramin->pinst; |
@@ -744,7 +773,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
744 | if (ret) | 773 | if (ret) |
745 | return ret; | 774 | return ret; |
746 | 775 | ||
747 | nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); | 776 | nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); |
748 | } | 777 | } |
749 | 778 | ||
750 | /* RAMHT */ | 779 | /* RAMHT */ |
@@ -768,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
768 | struct nouveau_gpuobj *sem = NULL; | 797 | struct nouveau_gpuobj *sem = NULL; |
769 | struct nv50_display_crtc *dispc = | 798 | struct nv50_display_crtc *dispc = |
770 | &nv50_display(dev)->crtc[i]; | 799 | &nv50_display(dev)->crtc[i]; |
771 | u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; | 800 | u64 offset = dispc->sem.bo->bo.offset; |
772 | 801 | ||
773 | ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff, | 802 | ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff, |
774 | NV_MEM_ACCESS_RW, | 803 | NV_MEM_ACCESS_RW, |
@@ -841,13 +870,22 @@ void | |||
841 | nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | 870 | nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) |
842 | { | 871 | { |
843 | struct drm_device *dev = chan->dev; | 872 | struct drm_device *dev = chan->dev; |
873 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
874 | int i; | ||
844 | 875 | ||
845 | NV_DEBUG(dev, "ch%d\n", chan->id); | 876 | NV_DEBUG(dev, "ch%d\n", chan->id); |
846 | 877 | ||
847 | nouveau_ramht_ref(NULL, &chan->ramht, chan); | 878 | if (dev_priv->card_type >= NV_50) { |
879 | struct nv50_display *disp = nv50_display(dev); | ||
880 | |||
881 | for (i = 0; i < 2; i++) { | ||
882 | struct nv50_display_crtc *dispc = &disp->crtc[i]; | ||
883 | nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); | ||
884 | } | ||
848 | 885 | ||
849 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | 886 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); |
850 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | 887 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
888 | } | ||
851 | 889 | ||
852 | if (drm_mm_initialized(&chan->ramin_heap)) | 890 | if (drm_mm_initialized(&chan->ramin_heap)) |
853 | drm_mm_takedown(&chan->ramin_heap); | 891 | drm_mm_takedown(&chan->ramin_heap); |
@@ -909,7 +947,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | |||
909 | if (init->handle == ~0) | 947 | if (init->handle == ~0) |
910 | return -EINVAL; | 948 | return -EINVAL; |
911 | 949 | ||
912 | chan = nouveau_channel_get(dev, file_priv, init->channel); | 950 | chan = nouveau_channel_get(file_priv, init->channel); |
913 | if (IS_ERR(chan)) | 951 | if (IS_ERR(chan)) |
914 | return PTR_ERR(chan); | 952 | return PTR_ERR(chan); |
915 | 953 | ||
@@ -936,7 +974,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | |||
936 | struct nouveau_channel *chan; | 974 | struct nouveau_channel *chan; |
937 | int ret; | 975 | int ret; |
938 | 976 | ||
939 | chan = nouveau_channel_get(dev, file_priv, objfree->channel); | 977 | chan = nouveau_channel_get(file_priv, objfree->channel); |
940 | if (IS_ERR(chan)) | 978 | if (IS_ERR(chan)) |
941 | return PTR_ERR(chan); | 979 | return PTR_ERR(chan); |
942 | 980 | ||
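The new nvc0_gpuobj_channel_init makes the page directory lazily shared: the first channel on a VM allocates it, later channels find it on pgd_list, and nouveau_vm_ref ties its lifetime to the VM so it is destroyed with the last reference. A standalone mock of that lazy, shared-PGD lifetime — the real code operates on gpuobjs, not heap allocations:

    #include <stdlib.h>

    struct pgd { int refs; };
    struct vm  { struct pgd *pgd; };

    static struct pgd *vm_get_pgd(struct vm *vm)
    {
        if (!vm->pgd) {                 /* first channel on this VM */
            vm->pgd = calloc(1, sizeof(*vm->pgd));
            if (!vm->pgd)
                return NULL;
        }
        vm->pgd->refs++;                /* later channels share it */
        return vm->pgd;
    }

    static void vm_put_pgd(struct vm *vm)
    {
        if (vm->pgd && --vm->pgd->refs == 0) {
            free(vm->pgd);              /* dies with the last channel */
            vm->pgd = NULL;
        }
    }

    int main(void)
    {
        struct vm vm = { 0 };
        vm_get_pgd(&vm);
        vm_get_pgd(&vm);
        vm_put_pgd(&vm);
        vm_put_pgd(&vm);
        return vm.pgd != NULL;
    }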
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 82fad914e64..2706cb3d871 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
37 | return -ENOMEM; | 37 | return -ENOMEM; |
38 | 38 | ||
39 | nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); | 39 | nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); |
40 | if (!nvbe->ttm_alloced) | 40 | if (!nvbe->ttm_alloced) { |
41 | kfree(nvbe->pages); | ||
42 | nvbe->pages = NULL; | ||
41 | return -ENOMEM; | 43 | return -ENOMEM; |
44 | } | ||
42 | 45 | ||
43 | nvbe->nr_pages = 0; | 46 | nvbe->nr_pages = 0; |
44 | while (num_pages--) { | 47 | while (num_pages--) { |
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) | |||
126 | 129 | ||
127 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { | 130 | for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { |
128 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); | 131 | nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); |
129 | dma_offset += NV_CTXDMA_PAGE_SIZE; | 132 | offset_l += NV_CTXDMA_PAGE_SIZE; |
130 | } | 133 | } |
131 | } | 134 | } |
132 | 135 | ||
@@ -429,7 +432,7 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
429 | u32 aper_size, align; | 432 | u32 aper_size, align; |
430 | int ret; | 433 | int ret; |
431 | 434 | ||
432 | if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev)) | 435 | if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev)) |
433 | aper_size = 512 * 1024 * 1024; | 436 | aper_size = 512 * 1024 * 1024; |
434 | else | 437 | else |
435 | aper_size = 64 * 1024 * 1024; | 438 | aper_size = 64 * 1024 * 1024; |
@@ -458,7 +461,7 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
458 | dev_priv->gart_info.type = NOUVEAU_GART_HW; | 461 | dev_priv->gart_info.type = NOUVEAU_GART_HW; |
459 | dev_priv->gart_info.func = &nv50_sgdma_backend; | 462 | dev_priv->gart_info.func = &nv50_sgdma_backend; |
460 | } else | 463 | } else |
461 | if (0 && drm_pci_device_is_pcie(dev) && | 464 | if (0 && pci_is_pcie(dev->pdev) && |
462 | dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { | 465 | dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { |
463 | if (nv44_graph_class(dev)) { | 466 | if (nv44_graph_class(dev)) { |
464 | dev_priv->gart_info.func = &nv44_sgdma_backend; | 467 | dev_priv->gart_info.func = &nv44_sgdma_backend; |
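Two fixes in the SGDMA code: the populate path no longer leaks nvbe->pages when the second allocation fails, and nv04_sgdma_bind now advances offset_l — the value actually written to the page table — instead of the unused dma_offset. The leak fix is the classic "undo everything allocated so far" rule; a compilable sketch with invented names:

    #include <stdlib.h>

    struct backend { void **pages; char *flags; };

    static int backend_populate(struct backend *be, unsigned long n)
    {
        be->pages = calloc(n, sizeof(*be->pages));
        if (!be->pages)
            return -1;

        be->flags = calloc(n, sizeof(*be->flags));
        if (!be->flags) {
            free(be->pages);    /* the fix: undo the first alloc */
            be->pages = NULL;   /* leave the struct consistent */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct backend be = { 0, 0 };
        return backend_populate(&be, 16);
    }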
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 731acea865b..10656e430b4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
91 | engine->pm.clock_pre = nv04_pm_clock_pre; | 91 | engine->pm.clock_pre = nv04_pm_clock_pre; |
92 | engine->pm.clock_set = nv04_pm_clock_set; | 92 | engine->pm.clock_set = nv04_pm_clock_set; |
93 | engine->vram.init = nouveau_mem_detect; | 93 | engine->vram.init = nouveau_mem_detect; |
94 | engine->vram.takedown = nouveau_stub_takedown; | ||
94 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 95 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
95 | break; | 96 | break; |
96 | case 0x10: | 97 | case 0x10: |
@@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
139 | engine->pm.clock_pre = nv04_pm_clock_pre; | 140 | engine->pm.clock_pre = nv04_pm_clock_pre; |
140 | engine->pm.clock_set = nv04_pm_clock_set; | 141 | engine->pm.clock_set = nv04_pm_clock_set; |
141 | engine->vram.init = nouveau_mem_detect; | 142 | engine->vram.init = nouveau_mem_detect; |
143 | engine->vram.takedown = nouveau_stub_takedown; | ||
142 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 144 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
143 | break; | 145 | break; |
144 | case 0x20: | 146 | case 0x20: |
@@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
187 | engine->pm.clock_pre = nv04_pm_clock_pre; | 189 | engine->pm.clock_pre = nv04_pm_clock_pre; |
188 | engine->pm.clock_set = nv04_pm_clock_set; | 190 | engine->pm.clock_set = nv04_pm_clock_set; |
189 | engine->vram.init = nouveau_mem_detect; | 191 | engine->vram.init = nouveau_mem_detect; |
192 | engine->vram.takedown = nouveau_stub_takedown; | ||
190 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 193 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
191 | break; | 194 | break; |
192 | case 0x30: | 195 | case 0x30: |
@@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
237 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 240 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
238 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 241 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
239 | engine->vram.init = nouveau_mem_detect; | 242 | engine->vram.init = nouveau_mem_detect; |
243 | engine->vram.takedown = nouveau_stub_takedown; | ||
240 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 244 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
241 | break; | 245 | break; |
242 | case 0x40: | 246 | case 0x40: |
@@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
289 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 293 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
290 | engine->pm.temp_get = nv40_temp_get; | 294 | engine->pm.temp_get = nv40_temp_get; |
291 | engine->vram.init = nouveau_mem_detect; | 295 | engine->vram.init = nouveau_mem_detect; |
296 | engine->vram.takedown = nouveau_stub_takedown; | ||
292 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 297 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
293 | break; | 298 | break; |
294 | case 0x50: | 299 | case 0x50: |
@@ -366,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
366 | else | 371 | else |
367 | engine->pm.temp_get = nv40_temp_get; | 372 | engine->pm.temp_get = nv40_temp_get; |
368 | engine->vram.init = nv50_vram_init; | 373 | engine->vram.init = nv50_vram_init; |
374 | engine->vram.takedown = nv50_vram_fini; | ||
369 | engine->vram.get = nv50_vram_new; | 375 | engine->vram.get = nv50_vram_new; |
370 | engine->vram.put = nv50_vram_del; | 376 | engine->vram.put = nv50_vram_del; |
371 | engine->vram.flags_valid = nv50_vram_flags_valid; | 377 | engine->vram.flags_valid = nv50_vram_flags_valid; |
@@ -411,9 +417,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
411 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; | 417 | engine->gpio.irq_unregister = nv50_gpio_irq_unregister; |
412 | engine->gpio.irq_enable = nv50_gpio_irq_enable; | 418 | engine->gpio.irq_enable = nv50_gpio_irq_enable; |
413 | engine->vram.init = nvc0_vram_init; | 419 | engine->vram.init = nvc0_vram_init; |
420 | engine->vram.takedown = nv50_vram_fini; | ||
414 | engine->vram.get = nvc0_vram_new; | 421 | engine->vram.get = nvc0_vram_new; |
415 | engine->vram.put = nv50_vram_del; | 422 | engine->vram.put = nv50_vram_del; |
416 | engine->vram.flags_valid = nvc0_vram_flags_valid; | 423 | engine->vram.flags_valid = nvc0_vram_flags_valid; |
424 | engine->pm.temp_get = nv84_temp_get; | ||
417 | break; | 425 | break; |
418 | default: | 426 | default: |
419 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); | 427 | NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); |
@@ -447,8 +455,8 @@ nouveau_card_init_channel(struct drm_device *dev) | |||
447 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 455 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
448 | int ret; | 456 | int ret; |
449 | 457 | ||
450 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, | 458 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, |
451 | (struct drm_file *)-2, NvDmaFB, NvDmaTT); | 459 | NvDmaFB, NvDmaTT); |
452 | if (ret) | 460 | if (ret) |
453 | return ret; | 461 | return ret; |
454 | 462 | ||
@@ -527,7 +535,7 @@ nouveau_card_init(struct drm_device *dev) | |||
527 | 535 | ||
528 | nouveau_pm_init(dev); | 536 | nouveau_pm_init(dev); |
529 | 537 | ||
530 | ret = nouveau_mem_vram_init(dev); | 538 | ret = engine->vram.init(dev); |
531 | if (ret) | 539 | if (ret) |
532 | goto out_bios; | 540 | goto out_bios; |
533 | 541 | ||
@@ -539,10 +547,14 @@ nouveau_card_init(struct drm_device *dev) | |||
539 | if (ret) | 547 | if (ret) |
540 | goto out_gpuobj; | 548 | goto out_gpuobj; |
541 | 549 | ||
542 | ret = nouveau_mem_gart_init(dev); | 550 | ret = nouveau_mem_vram_init(dev); |
543 | if (ret) | 551 | if (ret) |
544 | goto out_instmem; | 552 | goto out_instmem; |
545 | 553 | ||
554 | ret = nouveau_mem_gart_init(dev); | ||
555 | if (ret) | ||
556 | goto out_ttmvram; | ||
557 | |||
546 | /* PMC */ | 558 | /* PMC */ |
547 | ret = engine->mc.init(dev); | 559 | ret = engine->mc.init(dev); |
548 | if (ret) | 560 | if (ret) |
@@ -563,7 +575,7 @@ nouveau_card_init(struct drm_device *dev) | |||
563 | if (ret) | 575 | if (ret) |
564 | goto out_timer; | 576 | goto out_timer; |
565 | 577 | ||
566 | if (!nouveau_noaccel) { | 578 | if (!dev_priv->noaccel) { |
567 | switch (dev_priv->card_type) { | 579 | switch (dev_priv->card_type) { |
568 | case NV_04: | 580 | case NV_04: |
569 | nv04_graph_create(dev); | 581 | nv04_graph_create(dev); |
@@ -675,14 +687,14 @@ out_vblank: | |||
675 | drm_vblank_cleanup(dev); | 687 | drm_vblank_cleanup(dev); |
676 | engine->display.destroy(dev); | 688 | engine->display.destroy(dev); |
677 | out_fifo: | 689 | out_fifo: |
678 | if (!nouveau_noaccel) | 690 | if (!dev_priv->noaccel) |
679 | engine->fifo.takedown(dev); | 691 | engine->fifo.takedown(dev); |
680 | out_engine: | 692 | out_engine: |
681 | if (!nouveau_noaccel) { | 693 | if (!dev_priv->noaccel) { |
682 | for (e = e - 1; e >= 0; e--) { | 694 | for (e = e - 1; e >= 0; e--) { |
683 | if (!dev_priv->eng[e]) | 695 | if (!dev_priv->eng[e]) |
684 | continue; | 696 | continue; |
685 | dev_priv->eng[e]->fini(dev, e); | 697 | dev_priv->eng[e]->fini(dev, e, false); |
686 | dev_priv->eng[e]->destroy(dev,e ); | 698 | dev_priv->eng[e]->destroy(dev,e ); |
687 | } | 699 | } |
688 | } | 700 | } |
@@ -696,12 +708,14 @@ out_mc: | |||
696 | engine->mc.takedown(dev); | 708 | engine->mc.takedown(dev); |
697 | out_gart: | 709 | out_gart: |
698 | nouveau_mem_gart_fini(dev); | 710 | nouveau_mem_gart_fini(dev); |
711 | out_ttmvram: | ||
712 | nouveau_mem_vram_fini(dev); | ||
699 | out_instmem: | 713 | out_instmem: |
700 | engine->instmem.takedown(dev); | 714 | engine->instmem.takedown(dev); |
701 | out_gpuobj: | 715 | out_gpuobj: |
702 | nouveau_gpuobj_takedown(dev); | 716 | nouveau_gpuobj_takedown(dev); |
703 | out_vram: | 717 | out_vram: |
704 | nouveau_mem_vram_fini(dev); | 718 | engine->vram.takedown(dev); |
705 | out_bios: | 719 | out_bios: |
706 | nouveau_pm_fini(dev); | 720 | nouveau_pm_fini(dev); |
707 | nouveau_bios_takedown(dev); | 721 | nouveau_bios_takedown(dev); |
@@ -718,16 +732,21 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
718 | struct nouveau_engine *engine = &dev_priv->engine; | 732 | struct nouveau_engine *engine = &dev_priv->engine; |
719 | int e; | 733 | int e; |
720 | 734 | ||
735 | drm_kms_helper_poll_fini(dev); | ||
736 | nouveau_fbcon_fini(dev); | ||
737 | |||
721 | if (dev_priv->channel) { | 738 | if (dev_priv->channel) { |
722 | nouveau_fence_fini(dev); | ||
723 | nouveau_channel_put_unlocked(&dev_priv->channel); | 739 | nouveau_channel_put_unlocked(&dev_priv->channel); |
740 | nouveau_fence_fini(dev); | ||
724 | } | 741 | } |
725 | 742 | ||
726 | if (!nouveau_noaccel) { | 743 | engine->display.destroy(dev); |
744 | |||
745 | if (!dev_priv->noaccel) { | ||
727 | engine->fifo.takedown(dev); | 746 | engine->fifo.takedown(dev); |
728 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { | 747 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { |
729 | if (dev_priv->eng[e]) { | 748 | if (dev_priv->eng[e]) { |
730 | dev_priv->eng[e]->fini(dev, e); | 749 | dev_priv->eng[e]->fini(dev, e, false); |
731 | dev_priv->eng[e]->destroy(dev,e ); | 750 | dev_priv->eng[e]->destroy(dev,e ); |
732 | } | 751 | } |
733 | } | 752 | } |
@@ -748,10 +767,11 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
748 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); | 767 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); |
749 | mutex_unlock(&dev->struct_mutex); | 768 | mutex_unlock(&dev->struct_mutex); |
750 | nouveau_mem_gart_fini(dev); | 769 | nouveau_mem_gart_fini(dev); |
770 | nouveau_mem_vram_fini(dev); | ||
751 | 771 | ||
752 | engine->instmem.takedown(dev); | 772 | engine->instmem.takedown(dev); |
753 | nouveau_gpuobj_takedown(dev); | 773 | nouveau_gpuobj_takedown(dev); |
754 | nouveau_mem_vram_fini(dev); | 774 | engine->vram.takedown(dev); |
755 | 775 | ||
756 | nouveau_irq_fini(dev); | 776 | nouveau_irq_fini(dev); |
757 | drm_vblank_cleanup(dev); | 777 | drm_vblank_cleanup(dev); |
@@ -762,6 +782,41 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
762 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 782 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
763 | } | 783 | } |
764 | 784 | ||
785 | int | ||
786 | nouveau_open(struct drm_device *dev, struct drm_file *file_priv) | ||
787 | { | ||
788 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
789 | struct nouveau_fpriv *fpriv; | ||
790 | int ret; | ||
791 | |||
792 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); | ||
793 | if (unlikely(!fpriv)) | ||
794 | return -ENOMEM; | ||
795 | |||
796 | spin_lock_init(&fpriv->lock); | ||
797 | INIT_LIST_HEAD(&fpriv->channels); | ||
798 | |||
799 | if (dev_priv->card_type == NV_50) { | ||
800 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, | ||
801 | &fpriv->vm); | ||
802 | if (ret) { | ||
803 | kfree(fpriv); | ||
804 | return ret; | ||
805 | } | ||
806 | } else | ||
807 | if (dev_priv->card_type >= NV_C0) { | ||
808 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, | ||
809 | &fpriv->vm); | ||
810 | if (ret) { | ||
811 | kfree(fpriv); | ||
812 | return ret; | ||
813 | } | ||
814 | } | ||
815 | |||
816 | file_priv->driver_priv = fpriv; | ||
817 | return 0; | ||
818 | } | ||
819 | |||
765 | /* a client is closing; release the resources that were allocated | 820 | /* a client is closing; release the resources that were allocated |
766 | * for its file_priv */ | 821 | * for its file_priv */ |
767 | void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) | 822 | void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) |
@@ -769,6 +824,14 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) | |||
769 | nouveau_channel_cleanup(dev, file_priv); | 824 | nouveau_channel_cleanup(dev, file_priv); |
770 | } | 825 | } |
771 | 826 | ||
827 | void | ||
828 | nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv) | ||
829 | { | ||
830 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
831 | nouveau_vm_ref(NULL, &fpriv->vm, NULL); | ||
832 | kfree(fpriv); | ||
833 | } | ||
834 | |||
772 | /* first module load, setup the mmio/fb mapping */ | 835 | /* first module load, setup the mmio/fb mapping */ |
773 | /* KMS: we need mmio at load time, not when the first drm client opens. */ | 836 | /* KMS: we need mmio at load time, not when the first drm client opens. */ |
774 | int nouveau_firstopen(struct drm_device *dev) | 837 | int nouveau_firstopen(struct drm_device *dev) |
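nouveau_open attaches a nouveau_fpriv to every file descriptor and, on NV50/NVC0, a private 1<<40-byte VM (with card-specific page-table granularity); nouveau_postclose drops the VM reference and frees the structure. A mock of the open/close pairing — the names below are hypothetical, and calloc stands in for nouveau_vm_new():

    #include <stdlib.h>

    /* Hypothetical stand-in for the per-file driver state. */
    struct fpriv { void *vm; };

    static int file_open(struct fpriv **out, int needs_vm)
    {
        struct fpriv *f = calloc(1, sizeof(*f));
        if (!f)
            return -1;
        if (needs_vm) {
            f->vm = calloc(1, 64);  /* stands in for nouveau_vm_new() */
            if (!f->vm) {
                free(f);            /* don't leak the half-built fpriv */
                return -1;
            }
        }
        *out = f;
        return 0;
    }

    static void file_close(struct fpriv *f)
    {
        free(f->vm);    /* stands in for nouveau_vm_ref(NULL, ...) */
        free(f);
    }

    int main(void)
    {
        struct fpriv *f;
        if (file_open(&f, 1) == 0)
            file_close(f);
        return 0;
    }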
@@ -933,6 +996,25 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
933 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", | 996 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", |
934 | dev_priv->card_type, reg0); | 997 | dev_priv->card_type, reg0); |
935 | 998 | ||
999 | /* Determine whether we'll attempt acceleration or not; some | ||
1000 | * cards are disabled by default here because they are known to be | ||
1001 | * non-functional, or have never been tested due to lack of hw. | ||
1002 | */ | ||
1003 | dev_priv->noaccel = !!nouveau_noaccel; | ||
1004 | if (nouveau_noaccel == -1) { | ||
1005 | switch (dev_priv->chipset) { | ||
1006 | case 0xc1: /* known broken */ | ||
1007 | case 0xc8: /* never tested */ | ||
1008 | NV_INFO(dev, "acceleration disabled by default, pass " | ||
1009 | "noaccel=0 to force enable\n"); | ||
1010 | dev_priv->noaccel = true; | ||
1011 | break; | ||
1012 | default: | ||
1013 | dev_priv->noaccel = false; | ||
1014 | break; | ||
1015 | } | ||
1016 | } | ||
1017 | |||
936 | ret = nouveau_remove_conflicting_drivers(dev); | 1018 | ret = nouveau_remove_conflicting_drivers(dev); |
937 | if (ret) | 1019 | if (ret) |
938 | goto err_mmio; | 1020 | goto err_mmio; |
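The noaccel module parameter effectively becomes tri-state: an explicit 0 or 1 from the user always wins, while -1 asks for the driver default, which currently blacklists chipset 0xc1 (known broken) and 0xc8 (never tested). A small runnable sketch of the resolution logic:

    #include <stdio.h>
    #include <stdbool.h>

    /* -1 = driver default, 0 = force acceleration on, 1 = force off. */
    static bool resolve_noaccel(int param, int chipset)
    {
        if (param != -1)
            return param != 0;
        switch (chipset) {
        case 0xc1:      /* known broken */
        case 0xc8:      /* never tested */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("0xc1 auto: %d\n", resolve_noaccel(-1, 0xc1));     /* 1 */
        printf("0xc1 forced on: %d\n", resolve_noaccel(0, 0xc1)); /* 0 */
        return 0;
    }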
@@ -997,11 +1079,7 @@ void nouveau_lastclose(struct drm_device *dev) | |||
997 | int nouveau_unload(struct drm_device *dev) | 1079 | int nouveau_unload(struct drm_device *dev) |
998 | { | 1080 | { |
999 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 1081 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1000 | struct nouveau_engine *engine = &dev_priv->engine; | ||
1001 | 1082 | ||
1002 | drm_kms_helper_poll_fini(dev); | ||
1003 | nouveau_fbcon_fini(dev); | ||
1004 | engine->display.destroy(dev); | ||
1005 | nouveau_card_takedown(dev); | 1083 | nouveau_card_takedown(dev); |
1006 | 1084 | ||
1007 | iounmap(dev_priv->mmio); | 1085 | iounmap(dev_priv->mmio); |
@@ -1031,7 +1109,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1031 | case NOUVEAU_GETPARAM_BUS_TYPE: | 1109 | case NOUVEAU_GETPARAM_BUS_TYPE: |
1032 | if (drm_pci_device_is_agp(dev)) | 1110 | if (drm_pci_device_is_agp(dev)) |
1033 | getparam->value = NV_AGP; | 1111 | getparam->value = NV_AGP; |
1034 | else if (drm_pci_device_is_pcie(dev)) | 1112 | else if (pci_is_pcie(dev->pdev)) |
1035 | getparam->value = NV_PCIE; | 1113 | getparam->value = NV_PCIE; |
1036 | else | 1114 | else |
1037 | getparam->value = NV_PCI; | 1115 | getparam->value = NV_PCI; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 649b0413b09..081ca7b03e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c | |||
@@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) | |||
43 | 43 | ||
44 | /* Set the default sensor's constants */ | 44 | /* Set the default sensor's constants */ |
45 | sensor->offset_constant = 0; | 45 | sensor->offset_constant = 0; |
46 | sensor->offset_mult = 1; | 46 | sensor->offset_mult = 0; |
47 | sensor->offset_div = 1; | 47 | sensor->offset_div = 1; |
48 | sensor->slope_mult = 1; | 48 | sensor->slope_mult = 1; |
49 | sensor->slope_div = 1; | 49 | sensor->slope_div = 1; |
@@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) | |||
99 | sensor->slope_mult = 431; | 99 | sensor->slope_mult = 431; |
100 | sensor->slope_div = 10000; | 100 | sensor->slope_div = 10000; |
101 | break; | 101 | break; |
102 | |||
103 | case 0x67: | ||
104 | sensor->offset_mult = -26149; | ||
105 | sensor->offset_div = 100; | ||
106 | sensor->slope_mult = 484; | ||
107 | sensor->slope_div = 10000; | ||
108 | break; | ||
102 | } | 109 | } |
103 | } | 110 | } |
104 | 111 | ||
@@ -109,7 +116,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) | |||
109 | 116 | ||
110 | /* Read the entries from the table */ | 117 | /* Read the entries from the table */ |
111 | for (i = 0; i < entries; i++) { | 118 | for (i = 0; i < entries; i++) { |
112 | u16 value = ROM16(temp[1]); | 119 | s16 value = ROM16(temp[1]); |
113 | 120 | ||
114 | switch (temp[0]) { | 121 | switch (temp[0]) { |
115 | case 0x01: | 122 | case 0x01: |
@@ -160,8 +167,8 @@ nv40_sensor_setup(struct drm_device *dev) | |||
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 167 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
161 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 168 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
162 | struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; | 169 | struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; |
163 | u32 offset = sensor->offset_mult / sensor->offset_div; | 170 | s32 offset = sensor->offset_mult / sensor->offset_div; |
164 | u32 sensor_calibration; | 171 | s32 sensor_calibration; |
165 | 172 | ||
166 | /* set up the sensors */ | 173 | /* set up the sensors */ |
167 | sensor_calibration = 120 - offset - sensor->offset_constant; | 174 | sensor_calibration = 120 - offset - sensor->offset_constant; |
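The sensor constants switch to signed types because VBIOS offsets can be negative — the new 0x67 entry uses offset_mult = -26149 — and with the old unsigned types the value wrapped before the division, yielding a nonsense offset (the default offset_mult also drops from 1 to 0, so the default offset term is zero). A runnable demonstration of the failure mode:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int16_t raw = -26149;   /* as a signed ROM16() read now yields */

        uint32_t bad = (uint32_t)raw / 100; /* old unsigned math wraps */
        int32_t good = (int32_t)raw / 100;  /* signed math keeps the sign */

        printf("unsigned: %u\n", bad);  /* 42949411: huge bogus offset */
        printf("signed:   %d\n", good); /* -261, as intended */
        return 0;
    }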
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c index 519a6b4bba4..244fd38fdb8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c | |||
@@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) | |||
369 | } | 369 | } |
370 | 370 | ||
371 | static void | 371 | static void |
372 | nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) | 372 | nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) |
373 | { | 373 | { |
374 | struct nouveau_vm_pgd *vpgd, *tmp; | 374 | struct nouveau_vm_pgd *vpgd, *tmp; |
375 | struct nouveau_gpuobj *pgd = NULL; | ||
375 | 376 | ||
376 | if (!pgd) | 377 | if (!mpgd) |
377 | return; | 378 | return; |
378 | 379 | ||
379 | mutex_lock(&vm->mm->mutex); | 380 | mutex_lock(&vm->mm->mutex); |
380 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | 381 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { |
381 | if (vpgd->obj != pgd) | 382 | if (vpgd->obj == mpgd) { |
382 | continue; | 383 | pgd = vpgd->obj; |
383 | 384 | list_del(&vpgd->head); | |
384 | list_del(&vpgd->head); | 385 | kfree(vpgd); |
385 | nouveau_gpuobj_ref(NULL, &vpgd->obj); | 386 | break; |
386 | kfree(vpgd); | 387 | } |
387 | } | 388 | } |
388 | mutex_unlock(&vm->mm->mutex); | 389 | mutex_unlock(&vm->mm->mutex); |
390 | |||
391 | nouveau_gpuobj_ref(NULL, &pgd); | ||
389 | } | 392 | } |
390 | 393 | ||
391 | static void | 394 | static void |
@@ -396,8 +399,8 @@ nouveau_vm_del(struct nouveau_vm *vm) | |||
396 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | 399 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { |
397 | nouveau_vm_unlink(vm, vpgd->obj); | 400 | nouveau_vm_unlink(vm, vpgd->obj); |
398 | } | 401 | } |
399 | WARN_ON(nouveau_mm_fini(&vm->mm) != 0); | ||
400 | 402 | ||
403 | nouveau_mm_fini(&vm->mm); | ||
401 | kfree(vm->pgt); | 404 | kfree(vm->pgt); |
402 | kfree(vm); | 405 | kfree(vm); |
403 | } | 406 | } |
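nouveau_vm_unlink now unhooks the matching page directory from the list under vm->mm->mutex but drops the gpuobj reference only after the unlock, since the final put may trigger teardown that takes further locks; nouveau_vm_del likewise stops WARNing itself, as nouveau_mm_fini now reports leaks. A standalone sketch of the unlink-then-put-outside-the-lock pattern:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unlink under the lock; defer the final put until after unlock,
     * since teardown may take other locks of its own. */
    static void unlink_and_put(struct obj **slot)
    {
        struct obj *victim;

        pthread_mutex_lock(&list_lock);
        victim = *slot;     /* stands in for the pgd_list search */
        *slot = NULL;
        pthread_mutex_unlock(&list_lock);

        if (victim && --victim->refs == 0)
            free(victim);   /* heavy teardown runs lock-free */
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        o->refs = 1;
        unlink_and_put(&o);
        return o != NULL;
    }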
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h index c48a9fc2b47..579ca8cc223 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.h +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h | |||
@@ -41,6 +41,8 @@ struct nouveau_vm_pgd { | |||
41 | }; | 41 | }; |
42 | 42 | ||
43 | struct nouveau_vma { | 43 | struct nouveau_vma { |
44 | struct list_head head; | ||
45 | int refcount; | ||
44 | struct nouveau_vm *vm; | 46 | struct nouveau_vm *vm; |
45 | struct nouveau_mm_node *node; | 47 | struct nouveau_mm_node *node; |
46 | u64 offset; | 48 | u64 offset; |
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index f1a3ae49199..5e45398a9e2 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
781 | struct drm_device *dev = crtc->dev; | 781 | struct drm_device *dev = crtc->dev; |
782 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 782 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
783 | struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; | 783 | struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; |
784 | struct drm_framebuffer *drm_fb = nv_crtc->base.fb; | 784 | struct drm_framebuffer *drm_fb; |
785 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 785 | struct nouveau_framebuffer *fb; |
786 | int arb_burst, arb_lwm; | 786 | int arb_burst, arb_lwm; |
787 | int ret; | 787 | int ret; |
788 | 788 | ||
789 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | ||
790 | |||
791 | /* no fb bound */ | ||
792 | if (!atomic && !crtc->fb) { | ||
793 | NV_DEBUG_KMS(dev, "No FB bound\n"); | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | |||
789 | /* If atomic, we want to switch to the fb we were passed, so | 798 | /* If atomic, we want to switch to the fb we were passed, so |
790 | * now we update pointers to do that. (We don't pin; just | 799 | * now we update pointers to do that. (We don't pin; just |
791 | * assume we're already pinned and update the base address.) | 800 | * assume we're already pinned and update the base address.) |
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
794 | drm_fb = passed_fb; | 803 | drm_fb = passed_fb; |
795 | fb = nouveau_framebuffer(passed_fb); | 804 | fb = nouveau_framebuffer(passed_fb); |
796 | } else { | 805 | } else { |
806 | drm_fb = crtc->fb; | ||
807 | fb = nouveau_framebuffer(crtc->fb); | ||
797 | /* If not atomic, we can go ahead and pin, and unpin the | 808 | /* If not atomic, we can go ahead and pin, and unpin the |
798 | * old fb we were passed. | 809 | * old fb we were passed. |
799 | */ | 810 | */ |
@@ -1035,7 +1046,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) | |||
1035 | drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); | 1046 | drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); |
1036 | drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); | 1047 | drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); |
1037 | 1048 | ||
1038 | ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, | 1049 | ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, |
1039 | 0, 0x0000, &nv_crtc->cursor.nvbo); | 1050 | 0, 0x0000, &nv_crtc->cursor.nvbo); |
1040 | if (!ret) { | 1051 | if (!ret) { |
1041 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); | 1052 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); |
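
The guard added to nv04_crtc_do_mode_set_base() defers the framebuffer lookup until a NULL crtc->fb has been ruled out, so a modeset with no fb bound returns quietly instead of dereferencing nothing later on. A self-contained user-space sketch of that early-out shape, with invented types:

#include <stdio.h>
#include <stddef.h>

struct fb { int width; };
struct crtc { struct fb *fb; };

static int mode_set_base(struct crtc *crtc, struct fb *passed_fb, int atomic)
{
    struct fb *fb;

    if (!atomic && !crtc->fb)    /* no fb bound: nothing to program */
        return 0;

    fb = atomic ? passed_fb : crtc->fb;   /* resolved only when safe */
    printf("programming fb of width %d\n", fb->width);
    return 0;
}

int main(void)
{
    struct crtc c = { .fb = NULL };
    return mode_set_base(&c, NULL, 0);    /* returns 0 instead of crashing */
}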
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 3626ee7db3b..dbdea8ed392 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
@@ -450,13 +450,13 @@ nv04_graph_context_del(struct nouveau_channel *chan, int engine) | |||
450 | unsigned long flags; | 450 | unsigned long flags; |
451 | 451 | ||
452 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 452 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
453 | nv04_graph_fifo_access(dev, false); | 453 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); |
454 | 454 | ||
455 | /* Unload the context if it's the currently active one */ | 455 | /* Unload the context if it's the currently active one */ |
456 | if (nv04_graph_channel(dev) == chan) | 456 | if (nv04_graph_channel(dev) == chan) |
457 | nv04_graph_unload_context(dev); | 457 | nv04_graph_unload_context(dev); |
458 | 458 | ||
459 | nv04_graph_fifo_access(dev, true); | 459 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); |
460 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 460 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
461 | 461 | ||
462 | /* Free the context resources */ | 462 | /* Free the context resources */ |
@@ -538,24 +538,18 @@ nv04_graph_init(struct drm_device *dev, int engine) | |||
538 | } | 538 | } |
539 | 539 | ||
540 | static int | 540 | static int |
541 | nv04_graph_fini(struct drm_device *dev, int engine) | 541 | nv04_graph_fini(struct drm_device *dev, int engine, bool suspend) |
542 | { | 542 | { |
543 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); | ||
544 | if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) { | ||
545 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); | ||
546 | return -EBUSY; | ||
547 | } | ||
543 | nv04_graph_unload_context(dev); | 548 | nv04_graph_unload_context(dev); |
544 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); | 549 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); |
545 | return 0; | 550 | return 0; |
546 | } | 551 | } |
547 | 552 | ||
548 | void | ||
549 | nv04_graph_fifo_access(struct drm_device *dev, bool enabled) | ||
550 | { | ||
551 | if (enabled) | ||
552 | nv_wr32(dev, NV04_PGRAPH_FIFO, | ||
553 | nv_rd32(dev, NV04_PGRAPH_FIFO) | 1); | ||
554 | else | ||
555 | nv_wr32(dev, NV04_PGRAPH_FIFO, | ||
556 | nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1); | ||
557 | } | ||
558 | |||
559 | static int | 553 | static int |
560 | nv04_graph_mthd_set_ref(struct nouveau_channel *chan, | 554 | nv04_graph_mthd_set_ref(struct nouveau_channel *chan, |
561 | u32 class, u32 mthd, u32 data) | 555 | u32 class, u32 mthd, u32 data) |
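
Both changes in nv04_graph.c converge on nv_mask(): the open-coded nv04_graph_fifo_access() helper is dropped in favour of a masked read-modify-write of NV04_PGRAPH_FIFO, and the new fini(..., suspend) path re-enables the bit and returns -EBUSY if the engine will not idle. A small model of the nv_mask() idiom, assuming only its read-modify-write semantics:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg = 0xfffffffe;   /* stand-in for NV04_PGRAPH_FIFO */

/* What nv_mask(dev, r, mask, val) boils down to: replace only the
 * masked bits, returning the old value. */
static uint32_t nv_mask_model(uint32_t mask, uint32_t val)
{
    uint32_t old = reg;
    reg = (old & ~mask) | val;
    return old;
}

int main(void)
{
    nv_mask_model(0x00000001, 0x00000000);   /* disable FIFO access */
    printf("0x%08x\n", (unsigned)reg);       /* 0xfffffffe */
    nv_mask_model(0x00000001, 0x00000001);   /* re-enable it */
    printf("0x%08x\n", (unsigned)reg);       /* 0xffffffff */
    return 0;
}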
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index b8611b95531..c1248e0740a 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c | |||
@@ -28,6 +28,31 @@ int nv04_instmem_init(struct drm_device *dev) | |||
28 | /* RAMIN always available */ | 28 | /* RAMIN always available */ |
29 | dev_priv->ramin_available = true; | 29 | dev_priv->ramin_available = true; |
30 | 30 | ||
31 | /* Reserve space at end of VRAM for PRAMIN */ | ||
32 | if (dev_priv->card_type >= NV_40) { | ||
33 | u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8); | ||
34 | u32 rsvd; | ||
35 | |||
36 | /* estimate grctx size, the magics come from nv40_grctx.c */ | ||
37 | if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs; | ||
38 | else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs; | ||
39 | else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; | ||
40 | else rsvd = 0x4a40 * vs; | ||
41 | rsvd += 16 * 1024; | ||
42 | rsvd *= dev_priv->engine.fifo.channels; | ||
43 | |||
44 | /* pciegart table */ | ||
45 | if (pci_is_pcie(dev->pdev)) | ||
46 | rsvd += 512 * 1024; | ||
47 | |||
48 | /* object storage */ | ||
49 | rsvd += 512 * 1024; | ||
50 | |||
51 | dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); | ||
52 | } else { | ||
53 | dev_priv->ramin_rsvd_vram = 512 * 1024; | ||
54 | } | ||
55 | |||
31 | /* Setup shared RAMHT */ | 56 | /* Setup shared RAMHT */ |
32 | ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, | 57 | ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, |
33 | NVOBJ_FLAG_ZERO_ALLOC, &ramht); | 58 | NVOBJ_FLAG_ZERO_ALLOC, &ramht); |
@@ -112,7 +137,8 @@ nv04_instmem_resume(struct drm_device *dev) | |||
112 | } | 137 | } |
113 | 138 | ||
114 | int | 139 | int |
115 | nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) | 140 | nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, |
141 | u32 size, u32 align) | ||
116 | { | 142 | { |
117 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; | 143 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; |
118 | struct drm_mm_node *ramin = NULL; | 144 | struct drm_mm_node *ramin = NULL; |
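
A worked example of the PRAMIN reservation now computed in nv04_instmem_init(). The per-chipset constants come from the hunk above; vs and the channel count are example inputs, not values read from hardware:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t vs = 2;            /* example: hweight8() of the 0x001540 bits */
    uint32_t channels = 32;     /* example FIFO channel count */
    uint32_t rsvd;

    rsvd  = 0x6aa0 * vs;        /* chipset 0x40 grctx estimate */
    rsvd += 16 * 1024;          /* per-channel overhead */
    rsvd *= channels;
    rsvd += 512 * 1024;         /* pciegart table (PCIe boards) */
    rsvd += 512 * 1024;         /* object storage */
    rsvd  = (rsvd + 4095) & ~4095u;   /* round_up(rsvd, 4096) */

    printf("ramin_rsvd_vram = %u KiB\n", (unsigned)(rsvd >> 10));  /* 3244 KiB */
    return 0;
}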
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 0930c6cb88e..7255e4a4d3f 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
@@ -708,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan, | |||
708 | 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); | 708 | 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); |
709 | nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); | 709 | nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); |
710 | nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); | 710 | nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); |
711 | nv04_graph_fifo_access(dev, true); | 711 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); |
712 | nv04_graph_fifo_access(dev, false); | 712 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); |
713 | 713 | ||
714 | /* Restore the FIFO state */ | 714 | /* Restore the FIFO state */ |
715 | for (i = 0; i < ARRAY_SIZE(fifo); i++) | 715 | for (i = 0; i < ARRAY_SIZE(fifo); i++) |
@@ -879,13 +879,13 @@ nv10_graph_context_del(struct nouveau_channel *chan, int engine) | |||
879 | unsigned long flags; | 879 | unsigned long flags; |
880 | 880 | ||
881 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 881 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
882 | nv04_graph_fifo_access(dev, false); | 882 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); |
883 | 883 | ||
884 | /* Unload the context if it's the currently active one */ | 884 | /* Unload the context if it's the currently active one */ |
885 | if (nv10_graph_channel(dev) == chan) | 885 | if (nv10_graph_channel(dev) == chan) |
886 | nv10_graph_unload_context(dev); | 886 | nv10_graph_unload_context(dev); |
887 | 887 | ||
888 | nv04_graph_fifo_access(dev, true); | 888 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); |
889 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 889 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
890 | 890 | ||
891 | /* Free the context resources */ | 891 | /* Free the context resources */ |
@@ -957,8 +957,13 @@ nv10_graph_init(struct drm_device *dev, int engine) | |||
957 | } | 957 | } |
958 | 958 | ||
959 | static int | 959 | static int |
960 | nv10_graph_fini(struct drm_device *dev, int engine) | 960 | nv10_graph_fini(struct drm_device *dev, int engine, bool suspend) |
961 | { | 961 | { |
962 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); | ||
963 | if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) { | ||
964 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); | ||
965 | return -EBUSY; | ||
966 | } | ||
962 | nv10_graph_unload_context(dev); | 967 | nv10_graph_unload_context(dev); |
963 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); | 968 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); |
964 | return 0; | 969 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index affc7d7dd02..183e37512ef 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c | |||
@@ -454,13 +454,13 @@ nv20_graph_context_del(struct nouveau_channel *chan, int engine) | |||
454 | unsigned long flags; | 454 | unsigned long flags; |
455 | 455 | ||
456 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 456 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
457 | nv04_graph_fifo_access(dev, false); | 457 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); |
458 | 458 | ||
459 | /* Unload the context if it's the currently active one */ | 459 | /* Unload the context if it's the currently active one */ |
460 | if (nv10_graph_channel(dev) == chan) | 460 | if (nv10_graph_channel(dev) == chan) |
461 | nv20_graph_unload_context(dev); | 461 | nv20_graph_unload_context(dev); |
462 | 462 | ||
463 | nv04_graph_fifo_access(dev, true); | 463 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); |
464 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 464 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
465 | 465 | ||
466 | /* Free the context resources */ | 466 | /* Free the context resources */ |
@@ -654,8 +654,13 @@ nv30_graph_init(struct drm_device *dev, int engine) | |||
654 | } | 654 | } |
655 | 655 | ||
656 | int | 656 | int |
657 | nv20_graph_fini(struct drm_device *dev, int engine) | 657 | nv20_graph_fini(struct drm_device *dev, int engine, bool suspend) |
658 | { | 658 | { |
659 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); | ||
660 | if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) { | ||
661 | nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); | ||
662 | return -EBUSY; | ||
663 | } | ||
659 | nv20_graph_unload_context(dev); | 664 | nv20_graph_unload_context(dev); |
660 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); | 665 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); |
661 | return 0; | 666 | return 0; |
@@ -753,6 +758,7 @@ nv20_graph_create(struct drm_device *dev) | |||
753 | break; | 758 | break; |
754 | default: | 759 | default: |
755 | NV_ERROR(dev, "PGRAPH: unknown chipset\n"); | 760 | NV_ERROR(dev, "PGRAPH: unknown chipset\n"); |
761 | kfree(pgraph); | ||
756 | return 0; | 762 | return 0; |
757 | } | 763 | } |
758 | } else { | 764 | } else { |
@@ -774,6 +780,7 @@ nv20_graph_create(struct drm_device *dev) | |||
774 | break; | 780 | break; |
775 | default: | 781 | default: |
776 | NV_ERROR(dev, "PGRAPH: unknown chipset\n"); | 782 | NV_ERROR(dev, "PGRAPH: unknown chipset\n"); |
783 | kfree(pgraph); | ||
777 | return 0; | 784 | return 0; |
778 | } | 785 | } |
779 | } | 786 | } |
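
The two kfree(pgraph) additions close a leak on the unknown-chipset early returns in nv20_graph_create(). A compact sketch of the rule being applied, with invented names: every exit after a successful allocation must release it.

#include <stdlib.h>
#include <stdio.h>

struct pgraph { int chipset; };

static int pgraph_create(int chipset)
{
    struct pgraph *pgraph = calloc(1, sizeof(*pgraph));
    if (!pgraph)
        return -1;

    if (chipset != 0x20 && chipset != 0x30) {   /* "unknown chipset" */
        fprintf(stderr, "PGRAPH: unknown chipset\n");
        free(pgraph);    /* the added kfree(): this return used to leak */
        return 0;
    }

    pgraph->chipset = chipset;
    /* ... would register the engine and keep pgraph alive ... */
    free(pgraph);        /* freed here only because this model keeps nothing */
    return 0;
}

int main(void)
{
    return pgraph_create(0x99);
}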
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 5beb01b8ace..ba14a93d8af 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -35,89 +35,6 @@ struct nv40_graph_engine { | |||
35 | u32 grctx_size; | 35 | u32 grctx_size; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | static struct nouveau_channel * | ||
39 | nv40_graph_channel(struct drm_device *dev) | ||
40 | { | ||
41 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
42 | struct nouveau_gpuobj *grctx; | ||
43 | uint32_t inst; | ||
44 | int i; | ||
45 | |||
46 | inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR); | ||
47 | if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED)) | ||
48 | return NULL; | ||
49 | inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; | ||
50 | |||
51 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
52 | if (!dev_priv->channels.ptr[i]) | ||
53 | continue; | ||
54 | |||
55 | grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; | ||
56 | if (grctx && grctx->pinst == inst) | ||
57 | return dev_priv->channels.ptr[i]; | ||
58 | } | ||
59 | |||
60 | return NULL; | ||
61 | } | ||
62 | |||
63 | static int | ||
64 | nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) | ||
65 | { | ||
66 | uint32_t old_cp, tv = 1000, tmp; | ||
67 | int i; | ||
68 | |||
69 | old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER); | ||
70 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); | ||
71 | |||
72 | tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310); | ||
73 | tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : | ||
74 | NV40_PGRAPH_CTXCTL_0310_XFER_LOAD; | ||
75 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp); | ||
76 | |||
77 | tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304); | ||
78 | tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX; | ||
79 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp); | ||
80 | |||
81 | nouveau_wait_for_idle(dev); | ||
82 | |||
83 | for (i = 0; i < tv; i++) { | ||
84 | if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0) | ||
85 | break; | ||
86 | } | ||
87 | |||
88 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); | ||
89 | |||
90 | if (i == tv) { | ||
91 | uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT); | ||
92 | NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save); | ||
93 | NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n", | ||
94 | ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT, | ||
95 | ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK); | ||
96 | NV_ERROR(dev, "0x40030C = 0x%08x\n", | ||
97 | nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C)); | ||
98 | return -EBUSY; | ||
99 | } | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int | ||
105 | nv40_graph_unload_context(struct drm_device *dev) | ||
106 | { | ||
107 | uint32_t inst; | ||
108 | int ret; | ||
109 | |||
110 | inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR); | ||
111 | if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED)) | ||
112 | return 0; | ||
113 | inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE; | ||
114 | |||
115 | ret = nv40_graph_transfer_context(dev, inst, 1); | ||
116 | |||
117 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst); | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | static int | 38 | static int |
122 | nv40_graph_context_new(struct nouveau_channel *chan, int engine) | 39 | nv40_graph_context_new(struct nouveau_channel *chan, int engine) |
123 | { | 40 | { |
@@ -163,16 +80,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine) | |||
163 | struct nouveau_gpuobj *grctx = chan->engctx[engine]; | 80 | struct nouveau_gpuobj *grctx = chan->engctx[engine]; |
164 | struct drm_device *dev = chan->dev; | 81 | struct drm_device *dev = chan->dev; |
165 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 82 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
83 | u32 inst = 0x01000000 | (grctx->pinst >> 4); | ||
166 | unsigned long flags; | 84 | unsigned long flags; |
167 | 85 | ||
168 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 86 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
169 | nv04_graph_fifo_access(dev, false); | 87 | nv_mask(dev, 0x400720, 0x00000000, 0x00000001); |
170 | 88 | if (nv_rd32(dev, 0x40032c) == inst) | |
171 | /* Unload the context if it's the currently active one */ | 89 | nv_mask(dev, 0x40032c, 0x01000000, 0x00000000); |
172 | if (nv40_graph_channel(dev) == chan) | 90 | if (nv_rd32(dev, 0x400330) == inst) |
173 | nv40_graph_unload_context(dev); | 91 | nv_mask(dev, 0x400330, 0x01000000, 0x00000000); |
174 | 92 | nv_mask(dev, 0x400720, 0x00000001, 0x00000001); | |
175 | nv04_graph_fifo_access(dev, true); | ||
176 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 93 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
177 | 94 | ||
178 | /* Free the context resources */ | 95 | /* Free the context resources */ |
@@ -429,9 +346,20 @@ nv40_graph_init(struct drm_device *dev, int engine) | |||
429 | } | 346 | } |
430 | 347 | ||
431 | static int | 348 | static int |
432 | nv40_graph_fini(struct drm_device *dev, int engine) | 349 | nv40_graph_fini(struct drm_device *dev, int engine, bool suspend) |
433 | { | 350 | { |
434 | nv40_graph_unload_context(dev); | 351 | u32 inst = nv_rd32(dev, 0x40032c); |
352 | if (inst & 0x01000000) { | ||
353 | nv_wr32(dev, 0x400720, 0x00000000); | ||
354 | nv_wr32(dev, 0x400784, inst); | ||
355 | nv_mask(dev, 0x400310, 0x00000020, 0x00000020); | ||
356 | nv_mask(dev, 0x400304, 0x00000001, 0x00000001); | ||
357 | if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) { | ||
358 | u32 insn = nv_rd32(dev, 0x400308); | ||
359 | NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn); | ||
360 | } | ||
361 | nv_mask(dev, 0x40032c, 0x01000000, 0x00000000); | ||
362 | } | ||
435 | return 0; | 363 | return 0; |
436 | } | 364 | } |
437 | 365 | ||
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c index 6d2af292a2e..ad03a0e1fc7 100644 --- a/drivers/gpu/drm/nouveau/nv40_mpeg.c +++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c | |||
@@ -137,7 +137,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine) | |||
137 | } | 137 | } |
138 | 138 | ||
139 | static int | 139 | static int |
140 | nv40_mpeg_fini(struct drm_device *dev, int engine) | 140 | nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend) |
141 | { | 141 | { |
142 | /*XXX: context save? */ | 142 | /*XXX: context save? */ |
143 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); | 143 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index ebabacf38da..5d989073ba6 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) | |||
104 | OUT_RING(evo, nv_crtc->lut.depth == 8 ? | 104 | OUT_RING(evo, nv_crtc->lut.depth == 8 ? |
105 | NV50_EVO_CRTC_CLUT_MODE_OFF : | 105 | NV50_EVO_CRTC_CLUT_MODE_OFF : |
106 | NV50_EVO_CRTC_CLUT_MODE_ON); | 106 | NV50_EVO_CRTC_CLUT_MODE_ON); |
107 | OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8); | 107 | OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); |
108 | if (dev_priv->chipset != 0x50) { | 108 | if (dev_priv->chipset != 0x50) { |
109 | BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); | 109 | BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); |
110 | OUT_RING(evo, NvEvoVRAM); | 110 | OUT_RING(evo, NvEvoVRAM); |
@@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
372 | 372 | ||
373 | nouveau_bo_unmap(cursor); | 373 | nouveau_bo_unmap(cursor); |
374 | 374 | ||
375 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); | 375 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); |
376 | nv_crtc->cursor.show(nv_crtc, true); | 376 | nv_crtc->cursor.show(nv_crtc, true); |
377 | 377 | ||
378 | out: | 378 | out: |
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
519 | struct drm_device *dev = nv_crtc->base.dev; | 519 | struct drm_device *dev = nv_crtc->base.dev; |
520 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 520 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
521 | struct nouveau_channel *evo = nv50_display(dev)->master; | 521 | struct nouveau_channel *evo = nv50_display(dev)->master; |
522 | struct drm_framebuffer *drm_fb = nv_crtc->base.fb; | 522 | struct drm_framebuffer *drm_fb; |
523 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 523 | struct nouveau_framebuffer *fb; |
524 | int ret; | 524 | int ret; |
525 | 525 | ||
526 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | 526 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
527 | 527 | ||
528 | /* no fb bound */ | ||
529 | if (!atomic && !crtc->fb) { | ||
530 | NV_DEBUG_KMS(dev, "No FB bound\n"); | ||
531 | return 0; | ||
532 | } | ||
533 | |||
528 | /* If atomic, we want to switch to the fb we were passed, so | 534 | /* If atomic, we want to switch to the fb we were passed, so |
529 | * now we update pointers to do that. (We don't pin; just | 535 | * now we update pointers to do that. (We don't pin; just |
530 | * assume we're already pinned and update the base address.) | 536 | * assume we're already pinned and update the base address.) |
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
533 | drm_fb = passed_fb; | 539 | drm_fb = passed_fb; |
534 | fb = nouveau_framebuffer(passed_fb); | 540 | fb = nouveau_framebuffer(passed_fb); |
535 | } else { | 541 | } else { |
542 | drm_fb = crtc->fb; | ||
543 | fb = nouveau_framebuffer(crtc->fb); | ||
536 | /* If not atomic, we can go ahead and pin, and unpin the | 544 | /* If not atomic, we can go ahead and pin, and unpin the |
537 | * old fb we were passed. | 545 | * old fb we were passed. |
538 | */ | 546 | */ |
@@ -546,7 +554,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
546 | } | 554 | } |
547 | } | 555 | } |
548 | 556 | ||
549 | nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; | 557 | nv_crtc->fb.offset = fb->nvbo->bo.offset; |
550 | nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); | 558 | nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); |
551 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; | 559 | nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; |
552 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { | 560 | if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { |
@@ -747,7 +755,7 @@ nv50_crtc_create(struct drm_device *dev, int index) | |||
747 | } | 755 | } |
748 | nv_crtc->lut.depth = 0; | 756 | nv_crtc->lut.depth = 0; |
749 | 757 | ||
750 | ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM, | 758 | ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM, |
751 | 0, 0x0000, &nv_crtc->lut.nvbo); | 759 | 0, 0x0000, &nv_crtc->lut.nvbo); |
752 | if (!ret) { | 760 | if (!ret) { |
753 | ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); | 761 | ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); |
@@ -773,7 +781,7 @@ nv50_crtc_create(struct drm_device *dev, int index) | |||
773 | drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); | 781 | drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); |
774 | drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); | 782 | drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); |
775 | 783 | ||
776 | ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, | 784 | ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, |
777 | 0, 0x0000, &nv_crtc->cursor.nvbo); | 785 | 0, 0x0000, &nv_crtc->cursor.nvbo); |
778 | if (!ret) { | 786 | if (!ret) { |
779 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); | 787 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); |
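
Several hunks here replace bo.mem.start << PAGE_SHIFT with bo.offset: the start field counts pages while offset is already a byte address, so the explicit page-to-byte conversion disappears at each call site. The example values below are made up:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint64_t mem_start = 0x1234;                  /* page number */
    uint64_t offset    = mem_start << PAGE_SHIFT; /* equivalent byte address */

    printf("page 0x%llx -> byte offset 0x%llx\n",
           (unsigned long long)mem_start, (unsigned long long)offset);
    printf("EVO wants it >> 8: 0x%llx\n",         /* as in OUT_RING(..., >> 8) */
           (unsigned long long)(offset >> 8));
    return 0;
}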
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 08da478ba54..db1a5f4b711 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
415 | 415 | ||
416 | /* synchronise with the rendering channel, if necessary */ | 416 | /* synchronise with the rendering channel, if necessary */ |
417 | if (likely(chan)) { | 417 | if (likely(chan)) { |
418 | u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset; | ||
419 | |||
420 | ret = RING_SPACE(chan, 10); | 418 | ret = RING_SPACE(chan, 10); |
421 | if (ret) { | 419 | if (ret) { |
422 | WIND_RING(evo); | 420 | WIND_RING(evo); |
@@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
438 | else | 436 | else |
439 | OUT_RING (chan, chan->vram_handle); | 437 | OUT_RING (chan, chan->vram_handle); |
440 | } else { | 438 | } else { |
439 | u64 offset = chan->dispc_vma[nv_crtc->index].offset; | ||
440 | offset += dispc->sem.offset; | ||
441 | BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); | 441 | BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); |
442 | OUT_RING (chan, upper_32_bits(offset)); | 442 | OUT_RING (chan, upper_32_bits(offset)); |
443 | OUT_RING (chan, lower_32_bits(offset)); | 443 | OUT_RING (chan, lower_32_bits(offset)); |
@@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
484 | OUT_RING (evo, 0x00000000); | 484 | OUT_RING (evo, 0x00000000); |
485 | OUT_RING (evo, 0x00000000); | 485 | OUT_RING (evo, 0x00000000); |
486 | BEGIN_RING(evo, 0, 0x0800, 5); | 486 | BEGIN_RING(evo, 0, 0x0800, 5); |
487 | OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8); | 487 | OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8); |
488 | OUT_RING (evo, 0); | 488 | OUT_RING (evo, 0); |
489 | OUT_RING (evo, (fb->height << 16) | fb->width); | 489 | OUT_RING (evo, (fb->height << 16) | fb->width); |
490 | OUT_RING (evo, nv_fb->r_pitch); | 490 | OUT_RING (evo, nv_fb->r_pitch); |
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index c8e83c1a4de..c99d9751880 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c | |||
@@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo) | |||
38 | return; | 38 | return; |
39 | *pevo = NULL; | 39 | *pevo = NULL; |
40 | 40 | ||
41 | nouveau_ramht_ref(NULL, &evo->ramht, evo); | ||
41 | nouveau_gpuobj_channel_takedown(evo); | 42 | nouveau_gpuobj_channel_takedown(evo); |
42 | nouveau_bo_unmap(evo->pushbuf_bo); | 43 | nouveau_bo_unmap(evo->pushbuf_bo); |
43 | nouveau_bo_ref(NULL, &evo->pushbuf_bo); | 44 | nouveau_bo_ref(NULL, &evo->pushbuf_bo); |
@@ -116,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid, | |||
116 | evo->user_get = 4; | 117 | evo->user_get = 4; |
117 | evo->user_put = 0; | 118 | evo->user_put = 0; |
118 | 119 | ||
119 | ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, | 120 | ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, |
120 | &evo->pushbuf_bo); | 121 | &evo->pushbuf_bo); |
121 | if (ret == 0) | 122 | if (ret == 0) |
122 | ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); | 123 | ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); |
@@ -153,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) | |||
153 | { | 154 | { |
154 | struct drm_device *dev = evo->dev; | 155 | struct drm_device *dev = evo->dev; |
155 | int id = evo->id, ret, i; | 156 | int id = evo->id, ret, i; |
156 | u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; | 157 | u64 pushbuf = evo->pushbuf_bo->bo.offset; |
157 | u32 tmp; | 158 | u32 tmp; |
158 | 159 | ||
159 | tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); | 160 | tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); |
@@ -331,16 +332,15 @@ nv50_evo_create(struct drm_device *dev) | |||
331 | if (ret) | 332 | if (ret) |
332 | goto err; | 333 | goto err; |
333 | 334 | ||
334 | ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 335 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
335 | 0, 0x0000, &dispc->sem.bo); | 336 | 0, 0x0000, &dispc->sem.bo); |
336 | if (!ret) { | 337 | if (!ret) { |
337 | offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; | ||
338 | |||
339 | ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); | 338 | ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); |
340 | if (!ret) | 339 | if (!ret) |
341 | ret = nouveau_bo_map(dispc->sem.bo); | 340 | ret = nouveau_bo_map(dispc->sem.bo); |
342 | if (ret) | 341 | if (ret) |
343 | nouveau_bo_ref(NULL, &dispc->sem.bo); | 342 | nouveau_bo_ref(NULL, &dispc->sem.bo); |
343 | offset = dispc->sem.bo->bo.offset; | ||
344 | } | 344 | } |
345 | 345 | ||
346 | if (ret) | 346 | if (ret) |
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 791ded1c5c6..dc75a720652 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
159 | struct drm_device *dev = nfbdev->dev; | 159 | struct drm_device *dev = nfbdev->dev; |
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 160 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
161 | struct nouveau_channel *chan = dev_priv->channel; | 161 | struct nouveau_channel *chan = dev_priv->channel; |
162 | struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; | 162 | struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; |
163 | int ret, format; | 163 | int ret, format; |
164 | 164 | ||
165 | switch (info->var.bits_per_pixel) { | 165 | switch (info->var.bits_per_pixel) { |
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
247 | OUT_RING(chan, info->fix.line_length); | 247 | OUT_RING(chan, info->fix.line_length); |
248 | OUT_RING(chan, info->var.xres_virtual); | 248 | OUT_RING(chan, info->var.xres_virtual); |
249 | OUT_RING(chan, info->var.yres_virtual); | 249 | OUT_RING(chan, info->var.yres_virtual); |
250 | OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); | 250 | OUT_RING(chan, upper_32_bits(fb->vma.offset)); |
251 | OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); | 251 | OUT_RING(chan, lower_32_bits(fb->vma.offset)); |
252 | BEGIN_RING(chan, NvSub2D, 0x0230, 2); | 252 | BEGIN_RING(chan, NvSub2D, 0x0230, 2); |
253 | OUT_RING(chan, format); | 253 | OUT_RING(chan, format); |
254 | OUT_RING(chan, 1); | 254 | OUT_RING(chan, 1); |
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
256 | OUT_RING(chan, info->fix.line_length); | 256 | OUT_RING(chan, info->fix.line_length); |
257 | OUT_RING(chan, info->var.xres_virtual); | 257 | OUT_RING(chan, info->var.xres_virtual); |
258 | OUT_RING(chan, info->var.yres_virtual); | 258 | OUT_RING(chan, info->var.yres_virtual); |
259 | OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); | 259 | OUT_RING(chan, upper_32_bits(fb->vma.offset)); |
260 | OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); | 260 | OUT_RING(chan, lower_32_bits(fb->vma.offset)); |
261 | 261 | ||
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
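
The fbcon hunks address the 2D engine through the framebuffer's own VMA (fb->vma) rather than the backing bo, and push the 64-bit address as two ring words. That split is all upper_32_bits()/lower_32_bits() do; a self-contained model with an example address:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

int main(void)
{
    uint64_t vma_offset = 0x0000000123456000ULL;  /* example VMA address */

    /* These two words are what lands in the ring, high half first. */
    printf("hi 0x%08x lo 0x%08x\n",
           (unsigned)upper_32_bits(vma_offset),
           (unsigned)lower_32_bits(vma_offset));
    return 0;
}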
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index e25cbb46789..d43c46caa76 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include "nouveau_grctx.h" | 31 | #include "nouveau_grctx.h" |
32 | #include "nouveau_dma.h" | 32 | #include "nouveau_dma.h" |
33 | #include "nouveau_vm.h" | 33 | #include "nouveau_vm.h" |
34 | #include "nouveau_ramht.h" | ||
35 | #include "nv50_evo.h" | 34 | #include "nv50_evo.h" |
36 | 35 | ||
37 | struct nv50_graph_engine { | 36 | struct nv50_graph_engine { |
@@ -125,7 +124,6 @@ static void | |||
125 | nv50_graph_init_reset(struct drm_device *dev) | 124 | nv50_graph_init_reset(struct drm_device *dev) |
126 | { | 125 | { |
127 | uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); | 126 | uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); |
128 | |||
129 | NV_DEBUG(dev, "\n"); | 127 | NV_DEBUG(dev, "\n"); |
130 | 128 | ||
131 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); | 129 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); |
@@ -255,9 +253,13 @@ nv50_graph_init(struct drm_device *dev, int engine) | |||
255 | } | 253 | } |
256 | 254 | ||
257 | static int | 255 | static int |
258 | nv50_graph_fini(struct drm_device *dev, int engine) | 256 | nv50_graph_fini(struct drm_device *dev, int engine, bool suspend) |
259 | { | 257 | { |
260 | NV_DEBUG(dev, "\n"); | 258 | nv_mask(dev, 0x400500, 0x00010001, 0x00000000); |
259 | if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) { | ||
260 | nv_mask(dev, 0x400500, 0x00010001, 0x00010001); | ||
261 | return -EBUSY; | ||
262 | } | ||
261 | nv50_graph_unload_context(dev); | 263 | nv50_graph_unload_context(dev); |
262 | nv_wr32(dev, 0x40013c, 0x00000000); | 264 | nv_wr32(dev, 0x40013c, 0x00000000); |
263 | return 0; | 265 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 4f95a1e5822..a7c12c94a5a 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node { | |||
305 | u32 align; | 305 | u32 align; |
306 | }; | 306 | }; |
307 | 307 | ||
308 | |||
309 | int | 308 | int |
310 | nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) | 309 | nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, |
310 | u32 size, u32 align) | ||
311 | { | 311 | { |
312 | struct drm_device *dev = gpuobj->dev; | 312 | struct drm_device *dev = gpuobj->dev; |
313 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 313 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) | |||
336 | if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) | 336 | if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) |
337 | flags |= NV_MEM_ACCESS_SYS; | 337 | flags |= NV_MEM_ACCESS_SYS; |
338 | 338 | ||
339 | ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags, | 339 | ret = nouveau_vm_get(chan->vm, size, 12, flags, |
340 | &node->chan_vma); | 340 | &node->chan_vma); |
341 | if (ret) { | 341 | if (ret) { |
342 | vram->put(dev, &node->vram); | 342 | vram->put(dev, &node->vram); |
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) | |||
345 | } | 345 | } |
346 | 346 | ||
347 | nouveau_vm_map(&node->chan_vma, node->vram); | 347 | nouveau_vm_map(&node->chan_vma, node->vram); |
348 | gpuobj->vinst = node->chan_vma.offset; | 348 | gpuobj->linst = node->chan_vma.offset; |
349 | } | 349 | } |
350 | 350 | ||
351 | gpuobj->size = size; | 351 | gpuobj->size = size; |
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c index 1dc5913f78c..b57a2d180ad 100644 --- a/drivers/gpu/drm/nouveau/nv50_mpeg.c +++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c | |||
@@ -160,7 +160,7 @@ nv50_mpeg_init(struct drm_device *dev, int engine) | |||
160 | } | 160 | } |
161 | 161 | ||
162 | static int | 162 | static int |
163 | nv50_mpeg_fini(struct drm_device *dev, int engine) | 163 | nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend) |
164 | { | 164 | { |
165 | /*XXX: context save for s/r */ | 165 | /*XXX: context save for s/r */ |
166 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); | 166 | nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index c25c5938642..ffe8b483b7b 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
@@ -318,6 +318,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry) | |||
318 | uint32_t tmp; | 318 | uint32_t tmp; |
319 | 319 | ||
320 | tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); | 320 | tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); |
321 | if (!tmp) | ||
322 | tmp = nv_rd32(dev, 0x610798 + (or * 8)); | ||
321 | 323 | ||
322 | switch ((tmp & 0x00000f00) >> 8) { | 324 | switch ((tmp & 0x00000f00) >> 8) { |
323 | case 8: | 325 | case 8: |
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 1a0dd491a0e..40b84f22d81 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
156 | pinstmem->flush(vm->dev); | 156 | pinstmem->flush(vm->dev); |
157 | 157 | ||
158 | /* BAR */ | 158 | /* BAR */ |
159 | if (vm != dev_priv->chan_vm) { | 159 | if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) { |
160 | nv50_vm_flush_engine(vm->dev, 6); | 160 | nv50_vm_flush_engine(vm->dev, 6); |
161 | return; | 161 | return; |
162 | } | 162 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index ffbc3d8cf5b..af32daecd1e 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c | |||
@@ -51,9 +51,7 @@ void | |||
51 | nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) | 51 | nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) |
52 | { | 52 | { |
53 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 53 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
54 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | 54 | struct nouveau_mm *mm = dev_priv->engine.vram.mm; |
55 | struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; | ||
56 | struct nouveau_mm *mm = man->priv; | ||
57 | struct nouveau_mm_node *this; | 55 | struct nouveau_mm_node *this; |
58 | struct nouveau_mem *mem; | 56 | struct nouveau_mem *mem; |
59 | 57 | ||
@@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, | |||
84 | u32 memtype, struct nouveau_mem **pmem) | 82 | u32 memtype, struct nouveau_mem **pmem) |
85 | { | 83 | { |
86 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 84 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
87 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | 85 | struct nouveau_mm *mm = dev_priv->engine.vram.mm; |
88 | struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; | ||
89 | struct nouveau_mm *mm = man->priv; | ||
90 | struct nouveau_mm_node *r; | 86 | struct nouveau_mm_node *r; |
91 | struct nouveau_mem *mem; | 87 | struct nouveau_mem *mem; |
92 | int comp = (memtype & 0x300) >> 8; | 88 | int comp = (memtype & 0x300) >> 8; |
@@ -190,22 +186,35 @@ int | |||
190 | nv50_vram_init(struct drm_device *dev) | 186 | nv50_vram_init(struct drm_device *dev) |
191 | { | 187 | { |
192 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 188 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
189 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
190 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | ||
191 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | ||
192 | u32 rblock, length; | ||
193 | 193 | ||
194 | dev_priv->vram_size = nv_rd32(dev, 0x10020c); | 194 | dev_priv->vram_size = nv_rd32(dev, 0x10020c); |
195 | dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; | 195 | dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; |
196 | dev_priv->vram_size &= 0xffffffff00ULL; | 196 | dev_priv->vram_size &= 0xffffffff00ULL; |
197 | 197 | ||
198 | switch (dev_priv->chipset) { | 198 | /* IGPs, no funky reordering happens here, they don't have VRAM */ |
199 | case 0xaa: | 199 | if (dev_priv->chipset == 0xaa || |
200 | case 0xac: | 200 | dev_priv->chipset == 0xac || |
201 | case 0xaf: | 201 | dev_priv->chipset == 0xaf) { |
202 | dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; | 202 | dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; |
203 | dev_priv->vram_rblock_size = 4096; | 203 | rblock = 4096 >> 12; |
204 | break; | 204 | } else { |
205 | default: | 205 | rblock = nv50_vram_rblock(dev) >> 12; |
206 | dev_priv->vram_rblock_size = nv50_vram_rblock(dev); | ||
207 | break; | ||
208 | } | 206 | } |
209 | 207 | ||
210 | return 0; | 208 | length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail; |
209 | |||
210 | return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock); | ||
211 | } | ||
212 | |||
213 | void | ||
214 | nv50_vram_fini(struct drm_device *dev) | ||
215 | { | ||
216 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
217 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
218 | |||
219 | nouveau_mm_fini(&vram->mm); | ||
211 | } | 220 | } |
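
A worked example of the new nv50_vram_init() bookkeeping: the VGA area is carved off the front of VRAM and the VBIOS area off the back, in 4 KiB pages, before the range is handed to nouveau_mm_init(). The total VRAM size here is an example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t vram_size = 512ULL << 20;          /* example: 512 MiB board */
    uint32_t rsvd_head = ( 256 * 1024) >> 12;   /* vga memory */
    uint32_t rsvd_tail = (1024 * 1024) >> 12;   /* vbios etc */
    uint32_t length = (uint32_t)(vram_size >> 12) - rsvd_head - rsvd_tail;

    printf("mm covers pages %u..%u (%u pages)\n",
           (unsigned)rsvd_head,
           (unsigned)(rsvd_head + length - 1),   /* 64..130815 here */
           (unsigned)length);
    return 0;
}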
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index 75b809a5174..edece9c616e 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c | |||
@@ -138,7 +138,7 @@ nv84_crypt_isr(struct drm_device *dev) | |||
138 | } | 138 | } |
139 | 139 | ||
140 | static int | 140 | static int |
141 | nv84_crypt_fini(struct drm_device *dev, int engine) | 141 | nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend) |
142 | { | 142 | { |
143 | nv_wr32(dev, 0x102140, 0x00000000); | 143 | nv_wr32(dev, 0x102140, 0x00000000); |
144 | return 0; | 144 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c index b86820a6122..8f356d58e40 100644 --- a/drivers/gpu/drm/nouveau/nva3_copy.c +++ b/drivers/gpu/drm/nouveau/nva3_copy.c | |||
@@ -140,7 +140,7 @@ nva3_copy_init(struct drm_device *dev, int engine) | |||
140 | } | 140 | } |
141 | 141 | ||
142 | static int | 142 | static int |
143 | nva3_copy_fini(struct drm_device *dev, int engine) | 143 | nva3_copy_fini(struct drm_device *dev, int engine, bool suspend) |
144 | { | 144 | { |
145 | nv_mask(dev, 0x104048, 0x00000003, 0x00000000); | 145 | nv_mask(dev, 0x104048, 0x00000003, 0x00000000); |
146 | 146 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c index 208fa7ab3f4..dddf006f6d8 100644 --- a/drivers/gpu/drm/nouveau/nvc0_copy.c +++ b/drivers/gpu/drm/nouveau/nvc0_copy.c | |||
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine) | |||
48 | struct nouveau_gpuobj *ctx = NULL; | 48 | struct nouveau_gpuobj *ctx = NULL; |
49 | int ret; | 49 | int ret; |
50 | 50 | ||
51 | ret = nouveau_gpuobj_new(dev, NULL, 256, 256, | 51 | ret = nouveau_gpuobj_new(dev, chan, 256, 256, |
52 | NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER | | 52 | NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER | |
53 | NVOBJ_FLAG_ZERO_ALLOC, &ctx); | 53 | NVOBJ_FLAG_ZERO_ALLOC, &ctx); |
54 | if (ret) | 54 | if (ret) |
55 | return ret; | 55 | return ret; |
56 | 56 | ||
57 | nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst)); | 57 | nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst)); |
58 | nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst)); | 58 | nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst)); |
59 | dev_priv->engine.instmem.flush(dev); | 59 | dev_priv->engine.instmem.flush(dev); |
60 | 60 | ||
61 | chan->engctx[engine] = ctx; | 61 | chan->engctx[engine] = ctx; |
@@ -127,7 +127,7 @@ nvc0_copy_init(struct drm_device *dev, int engine) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | static int | 129 | static int |
130 | nvc0_copy_fini(struct drm_device *dev, int engine) | 130 | nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend) |
131 | { | 131 | { |
132 | struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); | 132 | struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); |
133 | 133 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c index 26a996025dd..08e6b118f02 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fb.c +++ b/drivers/gpu/drm/nouveau/nvc0_fb.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2010 Red Hat Inc. | 2 | * Copyright 2011 Red Hat Inc. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -23,16 +23,80 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include "drmP.h" | 25 | #include "drmP.h" |
26 | 26 | #include "drm.h" | |
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_drm.h" | ||
29 | |||
30 | struct nvc0_fb_priv { | ||
31 | struct page *r100c10_page; | ||
32 | dma_addr_t r100c10; | ||
33 | }; | ||
34 | |||
35 | static void | ||
36 | nvc0_fb_destroy(struct drm_device *dev) | ||
37 | { | ||
38 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
39 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
40 | struct nvc0_fb_priv *priv = pfb->priv; | ||
41 | |||
42 | if (priv->r100c10_page) { | ||
43 | pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE, | ||
44 | PCI_DMA_BIDIRECTIONAL); | ||
45 | __free_page(priv->r100c10_page); | ||
46 | } | ||
47 | |||
48 | kfree(priv); | ||
49 | pfb->priv = NULL; | ||
50 | } | ||
51 | |||
52 | static int | ||
53 | nvc0_fb_create(struct drm_device *dev) | ||
54 | { | ||
55 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
56 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
57 | struct nvc0_fb_priv *priv; | ||
58 | |||
59 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
60 | if (!priv) | ||
61 | return -ENOMEM; | ||
62 | pfb->priv = priv; | ||
63 | |||
64 | priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
65 | if (!priv->r100c10_page) { | ||
66 | nvc0_fb_destroy(dev); | ||
67 | return -ENOMEM; | ||
68 | } | ||
69 | |||
70 | priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0, | ||
71 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
72 | if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) { | ||
73 | nvc0_fb_destroy(dev); | ||
74 | return -EFAULT; | ||
75 | } | ||
76 | |||
77 | return 0; | ||
78 | } | ||
28 | 79 | ||
29 | int | 80 | int |
30 | nvc0_fb_init(struct drm_device *dev) | 81 | nvc0_fb_init(struct drm_device *dev) |
31 | { | 82 | { |
83 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
84 | struct nvc0_fb_priv *priv; | ||
85 | int ret; | ||
86 | |||
87 | if (!dev_priv->engine.fb.priv) { | ||
88 | ret = nvc0_fb_create(dev); | ||
89 | if (ret) | ||
90 | return ret; | ||
91 | } | ||
92 | priv = dev_priv->engine.fb.priv; | ||
93 | |||
94 | nv_wr32(dev, 0x100c10, priv->r100c10 >> 8); | ||
32 | return 0; | 95 | return 0; |
33 | } | 96 | } |
34 | 97 | ||
35 | void | 98 | void |
36 | nvc0_fb_takedown(struct drm_device *dev) | 99 | nvc0_fb_takedown(struct drm_device *dev) |
37 | { | 100 | { |
101 | nvc0_fb_destroy(dev); | ||
38 | } | 102 | } |
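
nvc0_fb now allocates a zeroed scratch page, DMA-maps it, and programs its bus address (>> 8) into 0x100c10; nvc0_fb_destroy() unmaps and frees it, and doubles as the error path of nvc0_fb_create(). A user-space model of that create/destroy pairing, with plain allocation standing in for alloc_page()/pci_map_page():

#include <stdlib.h>

struct priv { void *page; };

static struct priv *g;

static void destroy(void)
{
    if (g) {
        free(g->page);   /* would pci_unmap_page() + __free_page() */
        free(g);
        g = NULL;
    }
}

static int create(void)
{
    g = calloc(1, sizeof(*g));
    if (!g)
        return -1;
    g->page = calloc(1, 4096);   /* stands in for alloc_page(__GFP_ZERO) */
    if (!g->page) {
        destroy();               /* mirrors nvc0_fb_destroy() on error */
        return -1;
    }
    return 0;                    /* callers never see a half-built priv */
}

int main(void)
{
    int r = create();
    destroy();
    return r;
}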
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index fa5d4c23438..a495e48197c 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c | |||
@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info) | |||
159 | struct drm_device *dev = nfbdev->dev; | 159 | struct drm_device *dev = nfbdev->dev; |
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 160 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
161 | struct nouveau_channel *chan = dev_priv->channel; | 161 | struct nouveau_channel *chan = dev_priv->channel; |
162 | struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; | 162 | struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; |
163 | int ret, format; | 163 | int ret, format; |
164 | 164 | ||
165 | ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); | 165 | ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); |
@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) | |||
203 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); | 203 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); |
204 | OUT_RING (chan, 0x0000902d); | 204 | OUT_RING (chan, 0x0000902d); |
205 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); | 205 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); |
206 | OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset)); | 206 | OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset)); |
207 | OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset)); | 207 | OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset)); |
208 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); | 208 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); |
209 | OUT_RING (chan, 0); | 209 | OUT_RING (chan, 0); |
210 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); | 210 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); |
@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) | |||
249 | OUT_RING (chan, info->fix.line_length); | 249 | OUT_RING (chan, info->fix.line_length); |
250 | OUT_RING (chan, info->var.xres_virtual); | 250 | OUT_RING (chan, info->var.xres_virtual); |
251 | OUT_RING (chan, info->var.yres_virtual); | 251 | OUT_RING (chan, info->var.yres_virtual); |
252 | OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); | 252 | OUT_RING (chan, upper_32_bits(fb->vma.offset)); |
253 | OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); | 253 | OUT_RING (chan, lower_32_bits(fb->vma.offset)); |
254 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); | 254 | BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); |
255 | OUT_RING (chan, format); | 255 | OUT_RING (chan, format); |
256 | OUT_RING (chan, 1); | 256 | OUT_RING (chan, 1); |
@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) | |||
260 | OUT_RING (chan, info->fix.line_length); | 260 | OUT_RING (chan, info->fix.line_length); |
261 | OUT_RING (chan, info->var.xres_virtual); | 261 | OUT_RING (chan, info->var.xres_virtual); |
262 | OUT_RING (chan, info->var.yres_virtual); | 262 | OUT_RING (chan, info->var.yres_virtual); |
263 | OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); | 263 | OUT_RING (chan, upper_32_bits(fb->vma.offset)); |
264 | OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); | 264 | OUT_RING (chan, lower_32_bits(fb->vma.offset)); |
265 | FIRE_RING (chan); | 265 | FIRE_RING (chan); |
266 | 266 | ||
267 | return 0; | 267 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index fb4f5943e01..6f9f341c3e8 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c | |||
@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev) | |||
210 | int i; | 210 | int i; |
211 | 211 | ||
212 | for (i = 0; i < 128; i++) { | 212 | for (i = 0; i < 128; i++) { |
213 | if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1)) | 213 | if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1)) |
214 | continue; | 214 | continue; |
215 | 215 | ||
216 | nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000); | 216 | nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000); |
217 | nv_wr32(dev, 0x002634, i); | 217 | nv_wr32(dev, 0x002634, i); |
218 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { | 218 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { |
219 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", | 219 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", |
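
The PFIFO unload fix is a stride correction: per-channel state sits 8 bytes apart, so indexing with i * 4 touched the wrong register for every other channel. The addresses involved, printed side by side:

#include <stdio.h>

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("ch %d: wrong 0x%06x right 0x%06x\n",
               i, 0x003004 + i * 4, 0x003004 + i * 8);
    /* wrong and right agree only for channel 0 */
    return 0;
}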
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index ca6db204d64..5b2f6f42046 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c | |||
@@ -28,7 +28,34 @@ | |||
28 | 28 | ||
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_mm.h" | 30 | #include "nouveau_mm.h" |
31 | |||
31 | #include "nvc0_graph.h" | 32 | #include "nvc0_graph.h" |
33 | #include "nvc0_grhub.fuc.h" | ||
34 | #include "nvc0_grgpc.fuc.h" | ||
35 | |||
36 | static void | ||
37 | nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base) | ||
38 | { | ||
39 | NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base, | ||
40 | nv_rd32(dev, base + 0x400)); | ||
41 | NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, | ||
42 | nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804), | ||
43 | nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c)); | ||
44 | NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, | ||
45 | nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814), | ||
46 | nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c)); | ||
47 | } | ||
48 | |||
49 | static void | ||
50 | nvc0_graph_ctxctl_debug(struct drm_device *dev) | ||
51 | { | ||
52 | u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff; | ||
53 | u32 gpc; | ||
54 | |||
55 | nvc0_graph_ctxctl_debug_unit(dev, 0x409000); | ||
56 | for (gpc = 0; gpc < gpcnr; gpc++) | ||
57 | nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000)); | ||
58 | } | ||
32 | 59 | ||
33 | static int | 60 | static int |
34 | nvc0_graph_load_context(struct nouveau_channel *chan) | 61 | nvc0_graph_load_context(struct nouveau_channel *chan) |
@@ -72,24 +99,44 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) | |||
72 | if (!ctx) | 99 | if (!ctx) |
73 | return -ENOMEM; | 100 | return -ENOMEM; |
74 | 101 | ||
75 | nvc0_graph_load_context(chan); | 102 | if (!nouveau_ctxfw) { |
76 | 103 | nv_wr32(dev, 0x409840, 0x80000000); | |
77 | nv_wo32(grch->grctx, 0x1c, 1); | 104 | nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); |
78 | nv_wo32(grch->grctx, 0x20, 0); | 105 | nv_wr32(dev, 0x409504, 0x00000001); |
79 | nv_wo32(grch->grctx, 0x28, 0); | 106 | if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { |
80 | nv_wo32(grch->grctx, 0x2c, 0); | 107 | NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n"); |
81 | dev_priv->engine.instmem.flush(dev); | 108 | nvc0_graph_ctxctl_debug(dev); |
82 | 109 | ret = -EBUSY; | |
83 | ret = nvc0_grctx_generate(chan); | 110 | goto err; |
84 | if (ret) { | 111 | } |
85 | kfree(ctx); | 112 | } else { |
86 | return ret; | 113 | nvc0_graph_load_context(chan); |
114 | |||
115 | nv_wo32(grch->grctx, 0x1c, 1); | ||
116 | nv_wo32(grch->grctx, 0x20, 0); | ||
117 | nv_wo32(grch->grctx, 0x28, 0); | ||
118 | nv_wo32(grch->grctx, 0x2c, 0); | ||
119 | dev_priv->engine.instmem.flush(dev); | ||
87 | } | 120 | } |
88 | 121 | ||
89 | ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); | 122 | ret = nvc0_grctx_generate(chan); |
90 | if (ret) { | 123 | if (ret) |
91 | kfree(ctx); | 124 | goto err; |
92 | return ret; | 125 | |
126 | if (!nouveau_ctxfw) { | ||
127 | nv_wr32(dev, 0x409840, 0x80000000); | ||
128 | nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); | ||
129 | nv_wr32(dev, 0x409504, 0x00000002); | ||
130 | if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { | ||
131 | NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n"); | ||
132 | nvc0_graph_ctxctl_debug(dev); | ||
133 | ret = -EBUSY; | ||
134 | goto err; | ||
135 | } | ||
136 | } else { | ||
137 | ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); | ||
138 | if (ret) | ||
139 | goto err; | ||
93 | } | 140 | } |
94 | 141 | ||
95 | for (i = 0; i < priv->grctx_size; i += 4) | 142 | for (i = 0; i < priv->grctx_size; i += 4) |
@@ -97,6 +144,10 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) | |||
97 | 144 | ||
98 | priv->grctx_vals = ctx; | 145 | priv->grctx_vals = ctx; |
99 | return 0; | 146 | return 0; |
147 | |||
148 | err: | ||
149 | kfree(ctx); | ||
150 | return ret; | ||
100 | } | 151 | } |
101 | 152 | ||
102 | static int | 153 | static int |
@@ -108,50 +159,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) | |||
108 | int i = 0, gpc, tp, ret; | 159 | int i = 0, gpc, tp, ret; |
109 | u32 magic; | 160 | u32 magic; |
110 | 161 | ||
111 | ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM, | 162 | ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, |
112 | &grch->unk408004); | 163 | &grch->unk408004); |
113 | if (ret) | 164 | if (ret) |
114 | return ret; | 165 | return ret; |
115 | 166 | ||
116 | ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM, | 167 | ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM, |
117 | &grch->unk40800c); | 168 | &grch->unk40800c); |
118 | if (ret) | 169 | if (ret) |
119 | return ret; | 170 | return ret; |
120 | 171 | ||
121 | ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, | 172 | ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096, |
122 | NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, | 173 | NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, |
123 | &grch->unk418810); | 174 | &grch->unk418810); |
124 | if (ret) | 175 | if (ret) |
125 | return ret; | 176 | return ret; |
126 | 177 | ||
127 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM, | 178 | ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM, |
128 | &grch->mmio); | 179 | &grch->mmio); |
129 | if (ret) | 180 | if (ret) |
130 | return ret; | 181 | return ret; |
131 | 182 | ||
132 | 183 | ||
133 | nv_wo32(grch->mmio, i++ * 4, 0x00408004); | 184 | nv_wo32(grch->mmio, i++ * 4, 0x00408004); |
134 | nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); | 185 | nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8); |
135 | nv_wo32(grch->mmio, i++ * 4, 0x00408008); | 186 | nv_wo32(grch->mmio, i++ * 4, 0x00408008); |
136 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); | 187 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); |
137 | 188 | ||
138 | nv_wo32(grch->mmio, i++ * 4, 0x0040800c); | 189 | nv_wo32(grch->mmio, i++ * 4, 0x0040800c); |
139 | nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); | 190 | nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8); |
140 | nv_wo32(grch->mmio, i++ * 4, 0x00408010); | 191 | nv_wo32(grch->mmio, i++ * 4, 0x00408010); |
141 | nv_wo32(grch->mmio, i++ * 4, 0x80000000); | 192 | nv_wo32(grch->mmio, i++ * 4, 0x80000000); |
142 | 193 | ||
143 | nv_wo32(grch->mmio, i++ * 4, 0x00418810); | 194 | nv_wo32(grch->mmio, i++ * 4, 0x00418810); |
144 | nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12); | 195 | nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12); |
145 | nv_wo32(grch->mmio, i++ * 4, 0x00419848); | 196 | nv_wo32(grch->mmio, i++ * 4, 0x00419848); |
146 | nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12); | 197 | nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12); |
147 | 198 | ||
148 | nv_wo32(grch->mmio, i++ * 4, 0x00419004); | 199 | nv_wo32(grch->mmio, i++ * 4, 0x00419004); |
149 | nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); | 200 | nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8); |
150 | nv_wo32(grch->mmio, i++ * 4, 0x00419008); | 201 | nv_wo32(grch->mmio, i++ * 4, 0x00419008); |
151 | nv_wo32(grch->mmio, i++ * 4, 0x00000000); | 202 | nv_wo32(grch->mmio, i++ * 4, 0x00000000); |
152 | 203 | ||
153 | nv_wo32(grch->mmio, i++ * 4, 0x00418808); | 204 | nv_wo32(grch->mmio, i++ * 4, 0x00418808); |
154 | nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); | 205 | nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8); |
155 | nv_wo32(grch->mmio, i++ * 4, 0x0041880c); | 206 | nv_wo32(grch->mmio, i++ * 4, 0x0041880c); |
156 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); | 207 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); |
157 | 208 | ||
@@ -159,7 +210,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) | |||
159 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); | 210 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); |
160 | nv_wo32(grch->mmio, i++ * 4, magic); | 211 | nv_wo32(grch->mmio, i++ * 4, magic); |
161 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | 212 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
162 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) { | 213 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { |
163 | u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); | 214 | u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); |
164 | nv_wo32(grch->mmio, i++ * 4, reg); | 215 | nv_wo32(grch->mmio, i++ * 4, reg); |
165 | nv_wo32(grch->mmio, i++ * 4, magic); | 216 | nv_wo32(grch->mmio, i++ * 4, magic); |
@@ -186,7 +237,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) | |||
186 | return -ENOMEM; | 237 | return -ENOMEM; |
187 | chan->engctx[NVOBJ_ENGINE_GR] = grch; | 238 | chan->engctx[NVOBJ_ENGINE_GR] = grch; |
188 | 239 | ||
189 | ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, | 240 | ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256, |
190 | NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, | 241 | NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, |
191 | &grch->grctx); | 242 | &grch->grctx); |
192 | if (ret) | 243 | if (ret) |
@@ -197,8 +248,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) | |||
197 | if (ret) | 248 | if (ret) |
198 | goto error; | 249 | goto error; |
199 | 250 | ||
200 | nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4); | 251 | nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4); |
201 | nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst)); | 252 | nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst)); |
202 | pinstmem->flush(dev); | 253 | pinstmem->flush(dev); |
203 | 254 | ||
204 | if (!priv->grctx_vals) { | 255 | if (!priv->grctx_vals) { |
@@ -210,15 +261,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) | |||
210 | for (i = 0; i < priv->grctx_size; i += 4) | 261 | for (i = 0; i < priv->grctx_size; i += 4) |
211 | nv_wo32(grctx, i, priv->grctx_vals[i / 4]); | 262 | nv_wo32(grctx, i, priv->grctx_vals[i / 4]); |
212 | 263 | ||
213 | nv_wo32(grctx, 0xf4, 0); | 264 | if (!nouveau_ctxfw) { |
214 | nv_wo32(grctx, 0xf8, 0); | 265 | nv_wo32(grctx, 0x00, grch->mmio_nr); |
215 | nv_wo32(grctx, 0x10, grch->mmio_nr); | 266 | nv_wo32(grctx, 0x04, grch->mmio->linst >> 8); |
216 | nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); | 267 | } else { |
217 | nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); | 268 | nv_wo32(grctx, 0xf4, 0); |
218 | nv_wo32(grctx, 0x1c, 1); | 269 | nv_wo32(grctx, 0xf8, 0); |
219 | nv_wo32(grctx, 0x20, 0); | 270 | nv_wo32(grctx, 0x10, grch->mmio_nr); |
220 | nv_wo32(grctx, 0x28, 0); | 271 | nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst)); |
221 | nv_wo32(grctx, 0x2c, 0); | 272 | nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst)); |
273 | nv_wo32(grctx, 0x1c, 1); | ||
274 | nv_wo32(grctx, 0x20, 0); | ||
275 | nv_wo32(grctx, 0x28, 0); | ||
276 | nv_wo32(grctx, 0x2c, 0); | ||
277 | } | ||
222 | pinstmem->flush(dev); | 278 | pinstmem->flush(dev); |
223 | return 0; | 279 | return 0; |
224 | 280 | ||
@@ -248,7 +304,7 @@ nvc0_graph_object_new(struct nouveau_channel *chan, int engine, | |||
248 | } | 304 | } |
249 | 305 | ||
250 | static int | 306 | static int |
251 | nvc0_graph_fini(struct drm_device *dev, int engine) | 307 | nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend) |
252 | { | 308 | { |
253 | return 0; | 309 | return 0; |
254 | } | 310 | } |
@@ -296,6 +352,7 @@ static void | |||
296 | nvc0_graph_init_gpc_0(struct drm_device *dev) | 352 | nvc0_graph_init_gpc_0(struct drm_device *dev) |
297 | { | 353 | { |
298 | struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); | 354 | struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); |
355 | const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total); | ||
299 | u32 data[TP_MAX / 8]; | 356 | u32 data[TP_MAX / 8]; |
300 | u8 tpnr[GPC_MAX]; | 357 | u8 tpnr[GPC_MAX]; |
301 | int i, gpc, tpc; | 358 | int i, gpc, tpc; |
@@ -307,13 +364,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) | |||
307 | * 465: 3/4/4/0 4 7 | 364 | * 465: 3/4/4/0 4 7 |
308 | * 470: 3/3/4/4 5 5 | 365 | * 470: 3/3/4/4 5 5 |
309 | * 480: 3/4/4/4 6 6 | 366 | * 480: 3/4/4/4 6 6 |
310 | * | ||
311 | * magicgpc918 | ||
312 | * 450: 00200000 00000000001000000000000000000000 | ||
313 | * 460: 00124925 00000000000100100100100100100101 | ||
314 | * 465: 000ba2e9 00000000000010111010001011101001 | ||
315 | * 470: 00092493 00000000000010010010010010010011 | ||
316 | * 480: 00088889 00000000000010001000100010001001 | ||
317 | */ | 367 | */ |
318 | 368 | ||
319 | memset(data, 0x00, sizeof(data)); | 369 | memset(data, 0x00, sizeof(data)); |
@@ -336,10 +386,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) | |||
336 | nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | | 386 | nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | |
337 | priv->tp_nr[gpc]); | 387 | priv->tp_nr[gpc]); |
338 | nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); | 388 | nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); |
339 | nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918); | 389 | nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918); |
340 | } | 390 | } |
341 | 391 | ||
342 | nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918); | 392 | nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); |
343 | nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); | 393 | nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); |
344 | } | 394 | } |
345 | 395 | ||
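The per-chipset magicgpc918 table removed above is now derived at init time: the value is simply 0x00800000 (2^23) divided by the total TP count, rounded up. A quick userspace check (a sketch, not driver code), feeding in the tp_total values from the comment this hunk deletes, reproduces every removed constant:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* tp_total per board, from the 450/460/465/470/480 comment above */
	static const struct { const char *board; uint32_t tp_total; } t[] = {
		{ "450", 4 }, { "460", 7 }, { "465", 11 },
		{ "470", 14 }, { "480", 15 },
	};
	unsigned i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%s: %08x\n", t[i].board,
		       DIV_ROUND_UP(0x00800000u, t[i].tp_total));
	return 0;	/* prints 00200000 00124925 000ba2e9 00092493 00088889 */
}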
@@ -419,8 +469,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base, | |||
419 | static int | 469 | static int |
420 | nvc0_graph_init_ctxctl(struct drm_device *dev) | 470 | nvc0_graph_init_ctxctl(struct drm_device *dev) |
421 | { | 471 | { |
472 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
422 | struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); | 473 | struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); |
423 | u32 r000260; | 474 | u32 r000260; |
475 | int i; | ||
476 | |||
477 | if (!nouveau_ctxfw) { | ||
478 | /* load HUB microcode */ | ||
479 | r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); | ||
480 | nv_wr32(dev, 0x4091c0, 0x01000000); | ||
481 | for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++) | ||
482 | nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]); | ||
483 | |||
484 | nv_wr32(dev, 0x409180, 0x01000000); | ||
485 | for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) { | ||
486 | if ((i & 0x3f) == 0) | ||
487 | nv_wr32(dev, 0x409188, i >> 6); | ||
488 | nv_wr32(dev, 0x409184, nvc0_grhub_code[i]); | ||
489 | } | ||
490 | |||
491 | /* load GPC microcode */ | ||
492 | nv_wr32(dev, 0x41a1c0, 0x01000000); | ||
493 | for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++) | ||
494 | nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]); | ||
495 | |||
496 | nv_wr32(dev, 0x41a180, 0x01000000); | ||
497 | for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) { | ||
498 | if ((i & 0x3f) == 0) | ||
499 | nv_wr32(dev, 0x41a188, i >> 6); | ||
500 | nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]); | ||
501 | } | ||
502 | nv_wr32(dev, 0x000260, r000260); | ||
503 | |||
504 | /* start HUB ucode running; it'll init the GPCs */ | ||
505 | nv_wr32(dev, 0x409800, dev_priv->chipset); | ||
506 | nv_wr32(dev, 0x40910c, 0x00000000); | ||
507 | nv_wr32(dev, 0x409100, 0x00000002); | ||
508 | if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { | ||
509 | NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n"); | ||
510 | nvc0_graph_ctxctl_debug(dev); | ||
511 | return -EBUSY; | ||
512 | } | ||
513 | |||
514 | priv->grctx_size = nv_rd32(dev, 0x409804); | ||
515 | return 0; | ||
516 | } | ||
424 | 517 | ||
425 | /* load fuc microcode */ | 518 | /* load fuc microcode */ |
426 | r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); | 519 | r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); |
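Both microcode upload loops added here follow the same falcon protocol: the data (DMEM) port at falcon base + 0x1c4 simply auto-increments, while the code port at + 0x184 auto-increments within a 256-byte page and a write of (i >> 6) to + 0x188 selects a new page every 64 words. A hypothetical helper (not part of the patch; the 0x01000000 write is assumed to arm auto-increment) makes the shared pattern explicit:

static void
fuc_upload(struct drm_device *dev, u32 fuc_base,
	   const u32 *data, size_t data_words,
	   const u32 *code, size_t code_words)
{
	size_t i;

	/* DMEM segment: one auto-incrementing port */
	nv_wr32(dev, fuc_base + 0x1c0, 0x01000000);
	for (i = 0; i < data_words; i++)
		nv_wr32(dev, fuc_base + 0x1c4, data[i]);

	/* code segment: page select every 64 words (256 bytes) */
	nv_wr32(dev, fuc_base + 0x180, 0x01000000);
	for (i = 0; i < code_words; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x188, i >> 6);
		nv_wr32(dev, fuc_base + 0x184, code[i]);
	}
}

With fuc_base = 0x409000 this is the HUB loop above; with 0x41a000, the GPC one.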
@@ -528,6 +621,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) | |||
528 | } | 621 | } |
529 | 622 | ||
530 | static void | 623 | static void |
624 | nvc0_graph_ctxctl_isr(struct drm_device *dev) | ||
625 | { | ||
626 | u32 ustat = nv_rd32(dev, 0x409c18); | ||
627 | |||
628 | if (ustat & 0x00000001) | ||
629 | NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n"); | ||
630 | if (ustat & 0x00080000) | ||
631 | NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n"); | ||
632 | if (ustat & ~0x00080001) | ||
633 | NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat); | ||
634 | |||
635 | nvc0_graph_ctxctl_debug(dev); | ||
636 | nv_wr32(dev, 0x409c20, ustat); | ||
637 | } | ||
638 | |||
639 | static void | ||
531 | nvc0_graph_isr(struct drm_device *dev) | 640 | nvc0_graph_isr(struct drm_device *dev) |
532 | { | 641 | { |
533 | u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; | 642 | u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; |
@@ -578,11 +687,7 @@ nvc0_graph_isr(struct drm_device *dev) | |||
578 | } | 687 | } |
579 | 688 | ||
580 | if (stat & 0x00080000) { | 689 | if (stat & 0x00080000) { |
581 | u32 ustat = nv_rd32(dev, 0x409c18); | 690 | nvc0_graph_ctxctl_isr(dev); |
582 | |||
583 | NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat); | ||
584 | |||
585 | nv_wr32(dev, 0x409c20, ustat); | ||
586 | nv_wr32(dev, 0x400100, 0x00080000); | 691 | nv_wr32(dev, 0x400100, 0x00080000); |
587 | stat &= ~0x00080000; | 692 | stat &= ~0x00080000; |
588 | } | 693 | } |
@@ -606,7 +711,7 @@ nvc0_runk140_isr(struct drm_device *dev) | |||
606 | u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); | 711 | u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); |
607 | u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); | 712 | u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); |
608 | 713 | ||
609 | NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); | 714 | NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); |
610 | units &= ~(1 << unit); | 715 | units &= ~(1 << unit); |
611 | } | 716 | } |
612 | } | 717 | } |
@@ -651,10 +756,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine) | |||
651 | { | 756 | { |
652 | struct nvc0_graph_priv *priv = nv_engine(dev, engine); | 757 | struct nvc0_graph_priv *priv = nv_engine(dev, engine); |
653 | 758 | ||
654 | nvc0_graph_destroy_fw(&priv->fuc409c); | 759 | if (nouveau_ctxfw) { |
655 | nvc0_graph_destroy_fw(&priv->fuc409d); | 760 | nvc0_graph_destroy_fw(&priv->fuc409c); |
656 | nvc0_graph_destroy_fw(&priv->fuc41ac); | 761 | nvc0_graph_destroy_fw(&priv->fuc409d); |
657 | nvc0_graph_destroy_fw(&priv->fuc41ad); | 762 | nvc0_graph_destroy_fw(&priv->fuc41ac); |
763 | nvc0_graph_destroy_fw(&priv->fuc41ad); | ||
764 | } | ||
658 | 765 | ||
659 | nouveau_irq_unregister(dev, 12); | 766 | nouveau_irq_unregister(dev, 12); |
660 | nouveau_irq_unregister(dev, 25); | 767 | nouveau_irq_unregister(dev, 25); |
@@ -675,13 +782,10 @@ nvc0_graph_create(struct drm_device *dev) | |||
675 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 782 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
676 | struct nvc0_graph_priv *priv; | 783 | struct nvc0_graph_priv *priv; |
677 | int ret, gpc, i; | 784 | int ret, gpc, i; |
785 | u32 fermi; | ||
678 | 786 | ||
679 | switch (dev_priv->chipset) { | 787 | fermi = nvc0_graph_class(dev); |
680 | case 0xc0: | 788 | if (!fermi) { |
681 | case 0xc3: | ||
682 | case 0xc4: | ||
683 | break; | ||
684 | default: | ||
685 | NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); | 789 | NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); |
686 | return 0; | 790 | return 0; |
687 | } | 791 | } |
@@ -701,15 +805,17 @@ nvc0_graph_create(struct drm_device *dev) | |||
701 | nouveau_irq_register(dev, 12, nvc0_graph_isr); | 805 | nouveau_irq_register(dev, 12, nvc0_graph_isr); |
702 | nouveau_irq_register(dev, 25, nvc0_runk140_isr); | 806 | nouveau_irq_register(dev, 25, nvc0_runk140_isr); |
703 | 807 | ||
704 | if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || | 808 | if (nouveau_ctxfw) { |
705 | nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || | 809 | NV_INFO(dev, "PGRAPH: using external firmware\n"); |
706 | nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || | 810 | if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || |
707 | nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { | 811 | nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || |
708 | ret = 0; | 812 | nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || |
709 | goto error; | 813 | nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { |
814 | ret = 0; | ||
815 | goto error; | ||
816 | } | ||
710 | } | 817 | } |
711 | 818 | ||
712 | |||
713 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); | 819 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); |
714 | if (ret) | 820 | if (ret) |
715 | goto error; | 821 | goto error; |
@@ -735,25 +841,28 @@ nvc0_graph_create(struct drm_device *dev) | |||
735 | case 0xc0: | 841 | case 0xc0: |
736 | if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ | 842 | if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ |
737 | priv->magic_not_rop_nr = 0x07; | 843 | priv->magic_not_rop_nr = 0x07; |
738 | /* filled values up to tp_total, the rest 0 */ | ||
739 | priv->magicgpc918 = 0x000ba2e9; | ||
740 | } else | 844 | } else |
741 | if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ | 845 | if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ |
742 | priv->magic_not_rop_nr = 0x05; | 846 | priv->magic_not_rop_nr = 0x05; |
743 | priv->magicgpc918 = 0x00092493; | ||
744 | } else | 847 | } else |
745 | if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ | 848 | if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ |
746 | priv->magic_not_rop_nr = 0x06; | 849 | priv->magic_not_rop_nr = 0x06; |
747 | priv->magicgpc918 = 0x00088889; | ||
748 | } | 850 | } |
749 | break; | 851 | break; |
750 | case 0xc3: /* 450, 4/0/0/0, 2 */ | 852 | case 0xc3: /* 450, 4/0/0/0, 2 */ |
751 | priv->magic_not_rop_nr = 0x03; | 853 | priv->magic_not_rop_nr = 0x03; |
752 | priv->magicgpc918 = 0x00200000; | ||
753 | break; | 854 | break; |
754 | case 0xc4: /* 460, 3/4/0/0, 4 */ | 855 | case 0xc4: /* 460, 3/4/0/0, 4 */ |
755 | priv->magic_not_rop_nr = 0x01; | 856 | priv->magic_not_rop_nr = 0x01; |
756 | priv->magicgpc918 = 0x00124925; | 857 | break; |
858 | case 0xc1: /* 2/0/0/0, 1 */ | ||
859 | priv->magic_not_rop_nr = 0x01; | ||
860 | break; | ||
861 | case 0xc8: /* 4/4/3/4, 5 */ | ||
862 | priv->magic_not_rop_nr = 0x06; | ||
863 | break; | ||
864 | case 0xce: /* 4/4/0/0, 4 */ | ||
865 | priv->magic_not_rop_nr = 0x03; | ||
757 | break; | 866 | break; |
758 | } | 867 | } |
759 | 868 | ||
@@ -763,13 +872,16 @@ nvc0_graph_create(struct drm_device *dev) | |||
763 | priv->tp_nr[3], priv->rop_nr); | 872 | priv->tp_nr[3], priv->rop_nr); |
764 | /* use 0xc3's values... */ | 873 | /* use 0xc3's values... */ |
765 | priv->magic_not_rop_nr = 0x03; | 874 | priv->magic_not_rop_nr = 0x03; |
766 | priv->magicgpc918 = 0x00200000; | ||
767 | } | 875 | } |
768 | 876 | ||
769 | NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ | 877 | NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ |
770 | NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ | 878 | NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ |
771 | NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); | 879 | NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); |
772 | NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ | 880 | NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ |
881 | if (fermi >= 0x9197) | ||
882 | NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */ | ||
883 | if (fermi >= 0x9297) | ||
884 | NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */ | ||
773 | NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ | 885 | NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ |
774 | return 0; | 886 | return 0; |
775 | 887 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc new file mode 100644 index 00000000000..2a4b6dc8f9d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc | |||
@@ -0,0 +1,400 @@ | |||
1 | /* fuc microcode util functions for nvc0 PGRAPH | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
26 | define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)') | ||
27 | define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))') | ||
28 | |||
29 | ifdef(`include_code', ` | ||
30 | // Error codes | ||
31 | define(`E_BAD_COMMAND', 0x01) | ||
32 | define(`E_CMD_OVERFLOW', 0x02) | ||
33 | |||
34 | // Util macros to help with debugging ucode hangs, etc. | ||
35 | define(`T_WAIT', 0) | ||
36 | define(`T_MMCTX', 1) | ||
37 | define(`T_STRWAIT', 2) | ||
38 | define(`T_STRINIT', 3) | ||
39 | define(`T_AUTO', 4) | ||
40 | define(`T_CHAN', 5) | ||
41 | define(`T_LOAD', 6) | ||
42 | define(`T_SAVE', 7) | ||
43 | define(`T_LCHAN', 8) | ||
44 | define(`T_LCTXH', 9) | ||
45 | |||
46 | define(`trace_set', ` | ||
47 | mov $r8 0x83c | ||
48 | shl b32 $r8 6 | ||
49 | clear b32 $r9 | ||
50 | bset $r9 $1 | ||
51 | iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] | ||
52 | ') | ||
53 | |||
54 | define(`trace_clr', ` | ||
55 | mov $r8 0x85c | ||
56 | shl b32 $r8 6 | ||
57 | clear b32 $r9 | ||
58 | bset $r9 $1 | ||
59 | iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] | ||
60 | ') | ||
61 | |||
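These macros give each T_* phase its own bit in a scratch register: trace_set writes the bit to a set-only alias (IO 0x83c) and trace_clr to the matching clear-only alias (IO 0x85c), both annotated CC_SCRATCH[7]. Going by the same address arithmetic the error handler below uses (CC_SCRATCH[5] at 0x409814), the host should be able to snapshot the ucode's active phases from CC_SCRATCH[7] at 0x40981c, presumably part of what nvc0_graph_ctxctl_debug() dumps.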
62 | // queue_put - add request to queue | ||
63 | // | ||
64 | // In : $r13 queue pointer | ||
65 | // $r14 command | ||
66 | // $r15 data | ||
67 | // | ||
68 | queue_put: | ||
69 | // make sure we have space.. | ||
70 | ld b32 $r8 D[$r13 + 0x0] // GET | ||
71 | ld b32 $r9 D[$r13 + 0x4] // PUT | ||
72 | xor $r8 8 | ||
73 | cmpu b32 $r8 $r9 | ||
74 | bra ne queue_put_next | ||
75 | mov $r15 E_CMD_OVERFLOW | ||
76 | call error | ||
77 | ret | ||
78 | |||
79 | // store cmd/data on queue | ||
80 | queue_put_next: | ||
81 | and $r8 $r9 7 | ||
82 | shl b32 $r8 3 | ||
83 | add b32 $r8 $r13 | ||
84 | add b32 $r8 8 | ||
85 | st b32 D[$r8 + 0x0] $r14 | ||
86 | st b32 D[$r8 + 0x4] $r15 | ||
87 | |||
88 | // update PUT | ||
89 | add b32 $r9 1 | ||
90 | and $r9 0xf | ||
91 | st b32 D[$r13 + 0x4] $r9 | ||
92 | ret | ||
93 | |||
94 | // queue_get - fetch request from queue | ||
95 | // | ||
96 | // In : $r13 queue pointer | ||
97 | // | ||
98 | // Out: $p1 clear on success (data available) | ||
99 | // $r14 command | ||
100 | // $r15 data | ||
101 | // | ||
102 | queue_get: | ||
103 | bset $flags $p1 | ||
104 | ld b32 $r8 D[$r13 + 0x0] // GET | ||
105 | ld b32 $r9 D[$r13 + 0x4] // PUT | ||
106 | cmpu b32 $r8 $r9 | ||
107 | bra e queue_get_done | ||
108 | // fetch first cmd/data pair | ||
109 | and $r9 $r8 7 | ||
110 | shl b32 $r9 3 | ||
111 | add b32 $r9 $r13 | ||
112 | add b32 $r9 8 | ||
113 | ld b32 $r14 D[$r9 + 0x0] | ||
114 | ld b32 $r15 D[$r9 + 0x4] | ||
115 | |||
116 | // update GET | ||
117 | add b32 $r8 1 | ||
118 | and $r8 0xf | ||
119 | st b32 D[$r13 + 0x0] $r8 | ||
120 | bclr $flags $p1 | ||
121 | queue_get_done: | ||
122 | ret | ||
123 | |||
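queue_put/queue_get are the usual power-of-two ring buffer: GET and PUT count modulo 16 while slots are indexed modulo 8, so an empty queue (GET == PUT) stays distinguishable from a full one (PUT == GET ^ 8) without a separate element count. A C sketch of the same logic, assuming the layout laid down by queue_init above (two 32-bit pointers followed by eight cmd/data pairs):

#include <stdbool.h>
#include <stdint.h>

struct cmd_queue {
	uint32_t get;				/* D[$r13 + 0x0] */
	uint32_t put;				/* D[$r13 + 0x4] */
	struct { uint32_t cmd, data; } slot[8];
};

static bool cmd_queue_put(struct cmd_queue *q, uint32_t cmd, uint32_t data)
{
	if (q->put == (q->get ^ 8))		/* full: E_CMD_OVERFLOW */
		return false;
	q->slot[q->put & 7].cmd  = cmd;
	q->slot[q->put & 7].data = data;
	q->put = (q->put + 1) & 0xf;
	return true;
}

static bool cmd_queue_get(struct cmd_queue *q, uint32_t *cmd, uint32_t *data)
{
	if (q->get == q->put)			/* empty: $p1 left set */
		return false;
	*cmd  = q->slot[q->get & 7].cmd;
	*data = q->slot[q->get & 7].data;
	q->get = (q->get + 1) & 0xf;
	return true;
}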
124 | // nv_rd32 - read 32-bit value from nv register | ||
125 | // | ||
126 | // In : $r14 register | ||
127 | // Out: $r15 value | ||
128 | // | ||
129 | nv_rd32: | ||
130 | mov $r11 0x728 | ||
131 | shl b32 $r11 6 | ||
132 | mov b32 $r12 $r14 | ||
133 | bset $r12 31 // MMIO_CTRL_PENDING | ||
134 | iowr I[$r11 + 0x000] $r12 // MMIO_CTRL | ||
135 | nv_rd32_wait: | ||
136 | iord $r12 I[$r11 + 0x000] | ||
137 | xbit $r12 $r12 31 | ||
138 | bra ne nv_rd32_wait | ||
139 | mov $r10 6 // DONE_MMIO_RD | ||
140 | call wait_doneo | ||
141 | iord $r15 I[$r11 + 0x100] // MMIO_RDVAL | ||
142 | ret | ||
143 | |||
144 | // nv_wr32 - write 32-bit value to nv register | ||
145 | // | ||
146 | // In : $r14 register | ||
147 | // $r15 value | ||
148 | // | ||
149 | nv_wr32: | ||
150 | mov $r11 0x728 | ||
151 | shl b32 $r11 6 | ||
152 | iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL | ||
153 | mov b32 $r12 $r14 | ||
154 | bset $r12 31 // MMIO_CTRL_PENDING | ||
155 | bset $r12 30 // MMIO_CTRL_WRITE | ||
156 | iowr I[$r11 + 0x000] $r12 // MMIO_CTRL | ||
157 | nv_wr32_wait: | ||
158 | iord $r12 I[$r11 + 0x000] | ||
159 | xbit $r12 $r12 31 | ||
160 | bra ne nv_wr32_wait | ||
161 | ret | ||
162 | |||
163 | // (re)set watchdog timer | ||
164 | // | ||
165 | // In : $r15 timeout | ||
166 | // | ||
167 | watchdog_reset: | ||
168 | mov $r8 0x430 | ||
169 | shl b32 $r8 6 | ||
170 | bset $r15 31 | ||
171 | iowr I[$r8 + 0x000] $r15 | ||
172 | ret | ||
173 | |||
174 | // clear watchdog timer | ||
175 | watchdog_clear: | ||
176 | mov $r8 0x430 | ||
177 | shl b32 $r8 6 | ||
178 | iowr I[$r8 + 0x000] $r0 | ||
179 | ret | ||
180 | |||
181 | // wait_done{z,o} - wait on FUC_DONE bit to become clear/set | ||
182 | // | ||
183 | // In : $r10 bit to wait on | ||
184 | // | ||
185 | define(`wait_done', ` | ||
186 | $1: | ||
187 | trace_set(T_WAIT); | ||
188 | mov $r8 0x818 | ||
189 | shl b32 $r8 6 | ||
190 | iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit | ||
191 | wait_done_$1: | ||
192 | mov $r8 0x400 | ||
193 | shl b32 $r8 6 | ||
194 | iord $r8 I[$r8 + 0x000] // DONE | ||
195 | xbit $r8 $r8 $r10 | ||
196 | bra $2 wait_done_$1 | ||
197 | trace_clr(T_WAIT) | ||
198 | ret | ||
199 | ') | ||
200 | wait_done(wait_donez, ne) | ||
201 | wait_done(wait_doneo, e) | ||
202 | |||
203 | // mmctx_size - determine size of a mmio list transfer | ||
204 | // | ||
205 | // In : $r14 mmio list head | ||
206 | // $r15 mmio list tail | ||
207 | // Out: $r15 transfer size (in bytes) | ||
208 | // | ||
209 | mmctx_size: | ||
210 | clear b32 $r9 | ||
211 | nv_mmctx_size_loop: | ||
212 | ld b32 $r8 D[$r14] | ||
213 | shr b32 $r8 26 | ||
214 | add b32 $r8 1 | ||
215 | shl b32 $r8 2 | ||
216 | add b32 $r9 $r8 | ||
217 | add b32 $r14 4 | ||
218 | cmpu b32 $r14 $r15 | ||
219 | bra ne nv_mmctx_size_loop | ||
220 | mov b32 $r15 $r9 | ||
221 | ret | ||
222 | |||
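mmctx_size walks the list and sums the register counts packed into each entry by the mmctx_data() macro defined at the top of this file: address in the low 26 bits, count minus one in the top 6. The same computation in C, under that encoding:

#include <stdint.h>

static uint32_t mmctx_size(const uint32_t *head, const uint32_t *tail)
{
	uint32_t bytes = 0;

	while (head != tail) {
		uint32_t count = (*head++ >> 26) + 1;	/* top 6 bits: count - 1 */
		bytes += count * 4;			/* 4 bytes per register */
	}
	return bytes;
}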
223 | // mmctx_xfer - execute a list of mmio transfers | ||
224 | // | ||
225 | // In : $r10 flags | ||
226 | // bit 0: direction (0 = save, 1 = load) | ||
227 | // bit 1: set if first transfer | ||
228 | // bit 2: set if last transfer | ||
229 | // $r11 base | ||
230 | // $r12 mmio list head | ||
231 | // $r13 mmio list tail | ||
232 | // $r14 multi_stride | ||
233 | // $r15 multi_mask | ||
234 | // | ||
235 | mmctx_xfer: | ||
236 | trace_set(T_MMCTX) | ||
237 | mov $r8 0x710 | ||
238 | shl b32 $r8 6 | ||
239 | clear b32 $r9 | ||
240 | or $r11 $r11 | ||
241 | bra e mmctx_base_disabled | ||
242 | iowr I[$r8 + 0x000] $r11 // MMCTX_BASE | ||
243 | bset $r9 0 // BASE_EN | ||
244 | mmctx_base_disabled: | ||
245 | or $r14 $r14 | ||
246 | bra e mmctx_multi_disabled | ||
247 | iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE | ||
248 | iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK | ||
249 | bset $r9 1 // MULTI_EN | ||
250 | mmctx_multi_disabled: | ||
251 | add b32 $r8 0x100 | ||
252 | |||
253 | xbit $r11 $r10 0 | ||
254 | shl b32 $r11 16 // DIR | ||
255 | bset $r11 12 // QLIMIT = 0x10 | ||
256 | xbit $r14 $r10 1 | ||
257 | shl b32 $r14 17 | ||
258 | or $r11 $r14 // START_TRIGGER | ||
259 | iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL | ||
260 | |||
261 | // loop over the mmio list, and send requests to the hw | ||
262 | mmctx_exec_loop: | ||
263 | // wait for space in mmctx queue | ||
264 | mmctx_wait_free: | ||
265 | iord $r14 I[$r8 + 0x000] // MMCTX_CTRL | ||
266 | and $r14 0x1f | ||
267 | bra e mmctx_wait_free | ||
268 | |||
269 | // queue up an entry | ||
270 | ld b32 $r14 D[$r12] | ||
271 | or $r14 $r9 | ||
272 | iowr I[$r8 + 0x300] $r14 | ||
273 | add b32 $r12 4 | ||
274 | cmpu b32 $r12 $r13 | ||
275 | bra ne mmctx_exec_loop | ||
276 | |||
277 | xbit $r11 $r10 2 | ||
278 | bra ne mmctx_stop | ||
279 | // wait for queue to empty | ||
280 | mmctx_fini_wait: | ||
281 | iord $r11 I[$r8 + 0x000] // MMCTX_CTRL | ||
282 | and $r11 0x1f | ||
283 | cmpu b32 $r11 0x10 | ||
284 | bra ne mmctx_fini_wait | ||
285 | mov $r10 2 // DONE_MMCTX | ||
286 | call wait_donez | ||
287 | bra mmctx_done | ||
288 | mmctx_stop: | ||
289 | xbit $r11 $r10 0 | ||
290 | shl b32 $r11 16 // DIR | ||
291 | bset $r11 12 // QLIMIT = 0x10 | ||
292 | bset $r11 18 // STOP_TRIGGER | ||
293 | iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL | ||
294 | mmctx_stop_wait: | ||
295 | // wait for STOP_TRIGGER to clear | ||
296 | iord $r11 I[$r8 + 0x000] // MMCTX_CTRL | ||
297 | xbit $r11 $r11 18 | ||
298 | bra ne mmctx_stop_wait | ||
299 | mmctx_done: | ||
300 | trace_clr(T_MMCTX) | ||
301 | ret | ||
302 | |||
303 | // Wait for DONE_STRAND | ||
304 | // | ||
305 | strand_wait: | ||
306 | push $r10 | ||
307 | mov $r10 2 | ||
308 | call wait_donez | ||
309 | pop $r10 | ||
310 | ret | ||
311 | |||
312 | // unknown - call before issuing strand commands | ||
313 | // | ||
314 | strand_pre: | ||
315 | mov $r8 0x4afc | ||
316 | sethi $r8 0x20000 | ||
317 | mov $r9 0xc | ||
318 | iowr I[$r8] $r9 | ||
319 | call strand_wait | ||
320 | ret | ||
321 | |||
322 | // unknown - call after issuing strand commands | ||
323 | // | ||
324 | strand_post: | ||
325 | mov $r8 0x4afc | ||
326 | sethi $r8 0x20000 | ||
327 | mov $r9 0xd | ||
328 | iowr I[$r8] $r9 | ||
329 | call strand_wait | ||
330 | ret | ||
331 | |||
332 | // Selects strand set?! | ||
333 | // | ||
334 | // In: $r14 id | ||
335 | // | ||
336 | strand_set: | ||
337 | mov $r10 0x4ffc | ||
338 | sethi $r10 0x20000 | ||
339 | sub b32 $r11 $r10 0x500 | ||
340 | mov $r12 0xf | ||
341 | iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf | ||
342 | mov $r12 0xb | ||
343 | iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb | ||
344 | call strand_wait | ||
345 | iowr I[$r10 + 0x000] $r14 // 0x93c = <id> | ||
346 | mov $r12 0xa | ||
347 | iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa | ||
348 | call strand_wait | ||
349 | ret | ||
350 | |||
351 | // Initialise strand context data | ||
352 | // | ||
353 | // In : $r15 context base | ||
354 | // Out: $r15 context size (in bytes) | ||
355 | // | ||
356 | // Strandset(?) 3 hardcoded currently | ||
357 | // | ||
358 | strand_ctx_init: | ||
359 | trace_set(T_STRINIT) | ||
360 | call strand_pre | ||
361 | mov $r14 3 | ||
362 | call strand_set | ||
363 | mov $r10 0x46fc | ||
364 | sethi $r10 0x20000 | ||
365 | add b32 $r11 $r10 0x400 | ||
366 | iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0 | ||
367 | mov $r12 1 | ||
368 | iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE | ||
369 | call strand_wait | ||
370 | sub b32 $r12 $r0 1 | ||
371 | iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff | ||
372 | mov $r12 2 | ||
373 | iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT | ||
374 | call strand_wait | ||
375 | call strand_post | ||
376 | |||
377 | // read the size of each strand, poke the context offset of | ||
378 | // each into STRAND_{SAVE,LOAD}_SWBASE now, so we don't need | ||
379 | // to worry about it later. | ||
380 | mov $r8 0x880 | ||
381 | shl b32 $r8 6 | ||
382 | iord $r9 I[$r8 + 0x000] // STRANDS | ||
383 | add b32 $r8 0x2200 | ||
384 | shr b32 $r14 $r15 8 | ||
385 | ctx_init_strand_loop: | ||
386 | iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE | ||
387 | iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE | ||
388 | iord $r10 I[$r8 + 0x200] // STRAND_SIZE | ||
389 | shr b32 $r10 6 | ||
390 | add b32 $r10 1 | ||
391 | add b32 $r14 $r10 | ||
392 | add b32 $r8 4 | ||
393 | sub b32 $r9 1 | ||
394 | bra ne ctx_init_strand_loop | ||
395 | |||
396 | shl b32 $r14 8 | ||
397 | sub b32 $r15 $r14 $r15 | ||
398 | trace_clr(T_STRINIT) | ||
399 | ret | ||
400 | ') | ||
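The size bookkeeping at the end of strand_ctx_init is worth spelling out: the SWBASE values are kept in 256-byte units, each strand advances the running base by (STRAND_SIZE >> 6) + 1 of those units, and the final shl/sub converts the last base back into a byte count relative to the original context base. A sketch of just the arithmetic (descriptive names, not real registers; the per-strand SWBASE writes are omitted):

#include <stdint.h>

static uint32_t
strand_ctx_bytes(uint32_t ctx_base, const uint32_t *strand_size, uint32_t strands)
{
	uint32_t swbase = ctx_base >> 8;	/* 256-byte units */
	uint32_t i;

	for (i = 0; i < strands; i++)
		swbase += (strand_size[i] >> 6) + 1;
	return (swbase << 8) - ctx_base;	/* total size in bytes */
}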
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h index f5d184e0689..55689e99728 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.h +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h | |||
@@ -57,8 +57,7 @@ struct nvc0_graph_priv { | |||
57 | struct nouveau_gpuobj *unk4188b4; | 57 | struct nouveau_gpuobj *unk4188b4; |
58 | struct nouveau_gpuobj *unk4188b8; | 58 | struct nouveau_gpuobj *unk4188b8; |
59 | 59 | ||
60 | u8 magic_not_rop_nr; | 60 | u8 magic_not_rop_nr; |
61 | u32 magicgpc918; | ||
62 | }; | 61 | }; |
63 | 62 | ||
64 | struct nvc0_graph_chan { | 63 | struct nvc0_graph_chan { |
@@ -72,4 +71,25 @@ struct nvc0_graph_chan { | |||
72 | 71 | ||
73 | int nvc0_grctx_generate(struct nouveau_channel *); | 72 | int nvc0_grctx_generate(struct nouveau_channel *); |
74 | 73 | ||
74 | /* nvc0_graph.c also uses this to determine supported chipsets */ | ||
75 | static inline u32 | ||
76 | nvc0_graph_class(struct drm_device *dev) | ||
77 | { | ||
78 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
79 | |||
80 | switch (dev_priv->chipset) { | ||
81 | case 0xc0: | ||
82 | case 0xc3: | ||
83 | case 0xc4: | ||
84 | case 0xce: /* guess, mmio trace shows only 0x9097 state */ | ||
85 | return 0x9097; | ||
86 | case 0xc1: | ||
87 | return 0x9197; | ||
88 | case 0xc8: | ||
89 | return 0x9297; | ||
90 | default: | ||
91 | return 0; | ||
92 | } | ||
93 | } | ||
94 | |||
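Because the returned class numbers are ordered by feature level (0x9097 < 0x9197 < 0x9297), callers can gate chipset-specific setup with plain comparisons, as nvc0_graph_create() and nvc0_grctx_generate() do with "if (fermi >= 0x9197)", and a zero return doubles as the unsupported-chipset signal.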
75 | #endif | 95 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index 6df06611413..31018eaf527 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c | |||
@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data) | |||
45 | static void | 45 | static void |
46 | nvc0_grctx_generate_9097(struct drm_device *dev) | 46 | nvc0_grctx_generate_9097(struct drm_device *dev) |
47 | { | 47 | { |
48 | u32 fermi = nvc0_graph_class(dev); | ||
49 | u32 mthd; | ||
50 | |||
48 | nv_mthd(dev, 0x9097, 0x0800, 0x00000000); | 51 | nv_mthd(dev, 0x9097, 0x0800, 0x00000000); |
49 | nv_mthd(dev, 0x9097, 0x0840, 0x00000000); | 52 | nv_mthd(dev, 0x9097, 0x0840, 0x00000000); |
50 | nv_mthd(dev, 0x9097, 0x0880, 0x00000000); | 53 | nv_mthd(dev, 0x9097, 0x0880, 0x00000000); |
@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev) | |||
824 | nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); | 827 | nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); |
825 | nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); | 828 | nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); |
826 | nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); | 829 | nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); |
827 | nv_mthd(dev, 0x9097, 0x3400, 0x00000000); | 830 | if (fermi == 0x9097) { |
828 | nv_mthd(dev, 0x9097, 0x3404, 0x00000000); | 831 | for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) |
829 | nv_mthd(dev, 0x9097, 0x3408, 0x00000000); | 832 | nv_mthd(dev, 0x9097, mthd, 0x00000000); |
830 | nv_mthd(dev, 0x9097, 0x340c, 0x00000000); | 833 | } |
831 | nv_mthd(dev, 0x9097, 0x3410, 0x00000000); | ||
832 | nv_mthd(dev, 0x9097, 0x3414, 0x00000000); | ||
833 | nv_mthd(dev, 0x9097, 0x3418, 0x00000000); | ||
834 | nv_mthd(dev, 0x9097, 0x341c, 0x00000000); | ||
835 | nv_mthd(dev, 0x9097, 0x3420, 0x00000000); | ||
836 | nv_mthd(dev, 0x9097, 0x3424, 0x00000000); | ||
837 | nv_mthd(dev, 0x9097, 0x3428, 0x00000000); | ||
838 | nv_mthd(dev, 0x9097, 0x342c, 0x00000000); | ||
839 | nv_mthd(dev, 0x9097, 0x3430, 0x00000000); | ||
840 | nv_mthd(dev, 0x9097, 0x3434, 0x00000000); | ||
841 | nv_mthd(dev, 0x9097, 0x3438, 0x00000000); | ||
842 | nv_mthd(dev, 0x9097, 0x343c, 0x00000000); | ||
843 | nv_mthd(dev, 0x9097, 0x3440, 0x00000000); | ||
844 | nv_mthd(dev, 0x9097, 0x3444, 0x00000000); | ||
845 | nv_mthd(dev, 0x9097, 0x3448, 0x00000000); | ||
846 | nv_mthd(dev, 0x9097, 0x344c, 0x00000000); | ||
847 | nv_mthd(dev, 0x9097, 0x3450, 0x00000000); | ||
848 | nv_mthd(dev, 0x9097, 0x3454, 0x00000000); | ||
849 | nv_mthd(dev, 0x9097, 0x3458, 0x00000000); | ||
850 | nv_mthd(dev, 0x9097, 0x345c, 0x00000000); | ||
851 | nv_mthd(dev, 0x9097, 0x3460, 0x00000000); | ||
852 | nv_mthd(dev, 0x9097, 0x3464, 0x00000000); | ||
853 | nv_mthd(dev, 0x9097, 0x3468, 0x00000000); | ||
854 | nv_mthd(dev, 0x9097, 0x346c, 0x00000000); | ||
855 | nv_mthd(dev, 0x9097, 0x3470, 0x00000000); | ||
856 | nv_mthd(dev, 0x9097, 0x3474, 0x00000000); | ||
857 | nv_mthd(dev, 0x9097, 0x3478, 0x00000000); | ||
858 | nv_mthd(dev, 0x9097, 0x347c, 0x00000000); | ||
859 | nv_mthd(dev, 0x9097, 0x3480, 0x00000000); | ||
860 | nv_mthd(dev, 0x9097, 0x3484, 0x00000000); | ||
861 | nv_mthd(dev, 0x9097, 0x3488, 0x00000000); | ||
862 | nv_mthd(dev, 0x9097, 0x348c, 0x00000000); | ||
863 | nv_mthd(dev, 0x9097, 0x3490, 0x00000000); | ||
864 | nv_mthd(dev, 0x9097, 0x3494, 0x00000000); | ||
865 | nv_mthd(dev, 0x9097, 0x3498, 0x00000000); | ||
866 | nv_mthd(dev, 0x9097, 0x349c, 0x00000000); | ||
867 | nv_mthd(dev, 0x9097, 0x34a0, 0x00000000); | ||
868 | nv_mthd(dev, 0x9097, 0x34a4, 0x00000000); | ||
869 | nv_mthd(dev, 0x9097, 0x34a8, 0x00000000); | ||
870 | nv_mthd(dev, 0x9097, 0x34ac, 0x00000000); | ||
871 | nv_mthd(dev, 0x9097, 0x34b0, 0x00000000); | ||
872 | nv_mthd(dev, 0x9097, 0x34b4, 0x00000000); | ||
873 | nv_mthd(dev, 0x9097, 0x34b8, 0x00000000); | ||
874 | nv_mthd(dev, 0x9097, 0x34bc, 0x00000000); | ||
875 | nv_mthd(dev, 0x9097, 0x34c0, 0x00000000); | ||
876 | nv_mthd(dev, 0x9097, 0x34c4, 0x00000000); | ||
877 | nv_mthd(dev, 0x9097, 0x34c8, 0x00000000); | ||
878 | nv_mthd(dev, 0x9097, 0x34cc, 0x00000000); | ||
879 | nv_mthd(dev, 0x9097, 0x34d0, 0x00000000); | ||
880 | nv_mthd(dev, 0x9097, 0x34d4, 0x00000000); | ||
881 | nv_mthd(dev, 0x9097, 0x34d8, 0x00000000); | ||
882 | nv_mthd(dev, 0x9097, 0x34dc, 0x00000000); | ||
883 | nv_mthd(dev, 0x9097, 0x34e0, 0x00000000); | ||
884 | nv_mthd(dev, 0x9097, 0x34e4, 0x00000000); | ||
885 | nv_mthd(dev, 0x9097, 0x34e8, 0x00000000); | ||
886 | nv_mthd(dev, 0x9097, 0x34ec, 0x00000000); | ||
887 | nv_mthd(dev, 0x9097, 0x34f0, 0x00000000); | ||
888 | nv_mthd(dev, 0x9097, 0x34f4, 0x00000000); | ||
889 | nv_mthd(dev, 0x9097, 0x34f8, 0x00000000); | ||
890 | nv_mthd(dev, 0x9097, 0x34fc, 0x00000000); | ||
891 | nv_mthd(dev, 0x9097, 0x3500, 0x00000000); | ||
892 | nv_mthd(dev, 0x9097, 0x3504, 0x00000000); | ||
893 | nv_mthd(dev, 0x9097, 0x3508, 0x00000000); | ||
894 | nv_mthd(dev, 0x9097, 0x350c, 0x00000000); | ||
895 | nv_mthd(dev, 0x9097, 0x3510, 0x00000000); | ||
896 | nv_mthd(dev, 0x9097, 0x3514, 0x00000000); | ||
897 | nv_mthd(dev, 0x9097, 0x3518, 0x00000000); | ||
898 | nv_mthd(dev, 0x9097, 0x351c, 0x00000000); | ||
899 | nv_mthd(dev, 0x9097, 0x3520, 0x00000000); | ||
900 | nv_mthd(dev, 0x9097, 0x3524, 0x00000000); | ||
901 | nv_mthd(dev, 0x9097, 0x3528, 0x00000000); | ||
902 | nv_mthd(dev, 0x9097, 0x352c, 0x00000000); | ||
903 | nv_mthd(dev, 0x9097, 0x3530, 0x00000000); | ||
904 | nv_mthd(dev, 0x9097, 0x3534, 0x00000000); | ||
905 | nv_mthd(dev, 0x9097, 0x3538, 0x00000000); | ||
906 | nv_mthd(dev, 0x9097, 0x353c, 0x00000000); | ||
907 | nv_mthd(dev, 0x9097, 0x3540, 0x00000000); | ||
908 | nv_mthd(dev, 0x9097, 0x3544, 0x00000000); | ||
909 | nv_mthd(dev, 0x9097, 0x3548, 0x00000000); | ||
910 | nv_mthd(dev, 0x9097, 0x354c, 0x00000000); | ||
911 | nv_mthd(dev, 0x9097, 0x3550, 0x00000000); | ||
912 | nv_mthd(dev, 0x9097, 0x3554, 0x00000000); | ||
913 | nv_mthd(dev, 0x9097, 0x3558, 0x00000000); | ||
914 | nv_mthd(dev, 0x9097, 0x355c, 0x00000000); | ||
915 | nv_mthd(dev, 0x9097, 0x3560, 0x00000000); | ||
916 | nv_mthd(dev, 0x9097, 0x3564, 0x00000000); | ||
917 | nv_mthd(dev, 0x9097, 0x3568, 0x00000000); | ||
918 | nv_mthd(dev, 0x9097, 0x356c, 0x00000000); | ||
919 | nv_mthd(dev, 0x9097, 0x3570, 0x00000000); | ||
920 | nv_mthd(dev, 0x9097, 0x3574, 0x00000000); | ||
921 | nv_mthd(dev, 0x9097, 0x3578, 0x00000000); | ||
922 | nv_mthd(dev, 0x9097, 0x357c, 0x00000000); | ||
923 | nv_mthd(dev, 0x9097, 0x3580, 0x00000000); | ||
924 | nv_mthd(dev, 0x9097, 0x3584, 0x00000000); | ||
925 | nv_mthd(dev, 0x9097, 0x3588, 0x00000000); | ||
926 | nv_mthd(dev, 0x9097, 0x358c, 0x00000000); | ||
927 | nv_mthd(dev, 0x9097, 0x3590, 0x00000000); | ||
928 | nv_mthd(dev, 0x9097, 0x3594, 0x00000000); | ||
929 | nv_mthd(dev, 0x9097, 0x3598, 0x00000000); | ||
930 | nv_mthd(dev, 0x9097, 0x359c, 0x00000000); | ||
931 | nv_mthd(dev, 0x9097, 0x35a0, 0x00000000); | ||
932 | nv_mthd(dev, 0x9097, 0x35a4, 0x00000000); | ||
933 | nv_mthd(dev, 0x9097, 0x35a8, 0x00000000); | ||
934 | nv_mthd(dev, 0x9097, 0x35ac, 0x00000000); | ||
935 | nv_mthd(dev, 0x9097, 0x35b0, 0x00000000); | ||
936 | nv_mthd(dev, 0x9097, 0x35b4, 0x00000000); | ||
937 | nv_mthd(dev, 0x9097, 0x35b8, 0x00000000); | ||
938 | nv_mthd(dev, 0x9097, 0x35bc, 0x00000000); | ||
939 | nv_mthd(dev, 0x9097, 0x35c0, 0x00000000); | ||
940 | nv_mthd(dev, 0x9097, 0x35c4, 0x00000000); | ||
941 | nv_mthd(dev, 0x9097, 0x35c8, 0x00000000); | ||
942 | nv_mthd(dev, 0x9097, 0x35cc, 0x00000000); | ||
943 | nv_mthd(dev, 0x9097, 0x35d0, 0x00000000); | ||
944 | nv_mthd(dev, 0x9097, 0x35d4, 0x00000000); | ||
945 | nv_mthd(dev, 0x9097, 0x35d8, 0x00000000); | ||
946 | nv_mthd(dev, 0x9097, 0x35dc, 0x00000000); | ||
947 | nv_mthd(dev, 0x9097, 0x35e0, 0x00000000); | ||
948 | nv_mthd(dev, 0x9097, 0x35e4, 0x00000000); | ||
949 | nv_mthd(dev, 0x9097, 0x35e8, 0x00000000); | ||
950 | nv_mthd(dev, 0x9097, 0x35ec, 0x00000000); | ||
951 | nv_mthd(dev, 0x9097, 0x35f0, 0x00000000); | ||
952 | nv_mthd(dev, 0x9097, 0x35f4, 0x00000000); | ||
953 | nv_mthd(dev, 0x9097, 0x35f8, 0x00000000); | ||
954 | nv_mthd(dev, 0x9097, 0x35fc, 0x00000000); | ||
955 | nv_mthd(dev, 0x9097, 0x030c, 0x00000001); | 834 | nv_mthd(dev, 0x9097, 0x030c, 0x00000001); |
956 | nv_mthd(dev, 0x9097, 0x1944, 0x00000000); | 835 | nv_mthd(dev, 0x9097, 0x1944, 0x00000000); |
957 | nv_mthd(dev, 0x9097, 0x1514, 0x00000000); | 836 | nv_mthd(dev, 0x9097, 0x1514, 0x00000000); |
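The replacement loop is exactly equivalent to the writes it deletes: stepping from 0x3400 to 0x35fc by 4 issues (0x35fc - 0x3400) / 4 + 1 = 128 methods, matching the 128 nv_mthd() calls removed above. The behavioural change is the new guard: on anything above the base 0x9097 class this range is skipped here and handled instead by the 0x9197/0x9297 generators added below.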
@@ -1321,6 +1200,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev) | |||
1321 | } | 1200 | } |
1322 | 1201 | ||
1323 | static void | 1202 | static void |
1203 | nvc0_grctx_generate_9197(struct drm_device *dev) | ||
1204 | { | ||
1205 | u32 fermi = nvc0_graph_class(dev); | ||
1206 | u32 mthd; | ||
1207 | |||
1208 | if (fermi == 0x9197) { | ||
1209 | for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) | ||
1210 | nv_mthd(dev, 0x9197, mthd, 0x00000000); | ||
1211 | } | ||
1212 | nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001); | ||
1213 | } | ||
1214 | |||
1215 | static void | ||
1216 | nvc0_grctx_generate_9297(struct drm_device *dev) | ||
1217 | { | ||
1218 | u32 fermi = nvc0_graph_class(dev); | ||
1219 | u32 mthd; | ||
1220 | |||
1221 | if (fermi == 0x9297) { | ||
1222 | for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) | ||
1223 | nv_mthd(dev, 0x9297, mthd, 0x00000000); | ||
1224 | } | ||
1225 | nv_mthd(dev, 0x9297, 0x036c, 0x00000000); | ||
1226 | nv_mthd(dev, 0x9297, 0x0370, 0x00000000); | ||
1227 | nv_mthd(dev, 0x9297, 0x07a4, 0x00000000); | ||
1228 | nv_mthd(dev, 0x9297, 0x07a8, 0x00000000); | ||
1229 | nv_mthd(dev, 0x9297, 0x0374, 0x00000000); | ||
1230 | nv_mthd(dev, 0x9297, 0x0378, 0x00000020); | ||
1231 | } | ||
1232 | |||
1233 | static void | ||
1324 | nvc0_grctx_generate_902d(struct drm_device *dev) | 1234 | nvc0_grctx_generate_902d(struct drm_device *dev) |
1325 | { | 1235 | { |
1326 | nv_mthd(dev, 0x902d, 0x0200, 0x000000cf); | 1236 | nv_mthd(dev, 0x902d, 0x0200, 0x000000cf); |
@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev) | |||
1559 | static void | 1469 | static void |
1560 | nvc0_grctx_generate_shaders(struct drm_device *dev) | 1470 | nvc0_grctx_generate_shaders(struct drm_device *dev) |
1561 | { | 1471 | { |
1562 | nv_wr32(dev, 0x405800, 0x078000bf); | 1472 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1563 | nv_wr32(dev, 0x405830, 0x02180000); | 1473 | |
1474 | if (dev_priv->chipset != 0xc1) { | ||
1475 | nv_wr32(dev, 0x405800, 0x078000bf); | ||
1476 | nv_wr32(dev, 0x405830, 0x02180000); | ||
1477 | } else { | ||
1478 | nv_wr32(dev, 0x405800, 0x0f8000bf); | ||
1479 | nv_wr32(dev, 0x405830, 0x02180218); | ||
1480 | } | ||
1564 | nv_wr32(dev, 0x405834, 0x00000000); | 1481 | nv_wr32(dev, 0x405834, 0x00000000); |
1565 | nv_wr32(dev, 0x405838, 0x00000000); | 1482 | nv_wr32(dev, 0x405838, 0x00000000); |
1566 | nv_wr32(dev, 0x405854, 0x00000000); | 1483 | nv_wr32(dev, 0x405854, 0x00000000); |
@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev) | |||
1586 | static void | 1503 | static void |
1587 | nvc0_grctx_generate_unk64xx(struct drm_device *dev) | 1504 | nvc0_grctx_generate_unk64xx(struct drm_device *dev) |
1588 | { | 1505 | { |
1506 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1507 | |||
1589 | nv_wr32(dev, 0x4064a8, 0x00000000); | 1508 | nv_wr32(dev, 0x4064a8, 0x00000000); |
1590 | nv_wr32(dev, 0x4064ac, 0x00003fff); | 1509 | nv_wr32(dev, 0x4064ac, 0x00003fff); |
1591 | nv_wr32(dev, 0x4064b4, 0x00000000); | 1510 | nv_wr32(dev, 0x4064b4, 0x00000000); |
1592 | nv_wr32(dev, 0x4064b8, 0x00000000); | 1511 | nv_wr32(dev, 0x4064b8, 0x00000000); |
1512 | if (dev_priv->chipset == 0xc1) { | ||
1513 | nv_wr32(dev, 0x4064c0, 0x80140078); | ||
1514 | nv_wr32(dev, 0x4064c4, 0x0086ffff); | ||
1515 | } | ||
1593 | } | 1516 | } |
1594 | 1517 | ||
1595 | static void | 1518 | static void |
@@ -1622,21 +1545,14 @@ static void | |||
1622 | nvc0_grctx_generate_rop(struct drm_device *dev) | 1545 | nvc0_grctx_generate_rop(struct drm_device *dev) |
1623 | { | 1546 | { |
1624 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 1547 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1548 | int chipset = dev_priv->chipset; | ||
1625 | 1549 | ||
1626 | /* ROPC_BROADCAST */ | 1550 | /* ROPC_BROADCAST */ |
1627 | nv_wr32(dev, 0x408800, 0x02802a3c); | 1551 | nv_wr32(dev, 0x408800, 0x02802a3c); |
1628 | nv_wr32(dev, 0x408804, 0x00000040); | 1552 | nv_wr32(dev, 0x408804, 0x00000040); |
1629 | nv_wr32(dev, 0x408808, 0x0003e00d); | 1553 | nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005); |
1630 | switch (dev_priv->chipset) { | 1554 | nv_wr32(dev, 0x408900, 0x3080b801); |
1631 | case 0xc0: | 1555 | nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001); |
1632 | nv_wr32(dev, 0x408900, 0x0080b801); | ||
1633 | break; | ||
1634 | case 0xc3: | ||
1635 | case 0xc4: | ||
1636 | nv_wr32(dev, 0x408900, 0x3080b801); | ||
1637 | break; | ||
1638 | } | ||
1639 | nv_wr32(dev, 0x408904, 0x02000001); | ||
1640 | nv_wr32(dev, 0x408908, 0x00c80929); | 1556 | nv_wr32(dev, 0x408908, 0x00c80929); |
1641 | nv_wr32(dev, 0x40890c, 0x00000000); | 1557 | nv_wr32(dev, 0x40890c, 0x00000000); |
1642 | nv_wr32(dev, 0x408980, 0x0000011d); | 1558 | nv_wr32(dev, 0x408980, 0x0000011d); |
@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev) | |||
1645 | static void | 1561 | static void |
1646 | nvc0_grctx_generate_gpc(struct drm_device *dev) | 1562 | nvc0_grctx_generate_gpc(struct drm_device *dev) |
1647 | { | 1563 | { |
1564 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1565 | int chipset = dev_priv->chipset; | ||
1648 | int i; | 1566 | int i; |
1649 | 1567 | ||
1650 | /* GPC_BROADCAST */ | 1568 | /* GPC_BROADCAST */ |
@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) | |||
1676 | nv_wr32(dev, 0x41880c, 0x00000000); | 1594 | nv_wr32(dev, 0x41880c, 0x00000000); |
1677 | nv_wr32(dev, 0x418810, 0x00000000); | 1595 | nv_wr32(dev, 0x418810, 0x00000000); |
1678 | nv_wr32(dev, 0x418828, 0x00008442); | 1596 | nv_wr32(dev, 0x418828, 0x00008442); |
1679 | nv_wr32(dev, 0x418830, 0x00000001); | 1597 | nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001); |
1680 | nv_wr32(dev, 0x4188d8, 0x00000008); | 1598 | nv_wr32(dev, 0x4188d8, 0x00000008); |
1681 | nv_wr32(dev, 0x4188e0, 0x01000000); | 1599 | nv_wr32(dev, 0x4188e0, 0x01000000); |
1682 | nv_wr32(dev, 0x4188e8, 0x00000000); | 1600 | nv_wr32(dev, 0x4188e8, 0x00000000); |
@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) | |||
1684 | nv_wr32(dev, 0x4188f0, 0x00000000); | 1602 | nv_wr32(dev, 0x4188f0, 0x00000000); |
1685 | nv_wr32(dev, 0x4188f4, 0x00000000); | 1603 | nv_wr32(dev, 0x4188f4, 0x00000000); |
1686 | nv_wr32(dev, 0x4188f8, 0x00000000); | 1604 | nv_wr32(dev, 0x4188f8, 0x00000000); |
1687 | nv_wr32(dev, 0x4188fc, 0x00100000); | 1605 | nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018); |
1688 | nv_wr32(dev, 0x41891c, 0x00ff00ff); | 1606 | nv_wr32(dev, 0x41891c, 0x00ff00ff); |
1689 | nv_wr32(dev, 0x418924, 0x00000000); | 1607 | nv_wr32(dev, 0x418924, 0x00000000); |
1690 | nv_wr32(dev, 0x418928, 0x00ffff00); | 1608 | nv_wr32(dev, 0x418928, 0x00ffff00); |
@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) | |||
1715 | nv_wr32(dev, 0x418c24, 0x00000000); | 1633 | nv_wr32(dev, 0x418c24, 0x00000000); |
1716 | nv_wr32(dev, 0x418c28, 0x00000000); | 1634 | nv_wr32(dev, 0x418c28, 0x00000000); |
1717 | nv_wr32(dev, 0x418c2c, 0x00000000); | 1635 | nv_wr32(dev, 0x418c2c, 0x00000000); |
1636 | if (chipset == 0xc1) | ||
1637 | nv_wr32(dev, 0x418c6c, 0x00000001); | ||
1718 | nv_wr32(dev, 0x418c80, 0x20200004); | 1638 | nv_wr32(dev, 0x418c80, 0x20200004); |
1719 | nv_wr32(dev, 0x418c8c, 0x00000001); | 1639 | nv_wr32(dev, 0x418c8c, 0x00000001); |
1720 | nv_wr32(dev, 0x419000, 0x00000780); | 1640 | nv_wr32(dev, 0x419000, 0x00000780); |
@@ -1727,10 +1647,13 @@ static void | |||
1727 | nvc0_grctx_generate_tp(struct drm_device *dev) | 1647 | nvc0_grctx_generate_tp(struct drm_device *dev) |
1728 | { | 1648 | { |
1729 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 1649 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1650 | int chipset = dev_priv->chipset; | ||
1730 | 1651 | ||
1731 | /* GPC_BROADCAST.TP_BROADCAST */ | 1652 | /* GPC_BROADCAST.TP_BROADCAST */ |
1653 | nv_wr32(dev, 0x419818, 0x00000000); | ||
1654 | nv_wr32(dev, 0x41983c, 0x00038bc7); | ||
1732 | nv_wr32(dev, 0x419848, 0x00000000); | 1655 | nv_wr32(dev, 0x419848, 0x00000000); |
1733 | nv_wr32(dev, 0x419864, 0x0000012a); | 1656 | nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129); |
1734 | nv_wr32(dev, 0x419888, 0x00000000); | 1657 | nv_wr32(dev, 0x419888, 0x00000000); |
1735 | nv_wr32(dev, 0x419a00, 0x000001f0); | 1658 | nv_wr32(dev, 0x419a00, 0x000001f0); |
1736 | nv_wr32(dev, 0x419a04, 0x00000001); | 1659 | nv_wr32(dev, 0x419a04, 0x00000001); |
@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev) | |||
1740 | nv_wr32(dev, 0x419a14, 0x00000200); | 1663 | nv_wr32(dev, 0x419a14, 0x00000200); |
1741 | nv_wr32(dev, 0x419a1c, 0x00000000); | 1664 | nv_wr32(dev, 0x419a1c, 0x00000000); |
1742 | nv_wr32(dev, 0x419a20, 0x00000800); | 1665 | nv_wr32(dev, 0x419a20, 0x00000800); |
1743 | if (dev_priv->chipset != 0xc0) | 1666 | if (chipset != 0xc0 && chipset != 0xc8) |
1744 | nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ | 1667 | nv_wr32(dev, 0x00419ac4, 0x0007f440); |
1745 | nv_wr32(dev, 0x419b00, 0x0a418820); | 1668 | nv_wr32(dev, 0x419b00, 0x0a418820); |
1746 | nv_wr32(dev, 0x419b04, 0x062080e6); | 1669 | nv_wr32(dev, 0x419b04, 0x062080e6); |
1747 | nv_wr32(dev, 0x419b08, 0x020398a4); | 1670 | nv_wr32(dev, 0x419b08, 0x020398a4); |
@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev) | |||
1749 | nv_wr32(dev, 0x419b10, 0x0a418820); | 1672 | nv_wr32(dev, 0x419b10, 0x0a418820); |
1750 | nv_wr32(dev, 0x419b14, 0x000000e6); | 1673 | nv_wr32(dev, 0x419b14, 0x000000e6); |
1751 | nv_wr32(dev, 0x419bd0, 0x00900103); | 1674 | nv_wr32(dev, 0x419bd0, 0x00900103); |
1752 | nv_wr32(dev, 0x419be0, 0x00000001); | 1675 | nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001); |
1753 | nv_wr32(dev, 0x419be4, 0x00000000); | 1676 | nv_wr32(dev, 0x419be4, 0x00000000); |
1754 | nv_wr32(dev, 0x419c00, 0x00000002); | 1677 | nv_wr32(dev, 0x419c00, 0x00000002); |
1755 | nv_wr32(dev, 0x419c04, 0x00000006); | 1678 | nv_wr32(dev, 0x419c04, 0x00000006); |
1756 | nv_wr32(dev, 0x419c08, 0x00000002); | 1679 | nv_wr32(dev, 0x419c08, 0x00000002); |
1757 | nv_wr32(dev, 0x419c20, 0x00000000); | 1680 | nv_wr32(dev, 0x419c20, 0x00000000); |
1758 | nv_wr32(dev, 0x419cbc, 0x28137606); | 1681 | nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048 |
1759 | nv_wr32(dev, 0x419ce8, 0x00000000); | 1682 | nv_wr32(dev, 0x419ce8, 0x00000000); |
1760 | nv_wr32(dev, 0x419cf4, 0x00000183); | 1683 | nv_wr32(dev, 0x419cf4, 0x00000183); |
1761 | nv_wr32(dev, 0x419d20, 0x02180000); | 1684 | nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000); |
1762 | nv_wr32(dev, 0x419d24, 0x00001fff); | 1685 | nv_wr32(dev, 0x419d24, 0x00001fff); |
1686 | if (chipset == 0xc1) | ||
1687 | nv_wr32(dev, 0x419d44, 0x02180218); | ||
1763 | nv_wr32(dev, 0x419e04, 0x00000000); | 1688 | nv_wr32(dev, 0x419e04, 0x00000000); |
1764 | nv_wr32(dev, 0x419e08, 0x00000000); | 1689 | nv_wr32(dev, 0x419e08, 0x00000000); |
1765 | nv_wr32(dev, 0x419e0c, 0x00000000); | 1690 | nv_wr32(dev, 0x419e0c, 0x00000000); |
@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev) | |||
1785 | nv_wr32(dev, 0x419e8c, 0x00000000); | 1710 | nv_wr32(dev, 0x419e8c, 0x00000000); |
1786 | nv_wr32(dev, 0x419e90, 0x00000000); | 1711 | nv_wr32(dev, 0x419e90, 0x00000000); |
1787 | nv_wr32(dev, 0x419e98, 0x00000000); | 1712 | nv_wr32(dev, 0x419e98, 0x00000000); |
1788 | if (dev_priv->chipset != 0xc0) | 1713 | if (chipset != 0xc0 && chipset != 0xc8) |
1789 | nv_wr32(dev, 0x419ee0, 0x00011110); | 1714 | nv_wr32(dev, 0x419ee0, 0x00011110); |
1790 | nv_wr32(dev, 0x419f50, 0x00000000); | 1715 | nv_wr32(dev, 0x419f50, 0x00000000); |
1791 | nv_wr32(dev, 0x419f54, 0x00000000); | 1716 | nv_wr32(dev, 0x419f54, 0x00000000); |
1792 | if (dev_priv->chipset != 0xc0) | 1717 | if (chipset != 0xc0 && chipset != 0xc8) |
1793 | nv_wr32(dev, 0x419f58, 0x00000000); | 1718 | nv_wr32(dev, 0x419f58, 0x00000000); |
1794 | } | 1719 | } |
1795 | 1720 | ||
@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1801 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; | 1726 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; |
1802 | struct drm_device *dev = chan->dev; | 1727 | struct drm_device *dev = chan->dev; |
1803 | int i, gpc, tp, id; | 1728 | int i, gpc, tp, id; |
1729 | u32 fermi = nvc0_graph_class(dev); | ||
1804 | u32 r000260, tmp; | 1730 | u32 r000260, tmp; |
1805 | 1731 | ||
1806 | r000260 = nv_rd32(dev, 0x000260); | 1732 | r000260 = nv_rd32(dev, 0x000260); |
@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1857 | nv_wr32(dev, 0x40587c, 0x00000000); | 1783 | nv_wr32(dev, 0x40587c, 0x00000000); |
1858 | 1784 | ||
1859 | if (1) { | 1785 | if (1) { |
1860 | const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 }; | 1786 | const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0, |
1787 | 16, 0, 0, 0, 0, 0, 8, 0 }; | ||
1861 | u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; | 1788 | u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; |
1862 | u8 tpnr[GPC_MAX]; | 1789 | u8 tpnr[GPC_MAX]; |
1863 | u8 data[32]; | 1790 | u8 data[TP_MAX]; |
1864 | 1791 | ||
1865 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | 1792 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); |
1866 | memset(data, 0x1f, sizeof(data)); | 1793 | memset(data, 0x1f, sizeof(data)); |
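The widened table is indexed by the low nibble of the chipset id, so the new entries decode as: index 1 (0xc1) -> 4, index 8 (0xc8) -> 16, index 14 (0xce) -> 8, alongside the existing 0xc0 -> 16, 0xc3 -> 4 and 0xc4 -> 8; the scratch array is likewise sized with TP_MAX instead of a hardcoded 32.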
@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
2633 | nv_icmd(dev, 0x0000053f, 0xffff0000); | 2560 | nv_icmd(dev, 0x0000053f, 0xffff0000); |
2634 | nv_icmd(dev, 0x00000585, 0x0000003f); | 2561 | nv_icmd(dev, 0x00000585, 0x0000003f); |
2635 | nv_icmd(dev, 0x00000576, 0x00000003); | 2562 | nv_icmd(dev, 0x00000576, 0x00000003); |
2563 | if (dev_priv->chipset == 0xc1) | ||
2564 | nv_icmd(dev, 0x0000057b, 0x00000059); | ||
2636 | nv_icmd(dev, 0x00000586, 0x00000040); | 2565 | nv_icmd(dev, 0x00000586, 0x00000040); |
2637 | nv_icmd(dev, 0x00000582, 0x00000080); | 2566 | nv_icmd(dev, 0x00000582, 0x00000080); |
2638 | nv_icmd(dev, 0x00000583, 0x00000080); | 2567 | nv_icmd(dev, 0x00000583, 0x00000080); |
@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
2865 | nv_wr32(dev, 0x404154, 0x00000400); | 2794 | nv_wr32(dev, 0x404154, 0x00000400); |
2866 | 2795 | ||
2867 | nvc0_grctx_generate_9097(dev); | 2796 | nvc0_grctx_generate_9097(dev); |
2797 | if (fermi >= 0x9197) | ||
2798 | nvc0_grctx_generate_9197(dev); | ||
2799 | if (fermi >= 0x9297) | ||
2800 | nvc0_grctx_generate_9297(dev); | ||
2868 | nvc0_grctx_generate_902d(dev); | 2801 | nvc0_grctx_generate_902d(dev); |
2869 | nvc0_grctx_generate_9039(dev); | 2802 | nvc0_grctx_generate_9039(dev); |
2870 | nvc0_grctx_generate_90c0(dev); | 2803 | nvc0_grctx_generate_90c0(dev); |
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc new file mode 100644 index 00000000000..0ec2add72a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc | |||
@@ -0,0 +1,474 @@ | |||
1 | /* fuc microcode for nvc0 PGRAPH/GPC | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
26 | /* To build: | ||
27 | * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h | ||
28 | */ | ||
29 | |||
30 | /* TODO | ||
31 | * - bracket certain functions with scratch writes, useful for debugging | ||
32 | * - watchdog timer around ctx operations | ||
33 | */ | ||
34 | |||
35 | .section nvc0_grgpc_data | ||
36 | include(`nvc0_graph.fuc') | ||
37 | gpc_id: .b32 0 | ||
38 | gpc_mmio_list_head: .b32 0 | ||
39 | gpc_mmio_list_tail: .b32 0 | ||
40 | |||
41 | tpc_count: .b32 0 | ||
42 | tpc_mask: .b32 0 | ||
43 | tpc_mmio_list_head: .b32 0 | ||
44 | tpc_mmio_list_tail: .b32 0 | ||
45 | |||
46 | cmd_queue: queue_init | ||
47 | |||
48 | // chipset descriptions | ||
49 | chipsets: | ||
50 | .b8 0xc0 0 0 0 | ||
51 | .b16 nvc0_gpc_mmio_head | ||
52 | .b16 nvc0_gpc_mmio_tail | ||
53 | .b16 nvc0_tpc_mmio_head | ||
54 | .b16 nvc0_tpc_mmio_tail | ||
55 | .b8 0xc1 0 0 0 | ||
56 | .b16 nvc0_gpc_mmio_head | ||
57 | .b16 nvc1_gpc_mmio_tail | ||
58 | .b16 nvc0_tpc_mmio_head | ||
59 | .b16 nvc1_tpc_mmio_tail | ||
60 | .b8 0xc3 0 0 0 | ||
61 | .b16 nvc0_gpc_mmio_head | ||
62 | .b16 nvc0_gpc_mmio_tail | ||
63 | .b16 nvc0_tpc_mmio_head | ||
64 | .b16 nvc3_tpc_mmio_tail | ||
65 | .b8 0xc4 0 0 0 | ||
66 | .b16 nvc0_gpc_mmio_head | ||
67 | .b16 nvc0_gpc_mmio_tail | ||
68 | .b16 nvc0_tpc_mmio_head | ||
69 | .b16 nvc3_tpc_mmio_tail | ||
70 | .b8 0xc8 0 0 0 | ||
71 | .b16 nvc0_gpc_mmio_head | ||
72 | .b16 nvc0_gpc_mmio_tail | ||
73 | .b16 nvc0_tpc_mmio_head | ||
74 | .b16 nvc0_tpc_mmio_tail | ||
75 | .b8 0xce 0 0 0 | ||
76 | .b16 nvc0_gpc_mmio_head | ||
77 | .b16 nvc0_gpc_mmio_tail | ||
78 | .b16 nvc0_tpc_mmio_head | ||
79 | .b16 nvc3_tpc_mmio_tail | ||
80 | .b8 0 0 0 0 | ||
81 | |||
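The table above packs one fixed-size record per supported chipset, terminated by a zero id, and init_find_chipset below steps through it 12 bytes at a time. A hedged C view of the record layout, inferred from the .b8/.b16 directives and the D[$r1 + 4..10] loads in init_context (struct and field names are mine, not from the source):

#include <stdint.h>

/* Hypothetical mirror of one GPC "chipsets" record. */
struct gpc_chipset_desc {
	uint8_t  chipset;            /* 0x00: id from CC_SCRATCH[0]; 0 ends the table */
	uint8_t  pad[3];
	uint16_t gpc_mmio_list_head; /* 0x04: data-segment offsets bounding */
	uint16_t gpc_mmio_list_tail; /* 0x06: the GPC mmio list... */
	uint16_t tpc_mmio_list_head; /* 0x08: ...and the per-TPC mmio list */
	uint16_t tpc_mmio_list_tail; /* 0x0a */
};	/* 12 bytes, matching the "chipsets - 12" stride in init_find_chipset */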
82 | // GPC mmio lists | ||
83 | nvc0_gpc_mmio_head: | ||
84 | mmctx_data(0x000380, 1) | ||
85 | mmctx_data(0x000400, 6) | ||
86 | mmctx_data(0x000450, 9) | ||
87 | mmctx_data(0x000600, 1) | ||
88 | mmctx_data(0x000684, 1) | ||
89 | mmctx_data(0x000700, 5) | ||
90 | mmctx_data(0x000800, 1) | ||
91 | mmctx_data(0x000808, 3) | ||
92 | mmctx_data(0x000828, 1) | ||
93 | mmctx_data(0x000830, 1) | ||
94 | mmctx_data(0x0008d8, 1) | ||
95 | mmctx_data(0x0008e0, 1) | ||
96 | mmctx_data(0x0008e8, 6) | ||
97 | mmctx_data(0x00091c, 1) | ||
98 | mmctx_data(0x000924, 3) | ||
99 | mmctx_data(0x000b00, 1) | ||
100 | mmctx_data(0x000b08, 6) | ||
101 | mmctx_data(0x000bb8, 1) | ||
102 | mmctx_data(0x000c08, 1) | ||
103 | mmctx_data(0x000c10, 8) | ||
104 | mmctx_data(0x000c80, 1) | ||
105 | mmctx_data(0x000c8c, 1) | ||
106 | mmctx_data(0x001000, 3) | ||
107 | mmctx_data(0x001014, 1) | ||
108 | nvc0_gpc_mmio_tail: | ||
109 | mmctx_data(0x000c6c, 1); | ||
110 | nvc1_gpc_mmio_tail: | ||
111 | |||
112 | // TPC mmio lists | ||
113 | nvc0_tpc_mmio_head: | ||
114 | mmctx_data(0x000018, 1) | ||
115 | mmctx_data(0x00003c, 1) | ||
116 | mmctx_data(0x000048, 1) | ||
117 | mmctx_data(0x000064, 1) | ||
118 | mmctx_data(0x000088, 1) | ||
119 | mmctx_data(0x000200, 6) | ||
120 | mmctx_data(0x00021c, 2) | ||
121 | mmctx_data(0x000300, 6) | ||
122 | mmctx_data(0x0003d0, 1) | ||
123 | mmctx_data(0x0003e0, 2) | ||
124 | mmctx_data(0x000400, 3) | ||
125 | mmctx_data(0x000420, 1) | ||
126 | mmctx_data(0x0004b0, 1) | ||
127 | mmctx_data(0x0004e8, 1) | ||
128 | mmctx_data(0x0004f4, 1) | ||
129 | mmctx_data(0x000520, 2) | ||
130 | mmctx_data(0x000604, 4) | ||
131 | mmctx_data(0x000644, 20) | ||
132 | mmctx_data(0x000698, 1) | ||
133 | mmctx_data(0x000750, 2) | ||
134 | nvc0_tpc_mmio_tail: | ||
135 | mmctx_data(0x000758, 1) | ||
136 | mmctx_data(0x0002c4, 1) | ||
137 | mmctx_data(0x0004bc, 1) | ||
138 | mmctx_data(0x0006e0, 1) | ||
139 | nvc3_tpc_mmio_tail: | ||
140 | mmctx_data(0x000544, 1) | ||
141 | nvc1_tpc_mmio_tail: | ||
142 | |||
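Each mmctx_data() element in the lists above appears to fold a register address and a word count into a single 32-bit entry. Comparing these lists against the generated nvc0_grgpc.fuc.h below (mmctx_data(0x000400, 6) emerges as 0x14000400, mmctx_data(0x000450, 9) as 0x20000450) suggests the encoding sketched here; this is an inference from the data, not the macro's actual definition in nvc0_graph.fuc:

#include <stdint.h>

/* Assumed mmctx_data() encoding, reverse-read from the .fuc.h dump:
 * top byte = (count - 1) * 4, low 24 bits = first register offset. */
static uint32_t mmctx_data(uint32_t addr, uint32_t count)
{
	return (((count - 1) * 4) << 24) | (addr & 0x00ffffff);
}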
143 | |||
144 | .section nvc0_grgpc_code | ||
145 | bra init | ||
146 | define(`include_code') | ||
147 | include(`nvc0_graph.fuc') | ||
148 | |||
149 | // reports an exception to the host | ||
150 | // | ||
151 | // In: $r15 error code (see nvc0_graph.fuc) | ||
152 | // | ||
153 | error: | ||
154 | push $r14 | ||
155 | mov $r14 -0x67ec // 0x9814 | ||
156 | sethi $r14 0x400000 | ||
157 | call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code | ||
158 | add b32 $r14 0x41c | ||
159 | mov $r15 1 | ||
160 | call nv_wr32 // HUB_CTXCTL_INTR_UP_SET | ||
161 | pop $r14 | ||
162 | ret | ||
163 | |||
164 | // GPC fuc initialisation, executed by triggering ucode start, will | ||
165 | // fall through to main loop after completion. | ||
166 | // | ||
167 | // Input: | ||
168 | // CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) | ||
169 | // CC_SCRATCH[1]: context base | ||
170 | // | ||
171 | // Output: | ||
172 | // CC_SCRATCH[0]: | ||
173 | // 31:31: set to signal completion | ||
174 | // CC_SCRATCH[1]: | ||
175 | // 31:0: GPC context size | ||
176 | // | ||
177 | init: | ||
178 | clear b32 $r0 | ||
179 | mov $sp $r0 | ||
180 | |||
181 | // enable fifo access | ||
182 | mov $r1 0x1200 | ||
183 | mov $r2 2 | ||
184 | iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE | ||
185 | |||
186 | // setup i0 handler, and route all interrupts to it | ||
187 | mov $r1 ih | ||
188 | mov $iv0 $r1 | ||
189 | mov $r1 0x400 | ||
190 | iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH | ||
191 | |||
192 | // enable fifo interrupt | ||
193 | mov $r2 4 | ||
194 | iowr I[$r1 + 0x000] $r2 // INTR_EN_SET | ||
195 | |||
196 | // enable interrupts | ||
197 | bset $flags ie0 | ||
198 | |||
199 | // figure out which GPC we are, and how many TPCs we have | ||
200 | mov $r1 0x608 | ||
201 | shl b32 $r1 6 | ||
202 | iord $r2 I[$r1 + 0x000] // UNITS | ||
203 | mov $r3 1 | ||
204 | and $r2 0x1f | ||
205 | shl b32 $r3 $r2 | ||
206 | sub b32 $r3 1 | ||
207 | st b32 D[$r0 + tpc_count] $r2 | ||
208 | st b32 D[$r0 + tpc_mask] $r3 | ||
209 | add b32 $r1 0x400 | ||
210 | iord $r2 I[$r1 + 0x000] // MYINDEX | ||
211 | st b32 D[$r0 + gpc_id] $r2 | ||
212 | |||
213 | // find context data for this chipset | ||
214 | mov $r2 0x800 | ||
215 | shl b32 $r2 6 | ||
216 | iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] | ||
217 | mov $r1 chipsets - 12 | ||
218 | init_find_chipset: | ||
219 | add b32 $r1 12 | ||
220 | ld b32 $r3 D[$r1 + 0x00] | ||
221 | cmpu b32 $r3 $r2 | ||
222 | bra e init_context | ||
223 | cmpu b32 $r3 0 | ||
224 | bra ne init_find_chipset | ||
225 | // unknown chipset | ||
226 | ret | ||
227 | |||
228 | // initialise context base, and size tracking | ||
229 | init_context: | ||
230 | mov $r2 0x800 | ||
231 | shl b32 $r2 6 | ||
232 | iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base | ||
233 | clear b32 $r3 // track GPC context size here | ||
234 | |||
235 | // set mmctx base addresses now so we don't have to do it later, | ||
236 | // they don't currently ever change | ||
237 | mov $r4 0x700 | ||
238 | shl b32 $r4 6 | ||
239 | shr b32 $r5 $r2 8 | ||
240 | iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE | ||
241 | iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE | ||
242 | |||
243 | // calculate GPC mmio context size, store the chipset-specific | ||
244 | // mmio list pointers somewhere we can get at them later without | ||
245 | // re-parsing the chipset list | ||
246 | clear b32 $r14 | ||
247 | clear b32 $r15 | ||
248 | ld b16 $r14 D[$r1 + 4] | ||
249 | ld b16 $r15 D[$r1 + 6] | ||
250 | st b16 D[$r0 + gpc_mmio_list_head] $r14 | ||
251 | st b16 D[$r0 + gpc_mmio_list_tail] $r15 | ||
252 | call mmctx_size | ||
253 | add b32 $r2 $r15 | ||
254 | add b32 $r3 $r15 | ||
255 | |||
256 | // calculate per-TPC mmio context size, store the list pointers | ||
257 | ld b16 $r14 D[$r1 + 8] | ||
258 | ld b16 $r15 D[$r1 + 10] | ||
259 | st b16 D[$r0 + tpc_mmio_list_head] $r14 | ||
260 | st b16 D[$r0 + tpc_mmio_list_tail] $r15 | ||
261 | call mmctx_size | ||
262 | ld b32 $r14 D[$r0 + tpc_count] | ||
263 | mulu $r14 $r15 | ||
264 | add b32 $r2 $r14 | ||
265 | add b32 $r3 $r14 | ||
266 | |||
267 | // round up base/size to 256 byte boundary (for strand SWBASE) | ||
268 | add b32 $r4 0x1300 | ||
269 | shr b32 $r3 2 | ||
270 | iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!? | ||
271 | shr b32 $r2 8 | ||
272 | shr b32 $r3 6 | ||
273 | add b32 $r2 1 | ||
274 | add b32 $r3 1 | ||
275 | shl b32 $r2 8 | ||
276 | shl b32 $r3 8 | ||
277 | |||
278 | // calculate size of strand context data | ||
279 | mov b32 $r15 $r2 | ||
280 | call strand_ctx_init | ||
281 | add b32 $r3 $r15 | ||
282 | |||
283 | // save context size, and tell HUB we're done | ||
284 | mov $r1 0x800 | ||
285 | shl b32 $r1 6 | ||
286 | iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size | ||
287 | add b32 $r1 0x800 | ||
288 | clear b32 $r2 | ||
289 | bset $r2 31 | ||
290 | iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 | ||
291 | |||
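The two CC_SCRATCH writes above finish the handshake documented at the top of init: bit 31 of CC_SCRATCH[0] signals completion, and CC_SCRATCH[1] carries the GPC context size back. A minimal sketch of the host side of the wait, assuming the GPCn falcon register layout used by the HUB fuc's init_gpc loop (CC_SCRATCH[0] at 0x502800 + gpc * 0x8000); gpc_wait_init() is a hypothetical helper, not an existing driver function:

/* Hypothetical host-side poll for the init handshake above. */
static int
gpc_wait_init(struct drm_device *dev, int gpc, u32 *ctx_size)
{
	u32 base = 0x502800 + gpc * 0x8000;	/* assumed GPCn_CC_SCRATCH[0] */
	int i;

	for (i = 0; i < 1000; i++) {
		if (nv_rd32(dev, base + 0x000) & 0x80000000) {
			*ctx_size = nv_rd32(dev, base + 0x004); /* CC_SCRATCH[1] */
			return 0;
		}
		udelay(10);
	}
	return -EBUSY;
}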
292 | // Main program loop, very simple, sleeps until woken up by the interrupt | ||
293 | // handler, pulls a command from the queue and executes its handler | ||
294 | // | ||
295 | main: | ||
296 | bset $flags $p0 | ||
297 | sleep $p0 | ||
298 | mov $r13 cmd_queue | ||
299 | call queue_get | ||
300 | bra $p1 main | ||
301 | |||
302 | // 0x0000-0x0003 are all context transfers | ||
303 | cmpu b32 $r14 0x04 | ||
304 | bra nc main_not_ctx_xfer | ||
305 | // fetch $flags and mask off $p1/$p2 | ||
306 | mov $r1 $flags | ||
307 | mov $r2 0x0006 | ||
308 | not b32 $r2 | ||
309 | and $r1 $r2 | ||
310 | // set $p1/$p2 according to transfer type | ||
311 | shl b32 $r14 1 | ||
312 | or $r1 $r14 | ||
313 | mov $flags $r1 | ||
314 | // transfer context data | ||
315 | call ctx_xfer | ||
316 | bra main | ||
317 | |||
318 | main_not_ctx_xfer: | ||
319 | shl b32 $r15 $r14 16 | ||
320 | or $r15 E_BAD_COMMAND | ||
321 | call error | ||
322 | bra main | ||
323 | |||
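The shl/or sequence in the main loop above shifts the command number one bit left into $flags, so command bit 0 lands in $p1 and bit 1 in $p2, matching the flag semantics documented at ctx_xfer further down. Spelled out as a hedged C enum (the names are mine; only the numeric mapping comes from the code):

/* Assumed meaning of the four FIFO transfer commands (0x0000-0x0003). */
enum gpc_xfer_cmd {
	GPC_XFER_SAVE           = 0,	/* $p1=0 $p2=0: save, nothing follows */
	GPC_XFER_LOAD           = 1,	/* $p1=1 $p2=0: load, no prior save */
	GPC_XFER_SAVE_PRE_LOAD  = 2,	/* $p1=0 $p2=1: save, a load will follow */
	GPC_XFER_LOAD_POST_SAVE = 3,	/* $p1=1 $p2=1: load, a save preceded it */
};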
324 | // interrupt handler | ||
325 | ih: | ||
326 | push $r8 | ||
327 | mov $r8 $flags | ||
328 | push $r8 | ||
329 | push $r9 | ||
330 | push $r10 | ||
331 | push $r11 | ||
332 | push $r13 | ||
333 | push $r14 | ||
334 | push $r15 | ||
335 | |||
336 | // incoming fifo command? | ||
337 | iord $r10 I[$r0 + 0x200] // INTR | ||
338 | and $r11 $r10 0x00000004 | ||
339 | bra e ih_no_fifo | ||
340 | // queue incoming fifo command for later processing | ||
341 | mov $r11 0x1900 | ||
342 | mov $r13 cmd_queue | ||
343 | iord $r14 I[$r11 + 0x100] // FIFO_CMD | ||
344 | iord $r15 I[$r11 + 0x000] // FIFO_DATA | ||
345 | call queue_put | ||
346 | add b32 $r11 0x400 | ||
347 | mov $r14 1 | ||
348 | iowr I[$r11 + 0x000] $r14 // FIFO_ACK | ||
349 | |||
350 | // ack, and wake up main() | ||
351 | ih_no_fifo: | ||
352 | iowr I[$r0 + 0x100] $r10 // INTR_ACK | ||
353 | |||
354 | pop $r15 | ||
355 | pop $r14 | ||
356 | pop $r13 | ||
357 | pop $r11 | ||
358 | pop $r10 | ||
359 | pop $r9 | ||
360 | pop $r8 | ||
361 | mov $flags $r8 | ||
362 | pop $r8 | ||
363 | bclr $flags $p0 | ||
364 | iret | ||
365 | |||
366 | // Set this GPC's bit in HUB_BAR, used to signal completion of various | ||
367 | // activities to the HUB fuc | ||
368 | // | ||
369 | hub_barrier_done: | ||
370 | mov $r15 1 | ||
371 | ld b32 $r14 D[$r0 + gpc_id] | ||
372 | shl b32 $r15 $r14 | ||
373 | mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET | ||
374 | sethi $r14 0x400000 | ||
375 | call nv_wr32 | ||
376 | ret | ||
377 | |||
378 | // Disables various things, waits a bit, and re-enables them.. | ||
379 | // | ||
380 | // Not sure how exactly this helps, perhaps "ENABLE" is not such a | ||
381 | // good description for the bits we turn off? Anyway, without this, | ||
382 | // funny things happen. | ||
383 | // | ||
384 | ctx_redswitch: | ||
385 | mov $r14 0x614 | ||
386 | shl b32 $r14 6 | ||
387 | mov $r15 0x020 | ||
388 | iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER | ||
389 | mov $r15 8 | ||
390 | ctx_redswitch_delay: | ||
391 | sub b32 $r15 1 | ||
392 | bra ne ctx_redswitch_delay | ||
393 | mov $r15 0xa20 | ||
394 | iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER | ||
395 | ret | ||
396 | |||
397 | // Transfer GPC context data between GPU and storage area | ||
398 | // | ||
399 | // In: $r15 context base address | ||
400 | // $p1 clear on save, set on load | ||
401 | // $p2 set if opposite direction done/will be done, so: | ||
402 | // on save it means: "a load will follow this save" | ||
403 | // on load it means: "a save preceded this load" | ||
404 | // | ||
405 | ctx_xfer: | ||
406 | // set context base address | ||
407 | mov $r1 0xa04 | ||
408 | shl b32 $r1 6 | ||
409 | iowr I[$r1 + 0x000] $r15 // MEM_BASE | ||
410 | bra not $p1 ctx_xfer_not_load | ||
411 | call ctx_redswitch | ||
412 | ctx_xfer_not_load: | ||
413 | |||
414 | // strands | ||
415 | mov $r1 0x4afc | ||
416 | sethi $r1 0x20000 | ||
417 | mov $r2 0xc | ||
418 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c | ||
419 | call strand_wait | ||
420 | mov $r2 0x47fc | ||
421 | sethi $r2 0x20000 | ||
422 | iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 | ||
423 | xbit $r2 $flags $p1 | ||
424 | add b32 $r2 3 | ||
425 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) | ||
426 | |||
427 | // mmio context | ||
428 | xbit $r10 $flags $p1 // direction | ||
429 | or $r10 2 // first | ||
430 | mov $r11 0x0000 | ||
431 | sethi $r11 0x500000 | ||
432 | ld b32 $r12 D[$r0 + gpc_id] | ||
433 | shl b32 $r12 15 | ||
434 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn | ||
435 | ld b32 $r12 D[$r0 + gpc_mmio_list_head] | ||
436 | ld b32 $r13 D[$r0 + gpc_mmio_list_tail] | ||
437 | mov $r14 0 // not multi | ||
438 | call mmctx_xfer | ||
439 | |||
440 | // per-TPC mmio context | ||
441 | xbit $r10 $flags $p1 // direction | ||
442 | or $r10 4 // last | ||
443 | mov $r11 0x4000 | ||
444 | sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 | ||
445 | ld b32 $r12 D[$r0 + gpc_id] | ||
446 | shl b32 $r12 15 | ||
447 | add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 | ||
448 | ld b32 $r12 D[$r0 + tpc_mmio_list_head] | ||
449 | ld b32 $r13 D[$r0 + tpc_mmio_list_tail] | ||
450 | ld b32 $r15 D[$r0 + tpc_mask] | ||
451 | mov $r14 0x800 // stride = 0x800 | ||
452 | call mmctx_xfer | ||
453 | |||
454 | // wait for strands to finish | ||
455 | call strand_wait | ||
456 | |||
457 | // if load, or a save without a load following, do some | ||
458 | // unknown stuff that's done after finishing a block of | ||
459 | // strand commands | ||
460 | bra $p1 ctx_xfer_post | ||
461 | bra not $p2 ctx_xfer_done | ||
462 | ctx_xfer_post: | ||
463 | mov $r1 0x4afc | ||
464 | sethi $r1 0x20000 | ||
465 | mov $r2 0xd | ||
466 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d | ||
467 | call strand_wait | ||
468 | |||
469 | // mark completion in HUB's barrier | ||
470 | ctx_xfer_done: | ||
471 | call hub_barrier_done | ||
472 | ret | ||
473 | |||
474 | .align 256 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h new file mode 100644 index 00000000000..1896c898f5b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h | |||
@@ -0,0 +1,483 @@ | |||
1 | uint32_t nvc0_grgpc_data[] = { | ||
2 | 0x00000000, | ||
3 | 0x00000000, | ||
4 | 0x00000000, | ||
5 | 0x00000000, | ||
6 | 0x00000000, | ||
7 | 0x00000000, | ||
8 | 0x00000000, | ||
9 | 0x00000000, | ||
10 | 0x00000000, | ||
11 | 0x00000000, | ||
12 | 0x00000000, | ||
13 | 0x00000000, | ||
14 | 0x00000000, | ||
15 | 0x00000000, | ||
16 | 0x00000000, | ||
17 | 0x00000000, | ||
18 | 0x00000000, | ||
19 | 0x00000000, | ||
20 | 0x00000000, | ||
21 | 0x00000000, | ||
22 | 0x00000000, | ||
23 | 0x00000000, | ||
24 | 0x00000000, | ||
25 | 0x00000000, | ||
26 | 0x00000000, | ||
27 | 0x000000c0, | ||
28 | 0x011000b0, | ||
29 | 0x01640114, | ||
30 | 0x000000c1, | ||
31 | 0x011400b0, | ||
32 | 0x01780114, | ||
33 | 0x000000c3, | ||
34 | 0x011000b0, | ||
35 | 0x01740114, | ||
36 | 0x000000c4, | ||
37 | 0x011000b0, | ||
38 | 0x01740114, | ||
39 | 0x000000c8, | ||
40 | 0x011000b0, | ||
41 | 0x01640114, | ||
42 | 0x000000ce, | ||
43 | 0x011000b0, | ||
44 | 0x01740114, | ||
45 | 0x00000000, | ||
46 | 0x00000380, | ||
47 | 0x14000400, | ||
48 | 0x20000450, | ||
49 | 0x00000600, | ||
50 | 0x00000684, | ||
51 | 0x10000700, | ||
52 | 0x00000800, | ||
53 | 0x08000808, | ||
54 | 0x00000828, | ||
55 | 0x00000830, | ||
56 | 0x000008d8, | ||
57 | 0x000008e0, | ||
58 | 0x140008e8, | ||
59 | 0x0000091c, | ||
60 | 0x08000924, | ||
61 | 0x00000b00, | ||
62 | 0x14000b08, | ||
63 | 0x00000bb8, | ||
64 | 0x00000c08, | ||
65 | 0x1c000c10, | ||
66 | 0x00000c80, | ||
67 | 0x00000c8c, | ||
68 | 0x08001000, | ||
69 | 0x00001014, | ||
70 | 0x00000c6c, | ||
71 | 0x00000018, | ||
72 | 0x0000003c, | ||
73 | 0x00000048, | ||
74 | 0x00000064, | ||
75 | 0x00000088, | ||
76 | 0x14000200, | ||
77 | 0x0400021c, | ||
78 | 0x14000300, | ||
79 | 0x000003d0, | ||
80 | 0x040003e0, | ||
81 | 0x08000400, | ||
82 | 0x00000420, | ||
83 | 0x000004b0, | ||
84 | 0x000004e8, | ||
85 | 0x000004f4, | ||
86 | 0x04000520, | ||
87 | 0x0c000604, | ||
88 | 0x4c000644, | ||
89 | 0x00000698, | ||
90 | 0x04000750, | ||
91 | 0x00000758, | ||
92 | 0x000002c4, | ||
93 | 0x000004bc, | ||
94 | 0x000006e0, | ||
95 | 0x00000544, | ||
96 | }; | ||
97 | |||
98 | uint32_t nvc0_grgpc_code[] = { | ||
99 | 0x03060ef5, | ||
100 | 0x9800d898, | ||
101 | 0x86f001d9, | ||
102 | 0x0489b808, | ||
103 | 0xf00c1bf4, | ||
104 | 0x21f502f7, | ||
105 | 0x00f802ec, | ||
106 | 0xb60798c4, | ||
107 | 0x8dbb0384, | ||
108 | 0x0880b600, | ||
109 | 0x80008e80, | ||
110 | 0x90b6018f, | ||
111 | 0x0f94f001, | ||
112 | 0xf801d980, | ||
113 | 0x0131f400, | ||
114 | 0x9800d898, | ||
115 | 0x89b801d9, | ||
116 | 0x210bf404, | ||
117 | 0xb60789c4, | ||
118 | 0x9dbb0394, | ||
119 | 0x0890b600, | ||
120 | 0x98009e98, | ||
121 | 0x80b6019f, | ||
122 | 0x0f84f001, | ||
123 | 0xf400d880, | ||
124 | 0x00f80132, | ||
125 | 0x0728b7f1, | ||
126 | 0xb906b4b6, | ||
127 | 0xc9f002ec, | ||
128 | 0x00bcd01f, | ||
129 | 0xc800bccf, | ||
130 | 0x1bf41fcc, | ||
131 | 0x06a7f0fa, | ||
132 | 0x010321f5, | ||
133 | 0xf840bfcf, | ||
134 | 0x28b7f100, | ||
135 | 0x06b4b607, | ||
136 | 0xb980bfd0, | ||
137 | 0xc9f002ec, | ||
138 | 0x1ec9f01f, | ||
139 | 0xcf00bcd0, | ||
140 | 0xccc800bc, | ||
141 | 0xfa1bf41f, | ||
142 | 0x87f100f8, | ||
143 | 0x84b60430, | ||
144 | 0x1ff9f006, | ||
145 | 0xf8008fd0, | ||
146 | 0x3087f100, | ||
147 | 0x0684b604, | ||
148 | 0xf80080d0, | ||
149 | 0x3c87f100, | ||
150 | 0x0684b608, | ||
151 | 0x99f094bd, | ||
152 | 0x0089d000, | ||
153 | 0x081887f1, | ||
154 | 0xd00684b6, | ||
155 | 0x87f1008a, | ||
156 | 0x84b60400, | ||
157 | 0x0088cf06, | ||
158 | 0xf4888aff, | ||
159 | 0x87f1f31b, | ||
160 | 0x84b6085c, | ||
161 | 0xf094bd06, | ||
162 | 0x89d00099, | ||
163 | 0xf100f800, | ||
164 | 0xb6083c87, | ||
165 | 0x94bd0684, | ||
166 | 0xd00099f0, | ||
167 | 0x87f10089, | ||
168 | 0x84b60818, | ||
169 | 0x008ad006, | ||
170 | 0x040087f1, | ||
171 | 0xcf0684b6, | ||
172 | 0x8aff0088, | ||
173 | 0xf30bf488, | ||
174 | 0x085c87f1, | ||
175 | 0xbd0684b6, | ||
176 | 0x0099f094, | ||
177 | 0xf80089d0, | ||
178 | 0x9894bd00, | ||
179 | 0x85b600e8, | ||
180 | 0x0180b61a, | ||
181 | 0xbb0284b6, | ||
182 | 0xe0b60098, | ||
183 | 0x04efb804, | ||
184 | 0xb9eb1bf4, | ||
185 | 0x00f8029f, | ||
186 | 0x083c87f1, | ||
187 | 0xbd0684b6, | ||
188 | 0x0199f094, | ||
189 | 0xf10089d0, | ||
190 | 0xb6071087, | ||
191 | 0x94bd0684, | ||
192 | 0xf405bbfd, | ||
193 | 0x8bd0090b, | ||
194 | 0x0099f000, | ||
195 | 0xf405eefd, | ||
196 | 0x8ed00c0b, | ||
197 | 0xc08fd080, | ||
198 | 0xb70199f0, | ||
199 | 0xc8010080, | ||
200 | 0xb4b600ab, | ||
201 | 0x0cb9f010, | ||
202 | 0xb601aec8, | ||
203 | 0xbefd11e4, | ||
204 | 0x008bd005, | ||
205 | 0xf0008ecf, | ||
206 | 0x0bf41fe4, | ||
207 | 0x00ce98fa, | ||
208 | 0xd005e9fd, | ||
209 | 0xc0b6c08e, | ||
210 | 0x04cdb804, | ||
211 | 0xc8e81bf4, | ||
212 | 0x1bf402ab, | ||
213 | 0x008bcf18, | ||
214 | 0xb01fb4f0, | ||
215 | 0x1bf410b4, | ||
216 | 0x02a7f0f7, | ||
217 | 0xf4c921f4, | ||
218 | 0xabc81b0e, | ||
219 | 0x10b4b600, | ||
220 | 0xf00cb9f0, | ||
221 | 0x8bd012b9, | ||
222 | 0x008bcf00, | ||
223 | 0xf412bbc8, | ||
224 | 0x87f1fa1b, | ||
225 | 0x84b6085c, | ||
226 | 0xf094bd06, | ||
227 | 0x89d00199, | ||
228 | 0xf900f800, | ||
229 | 0x02a7f0a0, | ||
230 | 0xfcc921f4, | ||
231 | 0xf100f8a0, | ||
232 | 0xf04afc87, | ||
233 | 0x97f00283, | ||
234 | 0x0089d00c, | ||
235 | 0x020721f5, | ||
236 | 0x87f100f8, | ||
237 | 0x83f04afc, | ||
238 | 0x0d97f002, | ||
239 | 0xf50089d0, | ||
240 | 0xf8020721, | ||
241 | 0xfca7f100, | ||
242 | 0x02a3f04f, | ||
243 | 0x0500aba2, | ||
244 | 0xd00fc7f0, | ||
245 | 0xc7f000ac, | ||
246 | 0x00bcd00b, | ||
247 | 0x020721f5, | ||
248 | 0xf000aed0, | ||
249 | 0xbcd00ac7, | ||
250 | 0x0721f500, | ||
251 | 0xf100f802, | ||
252 | 0xb6083c87, | ||
253 | 0x94bd0684, | ||
254 | 0xd00399f0, | ||
255 | 0x21f50089, | ||
256 | 0xe7f00213, | ||
257 | 0x3921f503, | ||
258 | 0xfca7f102, | ||
259 | 0x02a3f046, | ||
260 | 0x0400aba0, | ||
261 | 0xf040a0d0, | ||
262 | 0xbcd001c7, | ||
263 | 0x0721f500, | ||
264 | 0x010c9202, | ||
265 | 0xf000acd0, | ||
266 | 0xbcd002c7, | ||
267 | 0x0721f500, | ||
268 | 0x2621f502, | ||
269 | 0x8087f102, | ||
270 | 0x0684b608, | ||
271 | 0xb70089cf, | ||
272 | 0x95220080, | ||
273 | 0x8ed008fe, | ||
274 | 0x408ed000, | ||
275 | 0xb6808acf, | ||
276 | 0xa0b606a5, | ||
277 | 0x00eabb01, | ||
278 | 0xb60480b6, | ||
279 | 0x1bf40192, | ||
280 | 0x08e4b6e8, | ||
281 | 0xf1f2efbc, | ||
282 | 0xb6085c87, | ||
283 | 0x94bd0684, | ||
284 | 0xd00399f0, | ||
285 | 0x00f80089, | ||
286 | 0xe7f1e0f9, | ||
287 | 0xe3f09814, | ||
288 | 0x8d21f440, | ||
289 | 0x041ce0b7, | ||
290 | 0xf401f7f0, | ||
291 | 0xe0fc8d21, | ||
292 | 0x04bd00f8, | ||
293 | 0xf10004fe, | ||
294 | 0xf0120017, | ||
295 | 0x12d00227, | ||
296 | 0x3e17f100, | ||
297 | 0x0010fe04, | ||
298 | 0x040017f1, | ||
299 | 0xf0c010d0, | ||
300 | 0x12d00427, | ||
301 | 0x1031f400, | ||
302 | 0x060817f1, | ||
303 | 0xcf0614b6, | ||
304 | 0x37f00012, | ||
305 | 0x1f24f001, | ||
306 | 0xb60432bb, | ||
307 | 0x02800132, | ||
308 | 0x04038003, | ||
309 | 0x040010b7, | ||
310 | 0x800012cf, | ||
311 | 0x27f10002, | ||
312 | 0x24b60800, | ||
313 | 0x0022cf06, | ||
314 | 0xb65817f0, | ||
315 | 0x13980c10, | ||
316 | 0x0432b800, | ||
317 | 0xb00b0bf4, | ||
318 | 0x1bf40034, | ||
319 | 0xf100f8f1, | ||
320 | 0xb6080027, | ||
321 | 0x22cf0624, | ||
322 | 0xf134bd40, | ||
323 | 0xb6070047, | ||
324 | 0x25950644, | ||
325 | 0x0045d008, | ||
326 | 0xbd4045d0, | ||
327 | 0x58f4bde4, | ||
328 | 0x1f58021e, | ||
329 | 0x020e4003, | ||
330 | 0xf5040f40, | ||
331 | 0xbb013d21, | ||
332 | 0x3fbb002f, | ||
333 | 0x041e5800, | ||
334 | 0x40051f58, | ||
335 | 0x0f400a0e, | ||
336 | 0x3d21f50c, | ||
337 | 0x030e9801, | ||
338 | 0xbb00effd, | ||
339 | 0x3ebb002e, | ||
340 | 0x0040b700, | ||
341 | 0x0235b613, | ||
342 | 0xb60043d0, | ||
343 | 0x35b60825, | ||
344 | 0x0120b606, | ||
345 | 0xb60130b6, | ||
346 | 0x34b60824, | ||
347 | 0x022fb908, | ||
348 | 0x026321f5, | ||
349 | 0xf1003fbb, | ||
350 | 0xb6080017, | ||
351 | 0x13d00614, | ||
352 | 0x0010b740, | ||
353 | 0xf024bd08, | ||
354 | 0x12d01f29, | ||
355 | 0x0031f400, | ||
356 | 0xf00028f4, | ||
357 | 0x21f41cd7, | ||
358 | 0xf401f439, | ||
359 | 0xf404e4b0, | ||
360 | 0x81fe1e18, | ||
361 | 0x0627f001, | ||
362 | 0x12fd20bd, | ||
363 | 0x01e4b604, | ||
364 | 0xfe051efd, | ||
365 | 0x21f50018, | ||
366 | 0x0ef404c3, | ||
367 | 0x10ef94d3, | ||
368 | 0xf501f5f0, | ||
369 | 0xf402ec21, | ||
370 | 0x80f9c60e, | ||
371 | 0xf90188fe, | ||
372 | 0xf990f980, | ||
373 | 0xf9b0f9a0, | ||
374 | 0xf9e0f9d0, | ||
375 | 0x800acff0, | ||
376 | 0xf404abc4, | ||
377 | 0xb7f11d0b, | ||
378 | 0xd7f01900, | ||
379 | 0x40becf1c, | ||
380 | 0xf400bfcf, | ||
381 | 0xb0b70421, | ||
382 | 0xe7f00400, | ||
383 | 0x00bed001, | ||
384 | 0xfc400ad0, | ||
385 | 0xfce0fcf0, | ||
386 | 0xfcb0fcd0, | ||
387 | 0xfc90fca0, | ||
388 | 0x0088fe80, | ||
389 | 0x32f480fc, | ||
390 | 0xf001f800, | ||
391 | 0x0e9801f7, | ||
392 | 0x04febb00, | ||
393 | 0x9418e7f1, | ||
394 | 0xf440e3f0, | ||
395 | 0x00f88d21, | ||
396 | 0x0614e7f1, | ||
397 | 0xf006e4b6, | ||
398 | 0xefd020f7, | ||
399 | 0x08f7f000, | ||
400 | 0xf401f2b6, | ||
401 | 0xf7f1fd1b, | ||
402 | 0xefd00a20, | ||
403 | 0xf100f800, | ||
404 | 0xb60a0417, | ||
405 | 0x1fd00614, | ||
406 | 0x0711f400, | ||
407 | 0x04a421f5, | ||
408 | 0x4afc17f1, | ||
409 | 0xf00213f0, | ||
410 | 0x12d00c27, | ||
411 | 0x0721f500, | ||
412 | 0xfc27f102, | ||
413 | 0x0223f047, | ||
414 | 0xf00020d0, | ||
415 | 0x20b6012c, | ||
416 | 0x0012d003, | ||
417 | 0xf001acf0, | ||
418 | 0xb7f002a5, | ||
419 | 0x50b3f000, | ||
420 | 0xb6000c98, | ||
421 | 0xbcbb0fc4, | ||
422 | 0x010c9800, | ||
423 | 0xf0020d98, | ||
424 | 0x21f500e7, | ||
425 | 0xacf0015c, | ||
426 | 0x04a5f001, | ||
427 | 0x4000b7f1, | ||
428 | 0x9850b3f0, | ||
429 | 0xc4b6000c, | ||
430 | 0x00bcbb0f, | ||
431 | 0x98050c98, | ||
432 | 0x0f98060d, | ||
433 | 0x00e7f104, | ||
434 | 0x5c21f508, | ||
435 | 0x0721f501, | ||
436 | 0x0601f402, | ||
437 | 0xf11412f4, | ||
438 | 0xf04afc17, | ||
439 | 0x27f00213, | ||
440 | 0x0012d00d, | ||
441 | 0x020721f5, | ||
442 | 0x048f21f5, | ||
443 | 0x000000f8, | ||
444 | 0x00000000, | ||
445 | 0x00000000, | ||
446 | 0x00000000, | ||
447 | 0x00000000, | ||
448 | 0x00000000, | ||
449 | 0x00000000, | ||
450 | 0x00000000, | ||
451 | 0x00000000, | ||
452 | 0x00000000, | ||
453 | 0x00000000, | ||
454 | 0x00000000, | ||
455 | 0x00000000, | ||
456 | 0x00000000, | ||
457 | 0x00000000, | ||
458 | 0x00000000, | ||
459 | 0x00000000, | ||
460 | 0x00000000, | ||
461 | 0x00000000, | ||
462 | 0x00000000, | ||
463 | 0x00000000, | ||
464 | 0x00000000, | ||
465 | 0x00000000, | ||
466 | 0x00000000, | ||
467 | 0x00000000, | ||
468 | 0x00000000, | ||
469 | 0x00000000, | ||
470 | 0x00000000, | ||
471 | 0x00000000, | ||
472 | 0x00000000, | ||
473 | 0x00000000, | ||
474 | 0x00000000, | ||
475 | 0x00000000, | ||
476 | 0x00000000, | ||
477 | 0x00000000, | ||
478 | 0x00000000, | ||
479 | 0x00000000, | ||
480 | 0x00000000, | ||
481 | 0x00000000, | ||
482 | 0x00000000, | ||
483 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc new file mode 100644 index 00000000000..a1a599124cf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc | |||
@@ -0,0 +1,808 @@ | |||
1 | /* fuc microcode for nvc0 PGRAPH/HUB | ||
2 | * | ||
3 | * Copyright 2011 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Ben Skeggs | ||
24 | */ | ||
25 | |||
26 | /* To build: | ||
27 | * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h | ||
28 | */ | ||
29 | |||
30 | .section nvc0_grhub_data | ||
31 | include(`nvc0_graph.fuc') | ||
32 | gpc_count: .b32 0 | ||
33 | rop_count: .b32 0 | ||
34 | cmd_queue: queue_init | ||
35 | hub_mmio_list_head: .b32 0 | ||
36 | hub_mmio_list_tail: .b32 0 | ||
37 | |||
38 | ctx_current: .b32 0 | ||
39 | |||
40 | chipsets: | ||
41 | .b8 0xc0 0 0 0 | ||
42 | .b16 nvc0_hub_mmio_head | ||
43 | .b16 nvc0_hub_mmio_tail | ||
44 | .b8 0xc1 0 0 0 | ||
45 | .b16 nvc0_hub_mmio_head | ||
46 | .b16 nvc1_hub_mmio_tail | ||
47 | .b8 0xc3 0 0 0 | ||
48 | .b16 nvc0_hub_mmio_head | ||
49 | .b16 nvc0_hub_mmio_tail | ||
50 | .b8 0xc4 0 0 0 | ||
51 | .b16 nvc0_hub_mmio_head | ||
52 | .b16 nvc0_hub_mmio_tail | ||
53 | .b8 0xc8 0 0 0 | ||
54 | .b16 nvc0_hub_mmio_head | ||
55 | .b16 nvc0_hub_mmio_tail | ||
56 | .b8 0xce 0 0 0 | ||
57 | .b16 nvc0_hub_mmio_head | ||
58 | .b16 nvc0_hub_mmio_tail | ||
59 | .b8 0 0 0 0 | ||
60 | |||
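The HUB's table is a shorter variant of the GPC one, since only a single mmio list is tracked; init_find_chipset below walks it in 8-byte strides. A hedged C view (names mine, offsets from the D[$r15 + 4] / D[$r15 + 6] loads in init_context):

#include <stdint.h>

/* Hypothetical mirror of one HUB "chipsets" record. */
struct hub_chipset_desc {
	uint8_t  chipset;            /* 0x00: 0 terminates the table */
	uint8_t  pad[3];
	uint16_t hub_mmio_list_head; /* 0x04: data-segment offsets bounding */
	uint16_t hub_mmio_list_tail; /* 0x06: the HUB mmio list */
};	/* 8 bytes, matching the "chipsets - 8" stride */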
61 | nvc0_hub_mmio_head: | ||
62 | mmctx_data(0x17e91c, 2) | ||
63 | mmctx_data(0x400204, 2) | ||
64 | mmctx_data(0x404004, 11) | ||
65 | mmctx_data(0x404044, 1) | ||
66 | mmctx_data(0x404094, 14) | ||
67 | mmctx_data(0x4040d0, 7) | ||
68 | mmctx_data(0x4040f8, 1) | ||
69 | mmctx_data(0x404130, 3) | ||
70 | mmctx_data(0x404150, 3) | ||
71 | mmctx_data(0x404164, 2) | ||
72 | mmctx_data(0x404174, 3) | ||
73 | mmctx_data(0x404200, 8) | ||
74 | mmctx_data(0x404404, 14) | ||
75 | mmctx_data(0x404460, 4) | ||
76 | mmctx_data(0x404480, 1) | ||
77 | mmctx_data(0x404498, 1) | ||
78 | mmctx_data(0x404604, 4) | ||
79 | mmctx_data(0x404618, 32) | ||
80 | mmctx_data(0x404698, 21) | ||
81 | mmctx_data(0x4046f0, 2) | ||
82 | mmctx_data(0x404700, 22) | ||
83 | mmctx_data(0x405800, 1) | ||
84 | mmctx_data(0x405830, 3) | ||
85 | mmctx_data(0x405854, 1) | ||
86 | mmctx_data(0x405870, 4) | ||
87 | mmctx_data(0x405a00, 2) | ||
88 | mmctx_data(0x405a18, 1) | ||
89 | mmctx_data(0x406020, 1) | ||
90 | mmctx_data(0x406028, 4) | ||
91 | mmctx_data(0x4064a8, 2) | ||
92 | mmctx_data(0x4064b4, 2) | ||
93 | mmctx_data(0x407804, 1) | ||
94 | mmctx_data(0x40780c, 6) | ||
95 | mmctx_data(0x4078bc, 1) | ||
96 | mmctx_data(0x408000, 7) | ||
97 | mmctx_data(0x408064, 1) | ||
98 | mmctx_data(0x408800, 3) | ||
99 | mmctx_data(0x408900, 4) | ||
100 | mmctx_data(0x408980, 1) | ||
101 | nvc0_hub_mmio_tail: | ||
102 | mmctx_data(0x4064c0, 2) | ||
103 | nvc1_hub_mmio_tail: | ||
104 | |||
105 | .align 256 | ||
106 | chan_data: | ||
107 | chan_mmio_count: .b32 0 | ||
108 | chan_mmio_address: .b32 0 | ||
109 | |||
110 | .align 256 | ||
111 | xfer_data: .b32 0 | ||
112 | |||
113 | .section nvc0_grhub_code | ||
114 | bra init | ||
115 | define(`include_code') | ||
116 | include(`nvc0_graph.fuc') | ||
117 | |||
118 | // reports an exception to the host | ||
119 | // | ||
120 | // In: $r15 error code (see nvc0_graph.fuc) | ||
121 | // | ||
122 | error: | ||
123 | push $r14 | ||
124 | mov $r14 0x814 | ||
125 | shl b32 $r14 6 | ||
126 | iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code | ||
127 | mov $r14 0xc1c | ||
128 | shl b32 $r14 6 | ||
129 | mov $r15 1 | ||
130 | iowr I[$r14 + 0x000] $r15 // INTR_UP_SET | ||
131 | pop $r14 | ||
132 | ret | ||
133 | |||
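main_not_ctx_save further down reports failures through this routine as (command << 16) | code, so the host can recover both halves from HUB CC_SCRATCH[5]. A rough host-side sketch, assuming only what the routine above establishes (0x409814 is CC_SCRATCH[5]; the message text is illustrative):

	u32 stat = nv_rd32(dev, 0x409814);	/* HUB CC_SCRATCH[5] */
	u16 cmd  = stat >> 16;			/* offending command */
	u16 code = stat & 0xffff;		/* e.g. E_BAD_COMMAND */
	NV_ERROR(dev, "PGRAPH HUB fuc: cmd 0x%04x failed, code 0x%04x\n",
		 cmd, code);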
134 | // HUB fuc initialisation, executed by triggering ucode start, will | ||
135 | // fall through to main loop after completion. | ||
136 | // | ||
137 | // Input: | ||
138 | // CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) | ||
139 | // | ||
140 | // Output: | ||
141 | // CC_SCRATCH[0]: | ||
142 | // 31:31: set to signal completion | ||
143 | // CC_SCRATCH[1]: | ||
144 | // 31:0: total PGRAPH context size | ||
145 | // | ||
146 | init: | ||
147 | clear b32 $r0 | ||
148 | mov $sp $r0 | ||
149 | mov $xdbase $r0 | ||
150 | |||
151 | // enable fifo access | ||
152 | mov $r1 0x1200 | ||
153 | mov $r2 2 | ||
154 | iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE | ||
155 | |||
156 | // setup i0 handler, and route all interrupts to it | ||
157 | mov $r1 ih | ||
158 | mov $iv0 $r1 | ||
159 | mov $r1 0x400 | ||
160 | iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH | ||
161 | |||
162 | // route HUB_CHANNEL_SWITCH to fuc interrupt 8 | ||
163 | mov $r3 0x404 | ||
164 | shl b32 $r3 6 | ||
165 | mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8 | ||
166 | iowr I[$r3 + 0x000] $r2 | ||
167 | |||
168 | // not sure what these are, route them because NVIDIA does, and | ||
169 | // the IRQ handler will signal the host if we ever get one.. we | ||
170 | // may find out if/why we need to handle these if so.. | ||
171 | // | ||
172 | mov $r2 0x2004 | ||
173 | iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9 | ||
174 | mov $r2 0x200b | ||
175 | iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10 | ||
176 | mov $r2 0x200c | ||
177 | iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15 | ||
178 | |||
179 | // enable all INTR_UP interrupts | ||
180 | mov $r2 0xc24 | ||
181 | shl b32 $r2 6 | ||
182 | not b32 $r3 $r0 | ||
183 | iowr I[$r2] $r3 | ||
184 | |||
185 | // enable fifo, ctxsw, 9, 10, 15 interrupts | ||
186 | mov $r2 -0x78fc // 0x8704 | ||
187 | sethi $r2 0 | ||
188 | iowr I[$r1 + 0x000] $r2 // INTR_EN_SET | ||
189 | |||
190 | // fifo level triggered, rest edge | ||
191 | sub b32 $r1 0x100 | ||
192 | mov $r2 4 | ||
193 | iowr I[$r1] $r2 | ||
194 | |||
195 | // enable interrupts | ||
196 | bset $flags ie0 | ||
197 | |||
198 | // fetch enabled GPC/ROP counts | ||
199 | mov $r14 -0x69fc // 0x409604 | ||
200 | sethi $r14 0x400000 | ||
201 | call nv_rd32 | ||
202 | extr $r1 $r15 16:20 | ||
203 | st b32 D[$r0 + rop_count] $r1 | ||
204 | and $r15 0x1f | ||
205 | st b32 D[$r0 + gpc_count] $r15 | ||
206 | |||
207 | // set BAR_REQMASK to GPC mask | ||
208 | mov $r1 1 | ||
209 | shl b32 $r1 $r15 | ||
210 | sub b32 $r1 1 | ||
211 | mov $r2 0x40c | ||
212 | shl b32 $r2 6 | ||
213 | iowr I[$r2 + 0x000] $r1 | ||
214 | iowr I[$r2 + 0x100] $r1 | ||
215 | |||
216 | // find context data for this chipset | ||
217 | mov $r2 0x800 | ||
218 | shl b32 $r2 6 | ||
219 | iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] | ||
220 | mov $r15 chipsets - 8 | ||
221 | init_find_chipset: | ||
222 | add b32 $r15 8 | ||
223 | ld b32 $r3 D[$r15 + 0x00] | ||
224 | cmpu b32 $r3 $r2 | ||
225 | bra e init_context | ||
226 | cmpu b32 $r3 0 | ||
227 | bra ne init_find_chipset | ||
228 | // unknown chipset | ||
229 | ret | ||
230 | |||
231 | // context size calculation, reserve first 256 bytes for use by fuc | ||
232 | init_context: | ||
233 | mov $r1 256 | ||
234 | |||
235 | // calculate size of mmio context data | ||
236 | ld b16 $r14 D[$r15 + 4] | ||
237 | ld b16 $r15 D[$r15 + 6] | ||
238 | sethi $r14 0 | ||
239 | st b32 D[$r0 + hub_mmio_list_head] $r14 | ||
240 | st b32 D[$r0 + hub_mmio_list_tail] $r15 | ||
241 | call mmctx_size | ||
242 | |||
243 | // set mmctx base addresses now so we don't have to do it later, | ||
244 | // they don't (currently) ever change | ||
245 | mov $r3 0x700 | ||
246 | shl b32 $r3 6 | ||
247 | shr b32 $r4 $r1 8 | ||
248 | iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE | ||
249 | iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE | ||
250 | add b32 $r3 0x1300 | ||
251 | add b32 $r1 $r15 | ||
252 | shr b32 $r15 2 | ||
253 | iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!? | ||
254 | |||
255 | // strands, base offset needs to be aligned to 256 bytes | ||
256 | shr b32 $r1 8 | ||
257 | add b32 $r1 1 | ||
258 | shl b32 $r1 8 | ||
259 | mov b32 $r15 $r1 | ||
260 | call strand_ctx_init | ||
261 | add b32 $r1 $r15 | ||
262 | |||
263 | // initialise each GPC in sequence by passing in the offset of its | ||
264 | // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which | ||
265 | // has previously been uploaded by the host) running. | ||
266 | // | ||
267 | // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31 | ||
268 | // when it has completed, and return the size of its context data | ||
269 | // in GPCn_CC_SCRATCH[1] | ||
270 | // | ||
271 | ld b32 $r3 D[$r0 + gpc_count] | ||
272 | mov $r4 0x2000 | ||
273 | sethi $r4 0x500000 | ||
274 | init_gpc: | ||
275 | // setup, and start GPC ucode running | ||
276 | add b32 $r14 $r4 0x804 | ||
277 | mov b32 $r15 $r1 | ||
278 | call nv_wr32 // CC_SCRATCH[1] = ctx offset | ||
279 | add b32 $r14 $r4 0x800 | ||
280 | mov b32 $r15 $r2 | ||
281 | call nv_wr32 // CC_SCRATCH[0] = chipset | ||
282 | add b32 $r14 $r4 0x10c | ||
283 | clear b32 $r15 | ||
284 | call nv_wr32 | ||
285 | add b32 $r14 $r4 0x104 | ||
286 | call nv_wr32 // ENTRY | ||
287 | add b32 $r14 $r4 0x100 | ||
288 | mov $r15 2 // CTRL_START_TRIGGER | ||
289 | call nv_wr32 // CTRL | ||
290 | |||
291 | // wait for it to complete, and adjust context size | ||
292 | add b32 $r14 $r4 0x800 | ||
293 | init_gpc_wait: | ||
294 | call nv_rd32 | ||
295 | xbit $r15 $r15 31 | ||
296 | bra e init_gpc_wait | ||
297 | add b32 $r14 $r4 0x804 | ||
298 | call nv_rd32 | ||
299 | add b32 $r1 $r15 | ||
300 | |||
301 | // next! | ||
302 | add b32 $r4 0x8000 | ||
303 | sub b32 $r3 1 | ||
304 | bra ne init_gpc | ||
305 | |||
306 | // save context size, and tell host we're ready | ||
307 | mov $r2 0x800 | ||
308 | shl b32 $r2 6 | ||
309 | iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size | ||
310 | add b32 $r2 0x800 | ||
311 | clear b32 $r1 | ||
312 | bset $r1 31 | ||
313 | iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000 | ||
314 | |||
315 | // Main program loop, very simple, sleeps until woken up by the interrupt | ||
316 | // handler, pulls a command from the queue and executes its handler | ||
317 | // | ||
318 | main: | ||
319 | // sleep until we have something to do | ||
320 | bset $flags $p0 | ||
321 | sleep $p0 | ||
322 | mov $r13 cmd_queue | ||
323 | call queue_get | ||
324 | bra $p1 main | ||
325 | |||
326 | // context switch, requested by GPU? | ||
327 | cmpu b32 $r14 0x4001 | ||
328 | bra ne main_not_ctx_switch | ||
329 | trace_set(T_AUTO) | ||
330 | mov $r1 0xb00 | ||
331 | shl b32 $r1 6 | ||
332 | iord $r2 I[$r1 + 0x100] // CHAN_NEXT | ||
333 | iord $r1 I[$r1 + 0x000] // CHAN_CUR | ||
334 | |||
335 | xbit $r3 $r1 31 | ||
336 | bra e chsw_no_prev | ||
337 | xbit $r3 $r2 31 | ||
338 | bra e chsw_prev_no_next | ||
339 | push $r2 | ||
340 | mov b32 $r2 $r1 | ||
341 | trace_set(T_SAVE) | ||
342 | bclr $flags $p1 | ||
343 | bset $flags $p2 | ||
344 | call ctx_xfer | ||
345 | trace_clr(T_SAVE) | ||
346 | pop $r2 | ||
347 | trace_set(T_LOAD) | ||
348 | bset $flags $p1 | ||
349 | call ctx_xfer | ||
350 | trace_clr(T_LOAD) | ||
351 | bra chsw_done | ||
352 | chsw_prev_no_next: | ||
353 | push $r2 | ||
354 | mov b32 $r2 $r1 | ||
355 | bclr $flags $p1 | ||
356 | bclr $flags $p2 | ||
357 | call ctx_xfer | ||
358 | pop $r2 | ||
359 | mov $r1 0xb00 | ||
360 | shl b32 $r1 6 | ||
361 | iowr I[$r1] $r2 | ||
362 | bra chsw_done | ||
363 | chsw_no_prev: | ||
364 | xbit $r3 $r2 31 | ||
365 | bra e chsw_done | ||
366 | bset $flags $p1 | ||
367 | bclr $flags $p2 | ||
368 | call ctx_xfer | ||
369 | |||
370 | // ack the context switch request | ||
371 | chsw_done: | ||
372 | mov $r1 0xb0c | ||
373 | shl b32 $r1 6 | ||
374 | mov $r2 1 | ||
375 | iowr I[$r1 + 0x000] $r2 // 0x409b0c | ||
376 | trace_clr(T_AUTO) | ||
377 | bra main | ||
378 | |||
379 | // request to set current channel? (*not* a context switch) | ||
380 | main_not_ctx_switch: | ||
381 | cmpu b32 $r14 0x0001 | ||
382 | bra ne main_not_ctx_chan | ||
383 | mov b32 $r2 $r15 | ||
384 | call ctx_chan | ||
385 | bra main_done | ||
386 | |||
387 | // request to store current channel context? | ||
388 | main_not_ctx_chan: | ||
389 | cmpu b32 $r14 0x0002 | ||
390 | bra ne main_not_ctx_save | ||
391 | trace_set(T_SAVE) | ||
392 | bclr $flags $p1 | ||
393 | bclr $flags $p2 | ||
394 | call ctx_xfer | ||
395 | trace_clr(T_SAVE) | ||
396 | bra main_done | ||
397 | |||
398 | main_not_ctx_save: | ||
399 | shl b32 $r15 $r14 16 | ||
400 | or $r15 E_BAD_COMMAND | ||
401 | call error | ||
402 | bra main | ||
403 | |||
404 | main_done: | ||
405 | mov $r1 0x820 | ||
406 | shl b32 $r1 6 | ||
407 | clear b32 $r2 | ||
408 | bset $r2 31 | ||
409 | iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 | ||
410 | bra main | ||
411 | |||
412 | // interrupt handler | ||
413 | ih: | ||
414 | push $r8 | ||
415 | mov $r8 $flags | ||
416 | push $r8 | ||
417 | push $r9 | ||
418 | push $r10 | ||
419 | push $r11 | ||
420 | push $r13 | ||
421 | push $r14 | ||
422 | push $r15 | ||
423 | |||
424 | // incoming fifo command? | ||
425 | iord $r10 I[$r0 + 0x200] // INTR | ||
426 | and $r11 $r10 0x00000004 | ||
427 | bra e ih_no_fifo | ||
428 | // queue incoming fifo command for later processing | ||
429 | mov $r11 0x1900 | ||
430 | mov $r13 cmd_queue | ||
431 | iord $r14 I[$r11 + 0x100] // FIFO_CMD | ||
432 | iord $r15 I[$r11 + 0x000] // FIFO_DATA | ||
433 | call queue_put | ||
434 | add b32 $r11 0x400 | ||
435 | mov $r14 1 | ||
436 | iowr I[$r11 + 0x000] $r14 // FIFO_ACK | ||
437 | |||
438 | // context switch request? | ||
439 | ih_no_fifo: | ||
440 | and $r11 $r10 0x00000100 | ||
441 | bra e ih_no_ctxsw | ||
442 | // enqueue a context switch for later processing | ||
443 | mov $r13 cmd_queue | ||
444 | mov $r14 0x4001 | ||
445 | call queue_put | ||
446 | |||
447 | // anything we didn't handle, bring it to the host's attention | ||
448 | ih_no_ctxsw: | ||
449 | mov $r11 0x104 | ||
450 | not b32 $r11 | ||
451 | and $r11 $r10 $r11 | ||
452 | bra e ih_no_other | ||
453 | mov $r10 0xc1c | ||
454 | shl b32 $r10 6 | ||
455 | iowr I[$r10] $r11 // INTR_UP_SET | ||
456 | |||
457 | // ack, and wake up main() | ||
458 | ih_no_other: | ||
459 | iowr I[$r0 + 0x100] $r10 // INTR_ACK | ||
460 | |||
461 | pop $r15 | ||
462 | pop $r14 | ||
463 | pop $r13 | ||
464 | pop $r11 | ||
465 | pop $r10 | ||
466 | pop $r9 | ||
467 | pop $r8 | ||
468 | mov $flags $r8 | ||
469 | pop $r8 | ||
470 | bclr $flags $p0 | ||
471 | iret | ||
472 | |||
473 | // Not really sure, but MEM_CMD 7 will hang forever if this isn't done | ||
474 | ctx_4160s: | ||
475 | mov $r14 0x4160 | ||
476 | sethi $r14 0x400000 | ||
477 | mov $r15 1 | ||
478 | call nv_wr32 | ||
479 | ctx_4160s_wait: | ||
480 | call nv_rd32 | ||
481 | xbit $r15 $r15 4 | ||
482 | bra e ctx_4160s_wait | ||
483 | ret | ||
484 | |||
485 | // Without clearing again at end of xfer, some things cause PGRAPH | ||
486 | // to hang with STATUS=0x00000007 until it's cleared.. fbcon can | ||
487 | // still function with it set however... | ||
488 | ctx_4160c: | ||
489 | mov $r14 0x4160 | ||
490 | sethi $r14 0x400000 | ||
491 | clear b32 $r15 | ||
492 | call nv_wr32 | ||
493 | ret | ||
494 | |||
495 | // Again, not really sure | ||
496 | // | ||
497 | // In: $r15 value to set 0x404170 to | ||
498 | // | ||
499 | ctx_4170s: | ||
500 | mov $r14 0x4170 | ||
501 | sethi $r14 0x400000 | ||
502 | or $r15 0x10 | ||
503 | call nv_wr32 | ||
504 | ret | ||
505 | |||
506 | // Waits for a ctx_4170s() call to complete | ||
507 | // | ||
508 | ctx_4170w: | ||
509 | mov $r14 0x4170 | ||
510 | sethi $r14 0x400000 | ||
511 | call nv_rd32 | ||
512 | and $r15 0x10 | ||
513 | bra ne ctx_4170w | ||
514 | ret | ||
515 | |||
516 | // Disables various things, waits a bit, and re-enables them.. | ||
517 | // | ||
518 | // Not sure how exactly this helps, perhaps "ENABLE" is not such a | ||
519 | // good description for the bits we turn off? Anyway, without this, | ||
520 | // funny things happen. | ||
521 | // | ||
522 | ctx_redswitch: | ||
523 | mov $r14 0x614 | ||
524 | shl b32 $r14 6 | ||
525 | mov $r15 0x270 | ||
526 | iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL | ||
527 | mov $r15 8 | ||
528 | ctx_redswitch_delay: | ||
529 | sub b32 $r15 1 | ||
530 | bra ne ctx_redswitch_delay | ||
531 | mov $r15 0x770 | ||
532 | iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL | ||
533 | ret | ||
534 | |||
535 | // Not a clue what this is for, except that unless the value is 0x10, the | ||
536 | // strand context is saved (and presumably restored) incorrectly.. | ||
537 | // | ||
538 | // In: $r15 value to set to (0x00/0x10 are used) | ||
539 | // | ||
540 | ctx_86c: | ||
541 | mov $r14 0x86c | ||
542 | shl b32 $r14 6 | ||
543 | iowr I[$r14] $r15 // HUB(0x86c) = val | ||
544 | mov $r14 -0x75ec | ||
545 | sethi $r14 0x400000 | ||
546 | call nv_wr32 // ROP(0xa14) = val | ||
547 | mov $r14 -0x5794 | ||
548 | sethi $r14 0x410000 | ||
549 | call nv_wr32 // GPC(0x86c) = val | ||
550 | ret | ||
551 | |||
552 | // ctx_load - loads a channel's ctxctl data, and selects its vm | ||
553 | // | ||
554 | // In: $r2 channel address | ||
555 | // | ||
556 | ctx_load: | ||
557 | trace_set(T_CHAN) | ||
558 | |||
559 | // switch to channel, somewhat magic in parts.. | ||
560 | mov $r10 12 // DONE_UNK12 | ||
561 | call wait_donez | ||
562 | mov $r1 0xa24 | ||
563 | shl b32 $r1 6 | ||
564 | iowr I[$r1 + 0x000] $r0 // 0x409a24 | ||
565 | mov $r3 0xb00 | ||
566 | shl b32 $r3 6 | ||
567 | iowr I[$r3 + 0x100] $r2 // CHAN_NEXT | ||
568 | mov $r1 0xa0c | ||
569 | shl b32 $r1 6 | ||
570 | mov $r4 7 | ||
571 | iowr I[$r1 + 0x000] $r2 // MEM_CHAN | ||
572 | iowr I[$r1 + 0x100] $r4 // MEM_CMD | ||
573 | ctx_chan_wait_0: | ||
574 | iord $r4 I[$r1 + 0x100] | ||
575 | and $r4 0x1f | ||
576 | bra ne ctx_chan_wait_0 | ||
577 | iowr I[$r3 + 0x000] $r2 // CHAN_CUR | ||
578 | |||
579 | // load channel header, fetch PGRAPH context pointer | ||
580 | mov $xtargets $r0 | ||
581 | bclr $r2 31 | ||
582 | shl b32 $r2 4 | ||
583 | add b32 $r2 2 | ||
584 | |||
585 | trace_set(T_LCHAN) | ||
586 | mov $r1 0xa04 | ||
587 | shl b32 $r1 6 | ||
588 | iowr I[$r1 + 0x000] $r2 // MEM_BASE | ||
589 | mov $r1 0xa20 | ||
590 | shl b32 $r1 6 | ||
591 | mov $r2 0x0002 | ||
592 | sethi $r2 0x80000000 | ||
593 | iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram | ||
594 | mov $r1 0x10 // chan + 0x0210 | ||
595 | mov $r2 xfer_data | ||
596 | sethi $r2 0x00020000 // 16 bytes | ||
597 | xdld $r1 $r2 | ||
598 | xdwait | ||
599 | trace_clr(T_LCHAN) | ||
600 | |||
601 | // update current context | ||
602 | ld b32 $r1 D[$r0 + xfer_data + 4] | ||
603 | shl b32 $r1 24 | ||
604 | ld b32 $r2 D[$r0 + xfer_data + 0] | ||
605 | shr b32 $r2 8 | ||
606 | or $r1 $r2 | ||
607 | st b32 D[$r0 + ctx_current] $r1 | ||
608 | |||
609 | // set transfer base to start of context, and fetch context header | ||
610 | trace_set(T_LCTXH) | ||
611 | mov $r2 0xa04 | ||
612 | shl b32 $r2 6 | ||
613 | iowr I[$r2 + 0x000] $r1 // MEM_BASE | ||
614 | mov $r2 1 | ||
615 | mov $r1 0xa20 | ||
616 | shl b32 $r1 6 | ||
617 | iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm | ||
618 | mov $r1 chan_data | ||
619 | sethi $r1 0x00060000 // 256 bytes | ||
620 | xdld $r0 $r1 | ||
621 | xdwait | ||
622 | trace_clr(T_LCTXH) | ||
623 | |||
624 | trace_clr(T_CHAN) | ||
625 | ret | ||
626 | |||
627 | // ctx_chan - handler for HUB_SET_CHAN command, will set a channel as | ||
628 | // the active channel for ctxctl, but not actually transfer | ||
629 | // any context data. intended for use only during initial | ||
630 | // context construction. | ||
631 | // | ||
632 | // In: $r2 channel address | ||
633 | // | ||
634 | ctx_chan: | ||
635 | call ctx_4160s | ||
636 | call ctx_load | ||
637 | mov $r10 12 // DONE_UNK12 | ||
638 | call wait_donez | ||
639 | mov $r1 0xa10 | ||
640 | shl b32 $r1 6 | ||
641 | mov $r2 5 | ||
642 | iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???) | ||
643 | ctx_chan_wait: | ||
644 | iord $r2 I[$r1 + 0x000] | ||
645 | or $r2 $r2 | ||
646 | bra ne ctx_chan_wait | ||
647 | call ctx_4160c | ||
648 | ret | ||
649 | |||
650 | // Execute per-context state overrides list | ||
651 | // | ||
652 | // Only executed on the first load of a channel. Might want to look into | ||
653 | // removing this and having the host directly modify the channel's context | ||
654 | // to change this state... The nouveau DRM already builds this list as | ||
655 | // it's definitely needed for NVIDIA's ucode, so we may as well use it for now | ||
656 | // | ||
657 | // Input: $r1 mmio list length | ||
658 | // | ||
659 | ctx_mmio_exec: | ||
660 | // set transfer base to be the mmio list | ||
661 | ld b32 $r3 D[$r0 + chan_mmio_address] | ||
662 | mov $r2 0xa04 | ||
663 | shl b32 $r2 6 | ||
664 | iowr I[$r2 + 0x000] $r3 // MEM_BASE | ||
665 | |||
666 | clear b32 $r3 | ||
667 | ctx_mmio_loop: | ||
668 | // fetch next 256 bytes of mmio list if necessary | ||
669 | and $r4 $r3 0xff | ||
670 | bra ne ctx_mmio_pull | ||
671 | mov $r5 xfer_data | ||
672 | sethi $r5 0x00060000 // 256 bytes | ||
673 | xdld $r3 $r5 | ||
674 | xdwait | ||
675 | |||
676 | // execute a single list entry | ||
677 | ctx_mmio_pull: | ||
678 | ld b32 $r14 D[$r4 + xfer_data + 0x00] | ||
679 | ld b32 $r15 D[$r4 + xfer_data + 0x04] | ||
680 | call nv_wr32 | ||
681 | |||
682 | // next! | ||
683 | add b32 $r3 8 | ||
684 | sub b32 $r1 1 | ||
685 | bra ne ctx_mmio_loop | ||
686 | |||
687 | // set transfer base back to the current context | ||
688 | ctx_mmio_done: | ||
689 | ld b32 $r3 D[$r0 + ctx_current] | ||
690 | iowr I[$r2 + 0x000] $r3 // MEM_BASE | ||
691 | |||
692 | // disable the mmio list now, we don't need/want to execute it again | ||
693 | st b32 D[$r0 + chan_mmio_count] $r0 | ||
694 | mov $r1 chan_data | ||
695 | sethi $r1 0x00060000 // 256 bytes | ||
696 | xdst $r0 $r1 | ||
697 | xdwait | ||
698 | ret | ||
699 | |||
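ctx_mmio_exec above consumes the list eight bytes per entry: a register number handed to nv_wr32, then the value to write, chan_mmio_count entries in total, streamed in 256-byte chunks from the VM address in chan_mmio_address. As a hedged C layout (names mine):

#include <stdint.h>

/* Hypothetical shape of one per-channel mmio override entry. */
struct hub_mmio_override {
	uint32_t addr;	/* PGRAPH register, passed to nv_wr32 */
	uint32_t data;	/* value written on the channel's first load */
};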
700 | // Transfer HUB context data between GPU and storage area | ||
701 | // | ||
702 | // In: $r2 channel address | ||
703 | // $p1 clear on save, set on load | ||
704 | // $p2 set if opposite direction done/will be done, so: | ||
705 | // on save it means: "a load will follow this save" | ||
706 | // on load it means: "a save preceded this load" | ||
707 | // | ||
708 | ctx_xfer: | ||
709 | bra not $p1 ctx_xfer_pre | ||
710 | bra $p2 ctx_xfer_pre_load | ||
711 | ctx_xfer_pre: | ||
712 | mov $r15 0x10 | ||
713 | call ctx_86c | ||
714 | call ctx_4160s | ||
715 | bra not $p1 ctx_xfer_exec | ||
716 | |||
717 | ctx_xfer_pre_load: | ||
718 | mov $r15 2 | ||
719 | call ctx_4170s | ||
720 | call ctx_4170w | ||
721 | call ctx_redswitch | ||
722 | clear b32 $r15 | ||
723 | call ctx_4170s | ||
724 | call ctx_load | ||
725 | |||
726 | // fetch context pointer, and initiate xfer on all GPCs | ||
727 | ctx_xfer_exec: | ||
728 | ld b32 $r1 D[$r0 + ctx_current] | ||
729 | mov $r2 0x414 | ||
730 | shl b32 $r2 6 | ||
731 | iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset | ||
732 | mov $r14 -0x5b00 | ||
733 | sethi $r14 0x410000 | ||
734 | mov b32 $r15 $r1 | ||
735 | call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer | ||
736 | add b32 $r14 4 | ||
737 | xbit $r15 $flags $p1 | ||
738 | xbit $r2 $flags $p2 | ||
739 | shl b32 $r2 1 | ||
740 | or $r15 $r2 | ||
741 | call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) | ||
742 | |||
743 | // strands | ||
744 | mov $r1 0x4afc | ||
745 | sethi $r1 0x20000 | ||
746 | mov $r2 0xc | ||
747 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c | ||
748 | call strand_wait | ||
749 | mov $r2 0x47fc | ||
750 | sethi $r2 0x20000 | ||
751 | iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 | ||
752 | xbit $r2 $flags $p1 | ||
753 | add b32 $r2 3 | ||
754 | iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) | ||
755 | |||
756 | // mmio context | ||
757 | xbit $r10 $flags $p1 // direction | ||
758 | or $r10 6 // first, last | ||
759 | mov $r11 0 // base = 0 | ||
760 | ld b32 $r12 D[$r0 + hub_mmio_list_head] | ||
761 | ld b32 $r13 D[$r0 + hub_mmio_list_tail] | ||
762 | mov $r14 0 // not multi | ||
763 | call mmctx_xfer | ||
764 | |||
765 | // wait for GPCs to all complete | ||
766 | mov $r10 8 // DONE_BAR | ||
767 | call wait_doneo | ||
768 | |||
769 | // wait for strand xfer to complete | ||
770 | call strand_wait | ||
771 | |||
772 | // post-op | ||
773 | bra $p1 ctx_xfer_post | ||
774 | mov $r10 12 // DONE_UNK12 | ||
775 | call wait_donez | ||
776 | mov $r1 0xa10 | ||
777 | shl b32 $r1 6 | ||
778 | mov $r2 5 | ||
779 | iowr I[$r1] $r2 // MEM_CMD | ||
780 | ctx_xfer_post_save_wait: | ||
781 | iord $r2 I[$r1] | ||
782 | or $r2 $r2 | ||
783 | bra ne ctx_xfer_post_save_wait | ||
784 | |||
785 | bra $p2 ctx_xfer_done | ||
786 | ctx_xfer_post: | ||
787 | mov $r15 2 | ||
788 | call ctx_4170s | ||
789 | clear b32 $r15 | ||
790 | call ctx_86c | ||
791 | call strand_post | ||
792 | call ctx_4170w | ||
793 | clear b32 $r15 | ||
794 | call ctx_4170s | ||
795 | |||
796 | bra not $p1 ctx_xfer_no_post_mmio | ||
797 | ld b32 $r1 D[$r0 + chan_mmio_count] | ||
798 | or $r1 $r1 | ||
799 | bra e ctx_xfer_no_post_mmio | ||
800 | call ctx_mmio_exec | ||
801 | |||
802 | ctx_xfer_no_post_mmio: | ||
803 | call ctx_4160c | ||
804 | |||
805 | ctx_xfer_done: | ||
806 | ret | ||
807 | |||
808 | .align 256 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h new file mode 100644 index 00000000000..b3b541b6d04 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h | |||
@@ -0,0 +1,838 @@ | |||
1 | uint32_t nvc0_grhub_data[] = { | ||
2 | 0x00000000, | ||
3 | 0x00000000, | ||
4 | 0x00000000, | ||
5 | 0x00000000, | ||
6 | 0x00000000, | ||
7 | 0x00000000, | ||
8 | 0x00000000, | ||
9 | 0x00000000, | ||
10 | 0x00000000, | ||
11 | 0x00000000, | ||
12 | 0x00000000, | ||
13 | 0x00000000, | ||
14 | 0x00000000, | ||
15 | 0x00000000, | ||
16 | 0x00000000, | ||
17 | 0x00000000, | ||
18 | 0x00000000, | ||
19 | 0x00000000, | ||
20 | 0x00000000, | ||
21 | 0x00000000, | ||
22 | 0x00000000, | ||
23 | 0x00000000, | ||
24 | 0x00000000, | ||
25 | 0x000000c0, | ||
26 | 0x012c0090, | ||
27 | 0x000000c1, | ||
28 | 0x01300090, | ||
29 | 0x000000c3, | ||
30 | 0x012c0090, | ||
31 | 0x000000c4, | ||
32 | 0x012c0090, | ||
33 | 0x000000c8, | ||
34 | 0x012c0090, | ||
35 | 0x000000ce, | ||
36 | 0x012c0090, | ||
37 | 0x00000000, | ||
38 | 0x0417e91c, | ||
39 | 0x04400204, | ||
40 | 0x28404004, | ||
41 | 0x00404044, | ||
42 | 0x34404094, | ||
43 | 0x184040d0, | ||
44 | 0x004040f8, | ||
45 | 0x08404130, | ||
46 | 0x08404150, | ||
47 | 0x04404164, | ||
48 | 0x08404174, | ||
49 | 0x1c404200, | ||
50 | 0x34404404, | ||
51 | 0x0c404460, | ||
52 | 0x00404480, | ||
53 | 0x00404498, | ||
54 | 0x0c404604, | ||
55 | 0x7c404618, | ||
56 | 0x50404698, | ||
57 | 0x044046f0, | ||
58 | 0x54404700, | ||
59 | 0x00405800, | ||
60 | 0x08405830, | ||
61 | 0x00405854, | ||
62 | 0x0c405870, | ||
63 | 0x04405a00, | ||
64 | 0x00405a18, | ||
65 | 0x00406020, | ||
66 | 0x0c406028, | ||
67 | 0x044064a8, | ||
68 | 0x044064b4, | ||
69 | 0x00407804, | ||
70 | 0x1440780c, | ||
71 | 0x004078bc, | ||
72 | 0x18408000, | ||
73 | 0x00408064, | ||
74 | 0x08408800, | ||
75 | 0x0c408900, | ||
76 | 0x00408980, | ||
77 | 0x044064c0, | ||
78 | 0x00000000, | ||
79 | 0x00000000, | ||
80 | 0x00000000, | ||
81 | 0x00000000, | ||
82 | 0x00000000, | ||
83 | 0x00000000, | ||
84 | 0x00000000, | ||
85 | 0x00000000, | ||
86 | 0x00000000, | ||
87 | 0x00000000, | ||
88 | 0x00000000, | ||
89 | 0x00000000, | ||
90 | 0x00000000, | ||
91 | 0x00000000, | ||
92 | 0x00000000, | ||
93 | 0x00000000, | ||
94 | 0x00000000, | ||
95 | 0x00000000, | ||
96 | 0x00000000, | ||
97 | 0x00000000, | ||
98 | 0x00000000, | ||
99 | 0x00000000, | ||
100 | 0x00000000, | ||
101 | 0x00000000, | ||
102 | 0x00000000, | ||
103 | 0x00000000, | ||
104 | 0x00000000, | ||
105 | 0x00000000, | ||
106 | 0x00000000, | ||
107 | 0x00000000, | ||
108 | 0x00000000, | ||
109 | 0x00000000, | ||
110 | 0x00000000, | ||
111 | 0x00000000, | ||
112 | 0x00000000, | ||
113 | 0x00000000, | ||
114 | 0x00000000, | ||
115 | 0x00000000, | ||
116 | 0x00000000, | ||
117 | 0x00000000, | ||
118 | 0x00000000, | ||
119 | 0x00000000, | ||
120 | 0x00000000, | ||
121 | 0x00000000, | ||
122 | 0x00000000, | ||
123 | 0x00000000, | ||
124 | 0x00000000, | ||
125 | 0x00000000, | ||
126 | 0x00000000, | ||
127 | 0x00000000, | ||
128 | 0x00000000, | ||
129 | 0x00000000, | ||
130 | 0x00000000, | ||
131 | 0x00000000, | ||
132 | 0x00000000, | ||
133 | 0x00000000, | ||
134 | 0x00000000, | ||
135 | 0x00000000, | ||
136 | 0x00000000, | ||
137 | 0x00000000, | ||
138 | 0x00000000, | ||
139 | 0x00000000, | ||
140 | 0x00000000, | ||
141 | 0x00000000, | ||
142 | 0x00000000, | ||
143 | 0x00000000, | ||
144 | 0x00000000, | ||
145 | 0x00000000, | ||
146 | 0x00000000, | ||
147 | 0x00000000, | ||
148 | 0x00000000, | ||
149 | 0x00000000, | ||
150 | 0x00000000, | ||
151 | 0x00000000, | ||
152 | 0x00000000, | ||
153 | 0x00000000, | ||
154 | 0x00000000, | ||
155 | 0x00000000, | ||
156 | 0x00000000, | ||
157 | 0x00000000, | ||
158 | 0x00000000, | ||
159 | 0x00000000, | ||
160 | 0x00000000, | ||
161 | 0x00000000, | ||
162 | 0x00000000, | ||
163 | 0x00000000, | ||
164 | 0x00000000, | ||
165 | 0x00000000, | ||
166 | 0x00000000, | ||
167 | 0x00000000, | ||
168 | 0x00000000, | ||
169 | 0x00000000, | ||
170 | 0x00000000, | ||
171 | 0x00000000, | ||
172 | 0x00000000, | ||
173 | 0x00000000, | ||
174 | 0x00000000, | ||
175 | 0x00000000, | ||
176 | 0x00000000, | ||
177 | 0x00000000, | ||
178 | 0x00000000, | ||
179 | 0x00000000, | ||
180 | 0x00000000, | ||
181 | 0x00000000, | ||
182 | 0x00000000, | ||
183 | 0x00000000, | ||
184 | 0x00000000, | ||
185 | 0x00000000, | ||
186 | 0x00000000, | ||
187 | 0x00000000, | ||
188 | 0x00000000, | ||
189 | 0x00000000, | ||
190 | 0x00000000, | ||
191 | 0x00000000, | ||
192 | 0x00000000, | ||
193 | 0x00000000, | ||
194 | 0x00000000, | ||
195 | }; | ||
196 | |||
197 | uint32_t nvc0_grhub_code[] = { | ||
198 | 0x03090ef5, | ||
199 | 0x9800d898, | ||
200 | 0x86f001d9, | ||
201 | 0x0489b808, | ||
202 | 0xf00c1bf4, | ||
203 | 0x21f502f7, | ||
204 | 0x00f802ec, | ||
205 | 0xb60798c4, | ||
206 | 0x8dbb0384, | ||
207 | 0x0880b600, | ||
208 | 0x80008e80, | ||
209 | 0x90b6018f, | ||
210 | 0x0f94f001, | ||
211 | 0xf801d980, | ||
212 | 0x0131f400, | ||
213 | 0x9800d898, | ||
214 | 0x89b801d9, | ||
215 | 0x210bf404, | ||
216 | 0xb60789c4, | ||
217 | 0x9dbb0394, | ||
218 | 0x0890b600, | ||
219 | 0x98009e98, | ||
220 | 0x80b6019f, | ||
221 | 0x0f84f001, | ||
222 | 0xf400d880, | ||
223 | 0x00f80132, | ||
224 | 0x0728b7f1, | ||
225 | 0xb906b4b6, | ||
226 | 0xc9f002ec, | ||
227 | 0x00bcd01f, | ||
228 | 0xc800bccf, | ||
229 | 0x1bf41fcc, | ||
230 | 0x06a7f0fa, | ||
231 | 0x010321f5, | ||
232 | 0xf840bfcf, | ||
233 | 0x28b7f100, | ||
234 | 0x06b4b607, | ||
235 | 0xb980bfd0, | ||
236 | 0xc9f002ec, | ||
237 | 0x1ec9f01f, | ||
238 | 0xcf00bcd0, | ||
239 | 0xccc800bc, | ||
240 | 0xfa1bf41f, | ||
241 | 0x87f100f8, | ||
242 | 0x84b60430, | ||
243 | 0x1ff9f006, | ||
244 | 0xf8008fd0, | ||
245 | 0x3087f100, | ||
246 | 0x0684b604, | ||
247 | 0xf80080d0, | ||
248 | 0x3c87f100, | ||
249 | 0x0684b608, | ||
250 | 0x99f094bd, | ||
251 | 0x0089d000, | ||
252 | 0x081887f1, | ||
253 | 0xd00684b6, | ||
254 | 0x87f1008a, | ||
255 | 0x84b60400, | ||
256 | 0x0088cf06, | ||
257 | 0xf4888aff, | ||
258 | 0x87f1f31b, | ||
259 | 0x84b6085c, | ||
260 | 0xf094bd06, | ||
261 | 0x89d00099, | ||
262 | 0xf100f800, | ||
263 | 0xb6083c87, | ||
264 | 0x94bd0684, | ||
265 | 0xd00099f0, | ||
266 | 0x87f10089, | ||
267 | 0x84b60818, | ||
268 | 0x008ad006, | ||
269 | 0x040087f1, | ||
270 | 0xcf0684b6, | ||
271 | 0x8aff0088, | ||
272 | 0xf30bf488, | ||
273 | 0x085c87f1, | ||
274 | 0xbd0684b6, | ||
275 | 0x0099f094, | ||
276 | 0xf80089d0, | ||
277 | 0x9894bd00, | ||
278 | 0x85b600e8, | ||
279 | 0x0180b61a, | ||
280 | 0xbb0284b6, | ||
281 | 0xe0b60098, | ||
282 | 0x04efb804, | ||
283 | 0xb9eb1bf4, | ||
284 | 0x00f8029f, | ||
285 | 0x083c87f1, | ||
286 | 0xbd0684b6, | ||
287 | 0x0199f094, | ||
288 | 0xf10089d0, | ||
289 | 0xb6071087, | ||
290 | 0x94bd0684, | ||
291 | 0xf405bbfd, | ||
292 | 0x8bd0090b, | ||
293 | 0x0099f000, | ||
294 | 0xf405eefd, | ||
295 | 0x8ed00c0b, | ||
296 | 0xc08fd080, | ||
297 | 0xb70199f0, | ||
298 | 0xc8010080, | ||
299 | 0xb4b600ab, | ||
300 | 0x0cb9f010, | ||
301 | 0xb601aec8, | ||
302 | 0xbefd11e4, | ||
303 | 0x008bd005, | ||
304 | 0xf0008ecf, | ||
305 | 0x0bf41fe4, | ||
306 | 0x00ce98fa, | ||
307 | 0xd005e9fd, | ||
308 | 0xc0b6c08e, | ||
309 | 0x04cdb804, | ||
310 | 0xc8e81bf4, | ||
311 | 0x1bf402ab, | ||
312 | 0x008bcf18, | ||
313 | 0xb01fb4f0, | ||
314 | 0x1bf410b4, | ||
315 | 0x02a7f0f7, | ||
316 | 0xf4c921f4, | ||
317 | 0xabc81b0e, | ||
318 | 0x10b4b600, | ||
319 | 0xf00cb9f0, | ||
320 | 0x8bd012b9, | ||
321 | 0x008bcf00, | ||
322 | 0xf412bbc8, | ||
323 | 0x87f1fa1b, | ||
324 | 0x84b6085c, | ||
325 | 0xf094bd06, | ||
326 | 0x89d00199, | ||
327 | 0xf900f800, | ||
328 | 0x02a7f0a0, | ||
329 | 0xfcc921f4, | ||
330 | 0xf100f8a0, | ||
331 | 0xf04afc87, | ||
332 | 0x97f00283, | ||
333 | 0x0089d00c, | ||
334 | 0x020721f5, | ||
335 | 0x87f100f8, | ||
336 | 0x83f04afc, | ||
337 | 0x0d97f002, | ||
338 | 0xf50089d0, | ||
339 | 0xf8020721, | ||
340 | 0xfca7f100, | ||
341 | 0x02a3f04f, | ||
342 | 0x0500aba2, | ||
343 | 0xd00fc7f0, | ||
344 | 0xc7f000ac, | ||
345 | 0x00bcd00b, | ||
346 | 0x020721f5, | ||
347 | 0xf000aed0, | ||
348 | 0xbcd00ac7, | ||
349 | 0x0721f500, | ||
350 | 0xf100f802, | ||
351 | 0xb6083c87, | ||
352 | 0x94bd0684, | ||
353 | 0xd00399f0, | ||
354 | 0x21f50089, | ||
355 | 0xe7f00213, | ||
356 | 0x3921f503, | ||
357 | 0xfca7f102, | ||
358 | 0x02a3f046, | ||
359 | 0x0400aba0, | ||
360 | 0xf040a0d0, | ||
361 | 0xbcd001c7, | ||
362 | 0x0721f500, | ||
363 | 0x010c9202, | ||
364 | 0xf000acd0, | ||
365 | 0xbcd002c7, | ||
366 | 0x0721f500, | ||
367 | 0x2621f502, | ||
368 | 0x8087f102, | ||
369 | 0x0684b608, | ||
370 | 0xb70089cf, | ||
371 | 0x95220080, | ||
372 | 0x8ed008fe, | ||
373 | 0x408ed000, | ||
374 | 0xb6808acf, | ||
375 | 0xa0b606a5, | ||
376 | 0x00eabb01, | ||
377 | 0xb60480b6, | ||
378 | 0x1bf40192, | ||
379 | 0x08e4b6e8, | ||
380 | 0xf1f2efbc, | ||
381 | 0xb6085c87, | ||
382 | 0x94bd0684, | ||
383 | 0xd00399f0, | ||
384 | 0x00f80089, | ||
385 | 0xe7f1e0f9, | ||
386 | 0xe4b60814, | ||
387 | 0x00efd006, | ||
388 | 0x0c1ce7f1, | ||
389 | 0xf006e4b6, | ||
390 | 0xefd001f7, | ||
391 | 0xf8e0fc00, | ||
392 | 0xfe04bd00, | ||
393 | 0x07fe0004, | ||
394 | 0x0017f100, | ||
395 | 0x0227f012, | ||
396 | 0xf10012d0, | ||
397 | 0xfe05b917, | ||
398 | 0x17f10010, | ||
399 | 0x10d00400, | ||
400 | 0x0437f1c0, | ||
401 | 0x0634b604, | ||
402 | 0x200327f1, | ||
403 | 0xf10032d0, | ||
404 | 0xd0200427, | ||
405 | 0x27f10132, | ||
406 | 0x32d0200b, | ||
407 | 0x0c27f102, | ||
408 | 0x0732d020, | ||
409 | 0x0c2427f1, | ||
410 | 0xb90624b6, | ||
411 | 0x23d00003, | ||
412 | 0x0427f100, | ||
413 | 0x0023f087, | ||
414 | 0xb70012d0, | ||
415 | 0xf0010012, | ||
416 | 0x12d00427, | ||
417 | 0x1031f400, | ||
418 | 0x9604e7f1, | ||
419 | 0xf440e3f0, | ||
420 | 0xf1c76821, | ||
421 | 0x01018090, | ||
422 | 0x801ff4f0, | ||
423 | 0x17f0000f, | ||
424 | 0x041fbb01, | ||
425 | 0xf10112b6, | ||
426 | 0xb6040c27, | ||
427 | 0x21d00624, | ||
428 | 0x4021d000, | ||
429 | 0x080027f1, | ||
430 | 0xcf0624b6, | ||
431 | 0xf7f00022, | ||
432 | 0x08f0b654, | ||
433 | 0xb800f398, | ||
434 | 0x0bf40432, | ||
435 | 0x0034b00b, | ||
436 | 0xf8f11bf4, | ||
437 | 0x0017f100, | ||
438 | 0x02fe5801, | ||
439 | 0xf003ff58, | ||
440 | 0x0e8000e3, | ||
441 | 0x150f8014, | ||
442 | 0x013d21f5, | ||
443 | 0x070037f1, | ||
444 | 0x950634b6, | ||
445 | 0x34d00814, | ||
446 | 0x4034d000, | ||
447 | 0x130030b7, | ||
448 | 0xb6001fbb, | ||
449 | 0x3fd002f5, | ||
450 | 0x0815b600, | ||
451 | 0xb60110b6, | ||
452 | 0x1fb90814, | ||
453 | 0x6321f502, | ||
454 | 0x001fbb02, | ||
455 | 0xf1000398, | ||
456 | 0xf0200047, | ||
457 | 0x4ea05043, | ||
458 | 0x1fb90804, | ||
459 | 0x8d21f402, | ||
460 | 0x08004ea0, | ||
461 | 0xf4022fb9, | ||
462 | 0x4ea08d21, | ||
463 | 0xf4bd010c, | ||
464 | 0xa08d21f4, | ||
465 | 0xf401044e, | ||
466 | 0x4ea08d21, | ||
467 | 0xf7f00100, | ||
468 | 0x8d21f402, | ||
469 | 0x08004ea0, | ||
470 | 0xc86821f4, | ||
471 | 0x0bf41fff, | ||
472 | 0x044ea0fa, | ||
473 | 0x6821f408, | ||
474 | 0xb7001fbb, | ||
475 | 0xb6800040, | ||
476 | 0x1bf40132, | ||
477 | 0x0027f1b4, | ||
478 | 0x0624b608, | ||
479 | 0xb74021d0, | ||
480 | 0xbd080020, | ||
481 | 0x1f19f014, | ||
482 | 0xf40021d0, | ||
483 | 0x28f40031, | ||
484 | 0x08d7f000, | ||
485 | 0xf43921f4, | ||
486 | 0xe4b1f401, | ||
487 | 0x1bf54001, | ||
488 | 0x87f100d1, | ||
489 | 0x84b6083c, | ||
490 | 0xf094bd06, | ||
491 | 0x89d00499, | ||
492 | 0x0017f100, | ||
493 | 0x0614b60b, | ||
494 | 0xcf4012cf, | ||
495 | 0x13c80011, | ||
496 | 0x7e0bf41f, | ||
497 | 0xf41f23c8, | ||
498 | 0x20f95a0b, | ||
499 | 0xf10212b9, | ||
500 | 0xb6083c87, | ||
501 | 0x94bd0684, | ||
502 | 0xd00799f0, | ||
503 | 0x32f40089, | ||
504 | 0x0231f401, | ||
505 | 0x082921f5, | ||
506 | 0x085c87f1, | ||
507 | 0xbd0684b6, | ||
508 | 0x0799f094, | ||
509 | 0xfc0089d0, | ||
510 | 0x3c87f120, | ||
511 | 0x0684b608, | ||
512 | 0x99f094bd, | ||
513 | 0x0089d006, | ||
514 | 0xf50131f4, | ||
515 | 0xf1082921, | ||
516 | 0xb6085c87, | ||
517 | 0x94bd0684, | ||
518 | 0xd00699f0, | ||
519 | 0x0ef40089, | ||
520 | 0xb920f931, | ||
521 | 0x32f40212, | ||
522 | 0x0232f401, | ||
523 | 0x082921f5, | ||
524 | 0x17f120fc, | ||
525 | 0x14b60b00, | ||
526 | 0x0012d006, | ||
527 | 0xc8130ef4, | ||
528 | 0x0bf41f23, | ||
529 | 0x0131f40d, | ||
530 | 0xf50232f4, | ||
531 | 0xf1082921, | ||
532 | 0xb60b0c17, | ||
533 | 0x27f00614, | ||
534 | 0x0012d001, | ||
535 | 0x085c87f1, | ||
536 | 0xbd0684b6, | ||
537 | 0x0499f094, | ||
538 | 0xf50089d0, | ||
539 | 0xb0ff200e, | ||
540 | 0x1bf401e4, | ||
541 | 0x02f2b90d, | ||
542 | 0x07b521f5, | ||
543 | 0xb0420ef4, | ||
544 | 0x1bf402e4, | ||
545 | 0x3c87f12e, | ||
546 | 0x0684b608, | ||
547 | 0x99f094bd, | ||
548 | 0x0089d007, | ||
549 | 0xf40132f4, | ||
550 | 0x21f50232, | ||
551 | 0x87f10829, | ||
552 | 0x84b6085c, | ||
553 | 0xf094bd06, | ||
554 | 0x89d00799, | ||
555 | 0x110ef400, | ||
556 | 0xf010ef94, | ||
557 | 0x21f501f5, | ||
558 | 0x0ef502ec, | ||
559 | 0x17f1fed1, | ||
560 | 0x14b60820, | ||
561 | 0xf024bd06, | ||
562 | 0x12d01f29, | ||
563 | 0xbe0ef500, | ||
564 | 0xfe80f9fe, | ||
565 | 0x80f90188, | ||
566 | 0xa0f990f9, | ||
567 | 0xd0f9b0f9, | ||
568 | 0xf0f9e0f9, | ||
569 | 0xc4800acf, | ||
570 | 0x0bf404ab, | ||
571 | 0x00b7f11d, | ||
572 | 0x08d7f019, | ||
573 | 0xcf40becf, | ||
574 | 0x21f400bf, | ||
575 | 0x00b0b704, | ||
576 | 0x01e7f004, | ||
577 | 0xe400bed0, | ||
578 | 0xf40100ab, | ||
579 | 0xd7f00d0b, | ||
580 | 0x01e7f108, | ||
581 | 0x0421f440, | ||
582 | 0x0104b7f1, | ||
583 | 0xabffb0bd, | ||
584 | 0x0d0bf4b4, | ||
585 | 0x0c1ca7f1, | ||
586 | 0xd006a4b6, | ||
587 | 0x0ad000ab, | ||
588 | 0xfcf0fc40, | ||
589 | 0xfcd0fce0, | ||
590 | 0xfca0fcb0, | ||
591 | 0xfe80fc90, | ||
592 | 0x80fc0088, | ||
593 | 0xf80032f4, | ||
594 | 0x60e7f101, | ||
595 | 0x40e3f041, | ||
596 | 0xf401f7f0, | ||
597 | 0x21f48d21, | ||
598 | 0x04ffc868, | ||
599 | 0xf8fa0bf4, | ||
600 | 0x60e7f100, | ||
601 | 0x40e3f041, | ||
602 | 0x21f4f4bd, | ||
603 | 0xf100f88d, | ||
604 | 0xf04170e7, | ||
605 | 0xf5f040e3, | ||
606 | 0x8d21f410, | ||
607 | 0xe7f100f8, | ||
608 | 0xe3f04170, | ||
609 | 0x6821f440, | ||
610 | 0xf410f4f0, | ||
611 | 0x00f8f31b, | ||
612 | 0x0614e7f1, | ||
613 | 0xf106e4b6, | ||
614 | 0xd00270f7, | ||
615 | 0xf7f000ef, | ||
616 | 0x01f2b608, | ||
617 | 0xf1fd1bf4, | ||
618 | 0xd00770f7, | ||
619 | 0x00f800ef, | ||
620 | 0x086ce7f1, | ||
621 | 0xd006e4b6, | ||
622 | 0xe7f100ef, | ||
623 | 0xe3f08a14, | ||
624 | 0x8d21f440, | ||
625 | 0xa86ce7f1, | ||
626 | 0xf441e3f0, | ||
627 | 0x00f88d21, | ||
628 | 0x083c87f1, | ||
629 | 0xbd0684b6, | ||
630 | 0x0599f094, | ||
631 | 0xf00089d0, | ||
632 | 0x21f40ca7, | ||
633 | 0x2417f1c9, | ||
634 | 0x0614b60a, | ||
635 | 0xf10010d0, | ||
636 | 0xb60b0037, | ||
637 | 0x32d00634, | ||
638 | 0x0c17f140, | ||
639 | 0x0614b60a, | ||
640 | 0xd00747f0, | ||
641 | 0x14d00012, | ||
642 | 0x4014cf40, | ||
643 | 0xf41f44f0, | ||
644 | 0x32d0fa1b, | ||
645 | 0x000bfe00, | ||
646 | 0xb61f2af0, | ||
647 | 0x20b60424, | ||
648 | 0x3c87f102, | ||
649 | 0x0684b608, | ||
650 | 0x99f094bd, | ||
651 | 0x0089d008, | ||
652 | 0x0a0417f1, | ||
653 | 0xd00614b6, | ||
654 | 0x17f10012, | ||
655 | 0x14b60a20, | ||
656 | 0x0227f006, | ||
657 | 0x800023f1, | ||
658 | 0xf00012d0, | ||
659 | 0x27f11017, | ||
660 | 0x23f00300, | ||
661 | 0x0512fa02, | ||
662 | 0x87f103f8, | ||
663 | 0x84b6085c, | ||
664 | 0xf094bd06, | ||
665 | 0x89d00899, | ||
666 | 0xc1019800, | ||
667 | 0x981814b6, | ||
668 | 0x25b6c002, | ||
669 | 0x0512fd08, | ||
670 | 0xf1160180, | ||
671 | 0xb6083c87, | ||
672 | 0x94bd0684, | ||
673 | 0xd00999f0, | ||
674 | 0x27f10089, | ||
675 | 0x24b60a04, | ||
676 | 0x0021d006, | ||
677 | 0xf10127f0, | ||
678 | 0xb60a2017, | ||
679 | 0x12d00614, | ||
680 | 0x0017f100, | ||
681 | 0x0613f002, | ||
682 | 0xf80501fa, | ||
683 | 0x5c87f103, | ||
684 | 0x0684b608, | ||
685 | 0x99f094bd, | ||
686 | 0x0089d009, | ||
687 | 0x085c87f1, | ||
688 | 0xbd0684b6, | ||
689 | 0x0599f094, | ||
690 | 0xf80089d0, | ||
691 | 0x3121f500, | ||
692 | 0xb821f506, | ||
693 | 0x0ca7f006, | ||
694 | 0xf1c921f4, | ||
695 | 0xb60a1017, | ||
696 | 0x27f00614, | ||
697 | 0x0012d005, | ||
698 | 0xfd0012cf, | ||
699 | 0x1bf40522, | ||
700 | 0x4921f5fa, | ||
701 | 0x9800f806, | ||
702 | 0x27f18103, | ||
703 | 0x24b60a04, | ||
704 | 0x0023d006, | ||
705 | 0x34c434bd, | ||
706 | 0x0f1bf4ff, | ||
707 | 0x030057f1, | ||
708 | 0xfa0653f0, | ||
709 | 0x03f80535, | ||
710 | 0x98c04e98, | ||
711 | 0x21f4c14f, | ||
712 | 0x0830b68d, | ||
713 | 0xf40112b6, | ||
714 | 0x0398df1b, | ||
715 | 0x0023d016, | ||
716 | 0xf1800080, | ||
717 | 0xf0020017, | ||
718 | 0x01fa0613, | ||
719 | 0xf803f806, | ||
720 | 0x0611f400, | ||
721 | 0xf01102f4, | ||
722 | 0x21f510f7, | ||
723 | 0x21f50698, | ||
724 | 0x11f40631, | ||
725 | 0x02f7f01c, | ||
726 | 0x065721f5, | ||
727 | 0x066621f5, | ||
728 | 0x067821f5, | ||
729 | 0x21f5f4bd, | ||
730 | 0x21f50657, | ||
731 | 0x019806b8, | ||
732 | 0x1427f116, | ||
733 | 0x0624b604, | ||
734 | 0xf10020d0, | ||
735 | 0xf0a500e7, | ||
736 | 0x1fb941e3, | ||
737 | 0x8d21f402, | ||
738 | 0xf004e0b6, | ||
739 | 0x2cf001fc, | ||
740 | 0x0124b602, | ||
741 | 0xf405f2fd, | ||
742 | 0x17f18d21, | ||
743 | 0x13f04afc, | ||
744 | 0x0c27f002, | ||
745 | 0xf50012d0, | ||
746 | 0xf1020721, | ||
747 | 0xf047fc27, | ||
748 | 0x20d00223, | ||
749 | 0x012cf000, | ||
750 | 0xd00320b6, | ||
751 | 0xacf00012, | ||
752 | 0x06a5f001, | ||
753 | 0x9800b7f0, | ||
754 | 0x0d98140c, | ||
755 | 0x00e7f015, | ||
756 | 0x015c21f5, | ||
757 | 0xf508a7f0, | ||
758 | 0xf5010321, | ||
759 | 0xf4020721, | ||
760 | 0xa7f02201, | ||
761 | 0xc921f40c, | ||
762 | 0x0a1017f1, | ||
763 | 0xf00614b6, | ||
764 | 0x12d00527, | ||
765 | 0x0012cf00, | ||
766 | 0xf40522fd, | ||
767 | 0x02f4fa1b, | ||
768 | 0x02f7f032, | ||
769 | 0x065721f5, | ||
770 | 0x21f5f4bd, | ||
771 | 0x21f50698, | ||
772 | 0x21f50226, | ||
773 | 0xf4bd0666, | ||
774 | 0x065721f5, | ||
775 | 0x981011f4, | ||
776 | 0x11fd8001, | ||
777 | 0x070bf405, | ||
778 | 0x07df21f5, | ||
779 | 0x064921f5, | ||
780 | 0x000000f8, | ||
781 | 0x00000000, | ||
782 | 0x00000000, | ||
783 | 0x00000000, | ||
784 | 0x00000000, | ||
785 | 0x00000000, | ||
786 | 0x00000000, | ||
787 | 0x00000000, | ||
788 | 0x00000000, | ||
789 | 0x00000000, | ||
790 | 0x00000000, | ||
791 | 0x00000000, | ||
792 | 0x00000000, | ||
793 | 0x00000000, | ||
794 | 0x00000000, | ||
795 | 0x00000000, | ||
796 | 0x00000000, | ||
797 | 0x00000000, | ||
798 | 0x00000000, | ||
799 | 0x00000000, | ||
800 | 0x00000000, | ||
801 | 0x00000000, | ||
802 | 0x00000000, | ||
803 | 0x00000000, | ||
804 | 0x00000000, | ||
805 | 0x00000000, | ||
806 | 0x00000000, | ||
807 | 0x00000000, | ||
808 | 0x00000000, | ||
809 | 0x00000000, | ||
810 | 0x00000000, | ||
811 | 0x00000000, | ||
812 | 0x00000000, | ||
813 | 0x00000000, | ||
814 | 0x00000000, | ||
815 | 0x00000000, | ||
816 | 0x00000000, | ||
817 | 0x00000000, | ||
818 | 0x00000000, | ||
819 | 0x00000000, | ||
820 | 0x00000000, | ||
821 | 0x00000000, | ||
822 | 0x00000000, | ||
823 | 0x00000000, | ||
824 | 0x00000000, | ||
825 | 0x00000000, | ||
826 | 0x00000000, | ||
827 | 0x00000000, | ||
828 | 0x00000000, | ||
829 | 0x00000000, | ||
830 | 0x00000000, | ||
831 | 0x00000000, | ||
832 | 0x00000000, | ||
833 | 0x00000000, | ||
834 | 0x00000000, | ||
835 | 0x00000000, | ||
836 | 0x00000000, | ||
837 | 0x00000000, | ||
838 | }; | ||
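
The two arrays ending above are generated falcon (fuc) microcode for the nvc0 PGRAPH HUB: nvc0_grhub_data is the data segment (hence the long runs of 0x00000000 — zero-initialized storage) and nvc0_grhub_code is the instruction stream, zero-padded out to its upload boundary. A minimal sketch of how such a blob gets pushed into the falcon; the 0x4091xx register offsets here are an assumption for illustration, not part of this patch:

    int i;

    nv_wr32(dev, 0x4091c0, 0x01000000);        /* assumed: data upload port, auto-increment */
    for (i = 0; i < ARRAY_SIZE(nvc0_grhub_data); i++)
            nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);

    nv_wr32(dev, 0x409180, 0x01000000);        /* assumed: code upload port, auto-increment */
    for (i = 0; i < ARRAY_SIZE(nvc0_grhub_code); i++) {
            if ((i & 0x3f) == 0)
                    nv_wr32(dev, 0x409188, i >> 6); /* select next 256-byte code page */
            nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
    }
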
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 82357d2df1f..b701c439c92 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c | |||
@@ -32,7 +32,6 @@ struct nvc0_instmem_priv { | |||
32 | struct nouveau_channel *bar1; | 32 | struct nouveau_channel *bar1; |
33 | struct nouveau_gpuobj *bar3_pgd; | 33 | struct nouveau_gpuobj *bar3_pgd; |
34 | struct nouveau_channel *bar3; | 34 | struct nouveau_channel *bar3; |
35 | struct nouveau_gpuobj *chan_pgd; | ||
36 | }; | 35 | }; |
37 | 36 | ||
38 | int | 37 | int |
@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev) | |||
181 | goto error; | 180 | goto error; |
182 | 181 | ||
183 | /* channel vm */ | 182 | /* channel vm */ |
184 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); | 183 | ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, |
184 | &dev_priv->chan_vm); | ||
185 | if (ret) | 185 | if (ret) |
186 | goto error; | 186 | goto error; |
187 | 187 | ||
188 | ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd); | ||
189 | if (ret) | ||
190 | goto error; | ||
191 | |||
192 | nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd); | ||
193 | nouveau_vm_ref(NULL, &vm, NULL); | ||
194 | |||
195 | nvc0_instmem_resume(dev); | 188 | nvc0_instmem_resume(dev); |
196 | return 0; | 189 | return 0; |
197 | error: | 190 | error: |
@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev) | |||
211 | nv_wr32(dev, 0x1704, 0x00000000); | 204 | nv_wr32(dev, 0x1704, 0x00000000); |
212 | nv_wr32(dev, 0x1714, 0x00000000); | 205 | nv_wr32(dev, 0x1714, 0x00000000); |
213 | 206 | ||
214 | nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); | 207 | nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); |
215 | nouveau_gpuobj_ref(NULL, &priv->chan_pgd); | ||
216 | 208 | ||
217 | nvc0_channel_del(&priv->bar1); | 209 | nvc0_channel_del(&priv->bar1); |
218 | nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); | 210 | nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); |
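
The instmem hunks above drop the shared channel page directory: nvc0_instmem_init now creates the 40-bit channel VM straight into dev_priv->chan_vm and no longer allocates a global chan_pgd gpuobj, leaving each channel free to attach its own page directory. The nouveau_vm_ref() idiom this leans on, as used in the takedown path:

    /* nouveau_vm_ref(vm, &ref, pgd): reference vm into ref and, if pgd is
     * non-NULL, install pgd as a page directory for it; passing vm == NULL
     * drops the reference instead.  With no shared pgd left to unlink: */
    nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
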
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index a179e6c55af..9e352944a35 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c | |||
@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm) | |||
105 | struct drm_device *dev = vm->dev; | 105 | struct drm_device *dev = vm->dev; |
106 | struct nouveau_vm_pgd *vpgd; | 106 | struct nouveau_vm_pgd *vpgd; |
107 | unsigned long flags; | 107 | unsigned long flags; |
108 | u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; | 108 | u32 engine; |
109 | |||
110 | engine = 1; | ||
111 | if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) | ||
112 | engine |= 4; | ||
109 | 113 | ||
110 | pinstmem->flush(vm->dev); | 114 | pinstmem->flush(vm->dev); |
111 | 115 | ||
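
nvc0_vm_flush used to pick engine 1 or 5 by testing whether the VM was the channel VM; it now builds the value the other way around, defaulting to 1 and ORing in 4 for either BAR VM. Restated (reading bit 2 as "BAR target" is an inference from the old 1-vs-5 values, not something the patch states):

    u32 engine = 1;                             /* channel/graphics VMs       */
    if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
            engine |= 4;                        /* BAR VMs flush as engine 5  */
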
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index 67c6ec6f34e..e45a24d84e9 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c | |||
@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin, | |||
61 | u32 type, struct nouveau_mem **pmem) | 61 | u32 type, struct nouveau_mem **pmem) |
62 | { | 62 | { |
63 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 63 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
64 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | 64 | struct nouveau_mm *mm = dev_priv->engine.vram.mm; |
65 | struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; | ||
66 | struct nouveau_mm *mm = man->priv; | ||
67 | struct nouveau_mm_node *r; | 65 | struct nouveau_mm_node *r; |
68 | struct nouveau_mem *mem; | 66 | struct nouveau_mem *mem; |
69 | int ret; | 67 | int ret; |
@@ -105,9 +103,15 @@ int | |||
105 | nvc0_vram_init(struct drm_device *dev) | 103 | nvc0_vram_init(struct drm_device *dev) |
106 | { | 104 | { |
107 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 105 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
106 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | ||
107 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | ||
108 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | ||
109 | u32 length; | ||
108 | 110 | ||
109 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; | 111 | dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; |
110 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); | 112 | dev_priv->vram_size *= nv_rd32(dev, 0x121c74); |
111 | dev_priv->vram_rblock_size = 4096; | 113 | |
112 | return 0; | 114 | length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail; |
115 | |||
116 | return nouveau_mm_init(&vram->mm, rsvd_head, length, 1); | ||
113 | } | 117 | } |
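
nvc0_vram_init now hands VRAM to the nouveau mm allocator in 4 KiB pages, holding back 256 KiB at the bottom for VGA memory and 1 MiB at the top for the VBIOS. A worked example, assuming a 1 GiB board:

    rsvd_head = (256 * 1024) >> 12;     /* 64 pages                      */
    rsvd_tail = (1024 * 1024) >> 12;    /* 256 pages                     */
    /* 1 GiB of VRAM is 0x40000000 >> 12 = 262144 pages, so:            */
    length = 262144 - 64 - 256;         /* 261824 usable pages           */
    nouveau_mm_init(&vram->mm, 64, 261824, 1);  /* 1-page granularity    */
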
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 3896ef81110..9f363e0c4b6 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | ccflags-y := -Iinclude/drm | 5 | ccflags-y := -Iinclude/drm |
6 | 6 | ||
7 | hostprogs-y := mkregtable | 7 | hostprogs-y := mkregtable |
8 | clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h | ||
8 | 9 | ||
9 | quiet_cmd_mkregtable = MKREGTABLE $@ | 10 | quiet_cmd_mkregtable = MKREGTABLE $@ |
10 | cmd_mkregtable = $(obj)/mkregtable $< > $@ | 11 | cmd_mkregtable = $(obj)/mkregtable $< > $@ |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index ebdb0fdb834..14cc88aaf3a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
277 | case ATOM_ARG_FB: | 277 | case ATOM_ARG_FB: |
278 | idx = U8(*ptr); | 278 | idx = U8(*ptr); |
279 | (*ptr)++; | 279 | (*ptr)++; |
280 | val = gctx->scratch[((gctx->fb_base + idx) / 4)]; | 280 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { |
281 | DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", | ||
282 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); | ||
283 | val = 0; | ||
284 | } else | ||
285 | val = gctx->scratch[(gctx->fb_base / 4) + idx]; | ||
281 | if (print) | 286 | if (print) |
282 | DEBUG("FB[0x%02X]", idx); | 287 | DEBUG("FB[0x%02X]", idx); |
283 | break; | 288 | break; |
@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, | |||
531 | case ATOM_ARG_FB: | 536 | case ATOM_ARG_FB: |
532 | idx = U8(*ptr); | 537 | idx = U8(*ptr); |
533 | (*ptr)++; | 538 | (*ptr)++; |
534 | gctx->scratch[((gctx->fb_base + idx) / 4)] = val; | 539 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { |
540 | DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n", | ||
541 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); | ||
542 | } else | ||
543 | gctx->scratch[(gctx->fb_base / 4) + idx] = val; | ||
535 | DEBUG("FB[0x%02X]", idx); | 544 | DEBUG("FB[0x%02X]", idx); |
536 | break; | 545 | break; |
537 | case ATOM_ARG_PLL: | 546 | case ATOM_ARG_PLL: |
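
Both AtomBIOS hunks above add the same guard: an FB scratch access at word index idx touches bytes fb_base + idx*4 and must land inside the scratch buffer, whose size a later hunk records in scratch_size_bytes. Out-of-range reads log a DRM_ERROR and yield 0; out-of-range writes are silently dropped. The check, pulled out as a sketch (the helper name is made up for illustration):

    static bool atom_fb_access_ok(struct atom_context *gctx, uint8_t idx)
    {
            return (gctx->fb_base + (idx * 4)) <= gctx->scratch_size_bytes;
    }
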
@@ -1245,6 +1254,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) | |||
1245 | char name[512]; | 1254 | char name[512]; |
1246 | int i; | 1255 | int i; |
1247 | 1256 | ||
1257 | if (!ctx) | ||
1258 | return NULL; | ||
1259 | |||
1248 | ctx->card = card; | 1260 | ctx->card = card; |
1249 | ctx->bios = bios; | 1261 | ctx->bios = bios; |
1250 | 1262 | ||
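
The NULL check covers the allocation of ctx just above this hunk (not shown here); on failure atom_parse now returns NULL cleanly instead of dereferencing ctx->card two lines later. Roughly (the kzalloc line is assumed from context):

    struct atom_context *ctx =
            kzalloc(sizeof(struct atom_context), GFP_KERNEL);  /* assumed       */
    if (!ctx)
            return NULL;                                       /* new early-out */
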
@@ -1367,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) | |||
1367 | 1379 | ||
1368 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; | 1380 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; |
1369 | } | 1381 | } |
1382 | ctx->scratch_size_bytes = 0; | ||
1370 | if (usage_bytes == 0) | 1383 | if (usage_bytes == 0) |
1371 | usage_bytes = 20 * 1024; | 1384 | usage_bytes = 20 * 1024; |
1372 | /* allocate some scratch memory */ | 1385 | /* allocate some scratch memory */ |
1373 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); | 1386 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); |
1374 | if (!ctx->scratch) | 1387 | if (!ctx->scratch) |
1375 | return -ENOMEM; | 1388 | return -ENOMEM; |
1389 | ctx->scratch_size_bytes = usage_bytes; | ||
1376 | return 0; | 1390 | return 0; |
1377 | } | 1391 | } |
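
Note the ordering in atom_allocate_fb_scratch: scratch_size_bytes is zeroed before the kzalloc and only set to usage_bytes once the allocation has succeeded, so a failed allocation leaves the size at 0 and the bounds checks above then reject every FB access instead of trusting a stale size.
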
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index a589a55b223..93cfe2086ba 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -137,6 +137,7 @@ struct atom_context { | |||
137 | int cs_equal, cs_above; | 137 | int cs_equal, cs_above; |
138 | int io_mode; | 138 | int io_mode; |
139 | uint32_t *scratch; | 139 | uint32_t *scratch; |
140 | int scratch_size_bytes; | ||
140 | }; | 141 | }; |
141 | 142 | ||
142 | extern int atom_debug; | 143 | extern int atom_debug; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 9541995e4b2..9bb3d6f3b7b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
466 | return; | 466 | return; |
467 | } | 467 | } |
468 | args.v2.ucEnable = enable; | 468 | args.v2.ucEnable = enable; |
469 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) | 469 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) |
470 | args.v2.ucEnable = ATOM_DISABLE; | 470 | args.v2.ucEnable = ATOM_DISABLE; |
471 | } else if (ASIC_IS_DCE3(rdev)) { | 471 | } else if (ASIC_IS_DCE3(rdev)) { |
472 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | 472 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
@@ -558,7 +558,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
558 | bpc = connector->display_info.bpc; | 558 | bpc = connector->display_info.bpc; |
559 | encoder_mode = atombios_get_encoder_mode(encoder); | 559 | encoder_mode = atombios_get_encoder_mode(encoder); |
560 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || | 560 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
561 | radeon_encoder_is_dp_bridge(encoder)) { | 561 | (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { |
562 | if (connector) { | 562 | if (connector) { |
563 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 563 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
564 | struct radeon_connector_atom_dig *dig_connector = | 564 | struct radeon_connector_atom_dig *dig_connector = |
@@ -638,44 +638,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
638 | if (ss_enabled && ss->percentage) | 638 | if (ss_enabled && ss->percentage) |
639 | args.v3.sInput.ucDispPllConfig |= | 639 | args.v3.sInput.ucDispPllConfig |= |
640 | DISPPLL_CONFIG_SS_ENABLE; | 640 | DISPPLL_CONFIG_SS_ENABLE; |
641 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) || | 641 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
642 | radeon_encoder_is_dp_bridge(encoder)) { | 642 | args.v3.sInput.ucDispPllConfig |= |
643 | DISPPLL_CONFIG_COHERENT_MODE; | ||
644 | /* 16200 or 27000 */ | ||
645 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
646 | } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
643 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 647 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
644 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 648 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) |
649 | /* deep color support */ | ||
650 | args.v3.sInput.usPixelClock = | ||
651 | cpu_to_le16((mode->clock * bpc / 8) / 10); | ||
652 | if (dig->coherent_mode) | ||
645 | args.v3.sInput.ucDispPllConfig |= | 653 | args.v3.sInput.ucDispPllConfig |= |
646 | DISPPLL_CONFIG_COHERENT_MODE; | 654 | DISPPLL_CONFIG_COHERENT_MODE; |
647 | /* 16200 or 27000 */ | 655 | if (mode->clock > 165000) |
648 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
649 | } else { | ||
650 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
651 | /* deep color support */ | ||
652 | args.v3.sInput.usPixelClock = | ||
653 | cpu_to_le16((mode->clock * bpc / 8) / 10); | ||
654 | } | ||
655 | if (dig->coherent_mode) | ||
656 | args.v3.sInput.ucDispPllConfig |= | ||
657 | DISPPLL_CONFIG_COHERENT_MODE; | ||
658 | if (mode->clock > 165000) | ||
659 | args.v3.sInput.ucDispPllConfig |= | ||
660 | DISPPLL_CONFIG_DUAL_LINK; | ||
661 | } | ||
662 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
663 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | ||
664 | args.v3.sInput.ucDispPllConfig |= | 656 | args.v3.sInput.ucDispPllConfig |= |
665 | DISPPLL_CONFIG_COHERENT_MODE; | 657 | DISPPLL_CONFIG_DUAL_LINK; |
666 | /* 16200 or 27000 */ | ||
667 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
668 | } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { | ||
669 | if (mode->clock > 165000) | ||
670 | args.v3.sInput.ucDispPllConfig |= | ||
671 | DISPPLL_CONFIG_DUAL_LINK; | ||
672 | } | ||
673 | } | 658 | } |
674 | if (radeon_encoder_is_dp_bridge(encoder)) { | 659 | if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != |
675 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | 660 | ENCODER_OBJECT_ID_NONE) |
676 | struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); | 661 | args.v3.sInput.ucExtTransmitterID = |
677 | args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id; | 662 | radeon_encoder_get_dp_bridge_encoder_id(encoder); |
678 | } else | 663 | else |
679 | args.v3.sInput.ucExtTransmitterID = 0; | 664 | args.v3.sInput.ucExtTransmitterID = 0; |
680 | 665 | ||
681 | atom_execute_table(rdev->mode_info.atom_context, | 666 | atom_execute_table(rdev->mode_info.atom_context, |
@@ -764,7 +749,7 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, | |||
764 | } | 749 | } |
765 | 750 | ||
766 | static void atombios_crtc_program_pll(struct drm_crtc *crtc, | 751 | static void atombios_crtc_program_pll(struct drm_crtc *crtc, |
767 | int crtc_id, | 752 | u32 crtc_id, |
768 | int pll_id, | 753 | int pll_id, |
769 | u32 encoder_mode, | 754 | u32 encoder_mode, |
770 | u32 encoder_id, | 755 | u32 encoder_id, |
@@ -851,8 +836,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, | |||
851 | args.v5.ucPpll = pll_id; | 836 | args.v5.ucPpll = pll_id; |
852 | break; | 837 | break; |
853 | case 6: | 838 | case 6: |
854 | args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id; | 839 | args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10); |
855 | args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10); | ||
856 | args.v6.ucRefDiv = ref_div; | 840 | args.v6.ucRefDiv = ref_div; |
857 | args.v6.usFbDiv = cpu_to_le16(fb_div); | 841 | args.v6.usFbDiv = cpu_to_le16(fb_div); |
858 | args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); | 842 | args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 8c0f9e36ff8..03a347a99d7 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
115 | u8 msg[20]; | 115 | u8 msg[20]; |
116 | int msg_bytes = send_bytes + 4; | 116 | int msg_bytes = send_bytes + 4; |
117 | u8 ack; | 117 | u8 ack; |
118 | unsigned retry; | ||
118 | 119 | ||
119 | if (send_bytes > 16) | 120 | if (send_bytes > 16) |
120 | return -1; | 121 | return -1; |
@@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
125 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); | 126 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); |
126 | memcpy(&msg[4], send, send_bytes); | 127 | memcpy(&msg[4], send, send_bytes); |
127 | 128 | ||
128 | while (1) { | 129 | for (retry = 0; retry < 4; retry++) { |
129 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 130 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
130 | msg, msg_bytes, NULL, 0, delay, &ack); | 131 | msg, msg_bytes, NULL, 0, delay, &ack); |
131 | if (ret < 0) | 132 | if (ret == -EBUSY) |
133 | continue; | ||
134 | else if (ret < 0) | ||
132 | return ret; | 135 | return ret; |
133 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 136 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
134 | break; | 137 | return send_bytes; |
135 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 138 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
136 | udelay(400); | 139 | udelay(400); |
137 | else | 140 | else |
138 | return -EIO; | 141 | return -EIO; |
139 | } | 142 | } |
140 | 143 | ||
141 | return send_bytes; | 144 | return -EIO; |
142 | } | 145 | } |
143 | 146 | ||
144 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | 147 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, |
@@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | |||
149 | int msg_bytes = 4; | 152 | int msg_bytes = 4; |
150 | u8 ack; | 153 | u8 ack; |
151 | int ret; | 154 | int ret; |
155 | unsigned retry; | ||
152 | 156 | ||
153 | msg[0] = address; | 157 | msg[0] = address; |
154 | msg[1] = address >> 8; | 158 | msg[1] = address >> 8; |
155 | msg[2] = AUX_NATIVE_READ << 4; | 159 | msg[2] = AUX_NATIVE_READ << 4; |
156 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); | 160 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); |
157 | 161 | ||
158 | while (1) { | 162 | for (retry = 0; retry < 4; retry++) { |
159 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 163 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
160 | msg, msg_bytes, recv, recv_bytes, delay, &ack); | 164 | msg, msg_bytes, recv, recv_bytes, delay, &ack); |
161 | if (ret == 0) | 165 | if (ret == -EBUSY) |
162 | return -EPROTO; | 166 | continue; |
163 | if (ret < 0) | 167 | else if (ret < 0) |
164 | return ret; | 168 | return ret; |
165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 169 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
166 | return ret; | 170 | return ret; |
167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 171 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
168 | udelay(400); | 172 | udelay(400); |
173 | else if (ret == 0) | ||
174 | return -EPROTO; | ||
169 | else | 175 | else |
170 | return -EIO; | 176 | return -EIO; |
171 | } | 177 | } |
178 | |||
179 | return -EIO; | ||
172 | } | 180 | } |
173 | 181 | ||
174 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, | 182 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, |
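
Both AUX helpers above replace their unbounded while (1) loops with at most four attempts and learn to tell a busy channel apart from a hard failure. The shared shape, as a sketch (message setup and length handling omitted):

    for (retry = 0; retry < 4; retry++) {
            ret = radeon_process_aux_ch(chan, msg, msg_bytes,
                                        recv, recv_bytes, delay, &ack);
            if (ret == -EBUSY)
                    continue;               /* channel busy: retry now    */
            else if (ret < 0)
                    return ret;             /* hard failure: give up      */
            if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
                    return ret;             /* sink accepted the transfer */
            else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                    udelay(400);            /* sink asked us to wait      */
            else
                    return -EIO;            /* unexpected reply           */
    }
    return -EIO;                            /* retries exhausted          */
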
@@ -232,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
232 | for (retry = 0; retry < 4; retry++) { | 240 | for (retry = 0; retry < 4; retry++) { |
233 | ret = radeon_process_aux_ch(auxch, | 241 | ret = radeon_process_aux_ch(auxch, |
234 | msg, msg_bytes, reply, reply_bytes, 0, &ack); | 242 | msg, msg_bytes, reply, reply_bytes, 0, &ack); |
235 | if (ret < 0) { | 243 | if (ret == -EBUSY) |
244 | continue; | ||
245 | else if (ret < 0) { | ||
236 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | 246 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
237 | return ret; | 247 | return ret; |
238 | } | 248 | } |
@@ -273,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
273 | } | 283 | } |
274 | } | 284 | } |
275 | 285 | ||
276 | DRM_ERROR("aux i2c too many retries, giving up\n"); | 286 | DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); |
277 | return -EREMOTEIO; | 287 | return -EREMOTEIO; |
278 | } | 288 | } |
279 | 289 | ||
@@ -472,7 +482,8 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, | |||
472 | int bpp = convert_bpc_to_bpp(connector->display_info.bpc); | 482 | int bpp = convert_bpc_to_bpp(connector->display_info.bpc); |
473 | int lane_num, max_pix_clock; | 483 | int lane_num, max_pix_clock; |
474 | 484 | ||
475 | if (radeon_connector_encoder_is_dp_bridge(connector)) | 485 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != |
486 | ENCODER_OBJECT_ID_NONE) | ||
476 | return 270000; | 487 | return 270000; |
477 | 488 | ||
478 | lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); | 489 | lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); |
@@ -543,17 +554,32 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, | |||
543 | { | 554 | { |
544 | struct drm_device *dev = encoder->dev; | 555 | struct drm_device *dev = encoder->dev; |
545 | struct radeon_device *rdev = dev->dev_private; | 556 | struct radeon_device *rdev = dev->dev_private; |
557 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
546 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | 558 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
547 | 559 | ||
548 | if (!ASIC_IS_DCE4(rdev)) | 560 | if (!ASIC_IS_DCE4(rdev)) |
549 | return; | 561 | return; |
550 | 562 | ||
551 | if (radeon_connector_encoder_is_dp_bridge(connector)) | 563 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == |
564 | ENCODER_OBJECT_ID_NUTMEG) | ||
552 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | 565 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; |
566 | else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == | ||
567 | ENCODER_OBJECT_ID_TRAVIS) | ||
568 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | ||
569 | else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
570 | u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); | ||
571 | if (tmp & 1) | ||
572 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | ||
573 | } | ||
553 | 574 | ||
554 | atombios_dig_encoder_setup(encoder, | 575 | atombios_dig_encoder_setup(encoder, |
555 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, | 576 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, |
556 | panel_mode); | 577 | panel_mode); |
578 | |||
579 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) && | ||
580 | (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { | ||
581 | radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1); | ||
582 | } | ||
557 | } | 583 | } |
558 | 584 | ||
559 | void radeon_dp_set_link_config(struct drm_connector *connector, | 585 | void radeon_dp_set_link_config(struct drm_connector *connector, |
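
radeon_dp_set_panel_mode now keys off the specific DP bridge chip rather than a yes/no bridge test. The resulting decision, summarized from the hunk above:

    bridge encoder is NUTMEG          ->  DP_PANEL_MODE_INTERNAL_DP1_MODE
    bridge encoder is TRAVIS          ->  DP_PANEL_MODE_INTERNAL_DP2_MODE
    eDP sink, DPCD cap bit 0 set      ->  DP_PANEL_MODE_INTERNAL_DP2_MODE
    otherwise                         ->  DP_PANEL_MODE_EXTERNAL_DP_MODE

In the eDP DP2 case the sink is configured to match, by writing 1 to DP_EDP_CONFIGURATION_SET.
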
@@ -613,6 +639,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, | |||
613 | return true; | 639 | return true; |
614 | } | 640 | } |
615 | 641 | ||
642 | bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) | ||
643 | { | ||
644 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
645 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | ||
646 | |||
647 | if (!radeon_dp_get_link_status(radeon_connector, link_status)) | ||
648 | return false; | ||
649 | if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) | ||
650 | return false; | ||
651 | return true; | ||
652 | } | ||
653 | |||
616 | struct radeon_dp_link_train_info { | 654 | struct radeon_dp_link_train_info { |
617 | struct radeon_device *rdev; | 655 | struct radeon_device *rdev; |
618 | struct drm_encoder *encoder; | 656 | struct drm_encoder *encoder; |
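
radeon_dp_needs_link_train gives callers a cheap probe: read back the DPCD link status and report whether channel equalization has been lost. A hypothetical call site — this patch only adds the helper, so the consumer shown here is an assumption:

    /* e.g. from a hotplug or detect path (hypothetical) */
    if (radeon_dp_needs_link_train(radeon_connector))
            radeon_dp_link_train(encoder, connector);
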
@@ -627,6 +665,7 @@ struct radeon_dp_link_train_info { | |||
627 | u8 train_set[4]; | 665 | u8 train_set[4]; |
628 | u8 link_status[DP_LINK_STATUS_SIZE]; | 666 | u8 link_status[DP_LINK_STATUS_SIZE]; |
629 | u8 tries; | 667 | u8 tries; |
668 | bool use_dpencoder; | ||
630 | }; | 669 | }; |
631 | 670 | ||
632 | static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) | 671 | static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) |
@@ -646,7 +685,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) | |||
646 | int rtp = 0; | 685 | int rtp = 0; |
647 | 686 | ||
648 | /* set training pattern on the source */ | 687 | /* set training pattern on the source */ |
649 | if (ASIC_IS_DCE4(dp_info->rdev)) { | 688 | if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) { |
650 | switch (tp) { | 689 | switch (tp) { |
651 | case DP_TRAINING_PATTERN_1: | 690 | case DP_TRAINING_PATTERN_1: |
652 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; | 691 | rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; |
@@ -706,7 +745,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) | |||
706 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); | 745 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); |
707 | 746 | ||
708 | /* start training on the source */ | 747 | /* start training on the source */ |
709 | if (ASIC_IS_DCE4(dp_info->rdev)) | 748 | if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) |
710 | atombios_dig_encoder_setup(dp_info->encoder, | 749 | atombios_dig_encoder_setup(dp_info->encoder, |
711 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); | 750 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); |
712 | else | 751 | else |
@@ -731,7 +770,7 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info | |||
731 | DP_TRAINING_PATTERN_DISABLE); | 770 | DP_TRAINING_PATTERN_DISABLE); |
732 | 771 | ||
733 | /* disable the training pattern on the source */ | 772 | /* disable the training pattern on the source */ |
734 | if (ASIC_IS_DCE4(dp_info->rdev)) | 773 | if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) |
735 | atombios_dig_encoder_setup(dp_info->encoder, | 774 | atombios_dig_encoder_setup(dp_info->encoder, |
736 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); | 775 | ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); |
737 | else | 776 | else |
@@ -869,7 +908,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder, | |||
869 | struct radeon_connector *radeon_connector; | 908 | struct radeon_connector *radeon_connector; |
870 | struct radeon_connector_atom_dig *dig_connector; | 909 | struct radeon_connector_atom_dig *dig_connector; |
871 | struct radeon_dp_link_train_info dp_info; | 910 | struct radeon_dp_link_train_info dp_info; |
872 | u8 tmp; | 911 | int index; |
912 | u8 tmp, frev, crev; | ||
873 | 913 | ||
874 | if (!radeon_encoder->enc_priv) | 914 | if (!radeon_encoder->enc_priv) |
875 | return; | 915 | return; |
@@ -884,6 +924,18 @@ void radeon_dp_link_train(struct drm_encoder *encoder, | |||
884 | (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) | 924 | (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) |
885 | return; | 925 | return; |
886 | 926 | ||
927 | /* DPEncoderService newer than 1.1 can't properly program the | ||
928 | * training pattern. When facing such a version, use the | ||
929 | * DIGXEncoderControl (X == 1 | 2) instead. | ||
930 | */ | ||
931 | dp_info.use_dpencoder = true; | ||
932 | index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); | ||
933 | if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) { | ||
934 | if (crev > 1) { | ||
935 | dp_info.use_dpencoder = false; | ||
936 | } | ||
937 | } | ||
938 | |||
887 | dp_info.enc_id = 0; | 939 | dp_info.enc_id = 0; |
888 | if (dig->dig_encoder) | 940 | if (dig->dig_encoder) |
889 | dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | 941 | dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 15bd0477a3e..c3f0d428c51 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev); | |||
41 | void evergreen_fini(struct radeon_device *rdev); | 41 | void evergreen_fini(struct radeon_device *rdev); |
42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | 42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); |
43 | 43 | ||
44 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | ||
45 | { | ||
46 | u16 ctl, v; | ||
47 | int cap, err; | ||
48 | |||
49 | cap = pci_pcie_cap(rdev->pdev); | ||
50 | if (!cap) | ||
51 | return; | ||
52 | |||
53 | err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); | ||
54 | if (err) | ||
55 | return; | ||
56 | |||
57 | v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; | ||
58 | |||
59 | /* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it | ||
60 | * to avoid hangs or performance issues | ||
61 | */ | ||
62 | if ((v == 0) || (v == 6) || (v == 7)) { | ||
63 | ctl &= ~PCI_EXP_DEVCTL_READRQ; | ||
64 | ctl |= (2 << 12); | ||
65 | pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); | ||
66 | } | ||
67 | } | ||
68 | |||
44 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) | 69 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) |
45 | { | 70 | { |
46 | /* enable the pflip int */ | 71 | /* enable the pflip int */ |
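
evergreen_fix_pci_max_read_req_size sanity-checks the PCIe Max Read Request Size field, DEVCTL bits 14:12 (PCI_EXP_DEVCTL_READRQ). The field encodes a request size of 128 << v bytes, so v = 0..5 means 128..4096 bytes and 6..7 are reserved; these GPUs additionally misbehave with v == 0. The fix forces 512-byte requests:

    /* v: 0 -> 128B (bad on these GPUs), 1 -> 256B, 2 -> 512B,
     * 3 -> 1KB, 4 -> 2KB, 5 -> 4KB, 6/7 -> reserved (invalid) */
    ctl &= ~PCI_EXP_DEVCTL_READRQ;      /* clear bits 14:12          */
    ctl |= (2 << 12);                   /* MRRS = 128 << 2 = 512 B   */
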
@@ -57,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
57 | { | 82 | { |
58 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | 83 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
59 | u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); | 84 | u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); |
85 | int i; | ||
60 | 86 | ||
61 | /* Lock the graphics update lock */ | 87 | /* Lock the graphics update lock */ |
62 | tmp |= EVERGREEN_GRPH_UPDATE_LOCK; | 88 | tmp |= EVERGREEN_GRPH_UPDATE_LOCK; |
@@ -74,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
74 | (u32)crtc_base); | 100 | (u32)crtc_base); |
75 | 101 | ||
76 | /* Wait for update_pending to go high. */ | 102 | /* Wait for update_pending to go high. */ |
77 | while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); | 103 | for (i = 0; i < rdev->usec_timeout; i++) { |
104 | if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) | ||
105 | break; | ||
106 | udelay(1); | ||
107 | } | ||
78 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | 108 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); |
79 | 109 | ||
80 | /* Unlock the lock, so double-buffering can take place inside vblank */ | 110 | /* Unlock the lock, so double-buffering can take place inside vblank */ |
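
The page-flip path stops busy-waiting forever on GRPH_SURFACE_UPDATE_PENDING: the loop is now bounded by rdev->usec_timeout (typically 100000 in this driver), so a bit that never rises costs at most about 100 ms of 1 µs polls instead of a hang. The idiom:

    for (i = 0; i < rdev->usec_timeout; i++) {
            if (RREG32(reg) & bit)      /* reg/bit vary per call site */
                    break;
            udelay(1);
    }
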
@@ -328,6 +358,7 @@ void evergreen_hpd_init(struct radeon_device *rdev) | |||
328 | default: | 358 | default: |
329 | break; | 359 | break; |
330 | } | 360 | } |
361 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
331 | } | 362 | } |
332 | if (rdev->irq.installed) | 363 | if (rdev->irq.installed) |
333 | evergreen_irq_set(rdev); | 364 | evergreen_irq_set(rdev); |
@@ -743,7 +774,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
743 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || | 774 | !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || |
744 | !evergreen_check_latency_hiding(&wm) || | 775 | !evergreen_check_latency_hiding(&wm) || |
745 | (rdev->disp_priority == 2)) { | 776 | (rdev->disp_priority == 2)) { |
746 | DRM_INFO("force priority to high\n"); | 777 | DRM_DEBUG_KMS("force priority to high\n"); |
747 | priority_a_cnt |= PRIORITY_ALWAYS_ON; | 778 | priority_a_cnt |= PRIORITY_ALWAYS_ON; |
748 | priority_b_cnt |= PRIORITY_ALWAYS_ON; | 779 | priority_b_cnt |= PRIORITY_ALWAYS_ON; |
749 | } | 780 | } |
@@ -1357,6 +1388,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1357 | SOFT_RESET_PA | | 1388 | SOFT_RESET_PA | |
1358 | SOFT_RESET_SH | | 1389 | SOFT_RESET_SH | |
1359 | SOFT_RESET_VGT | | 1390 | SOFT_RESET_VGT | |
1391 | SOFT_RESET_SPI | | ||
1360 | SOFT_RESET_SX)); | 1392 | SOFT_RESET_SX)); |
1361 | RREG32(GRBM_SOFT_RESET); | 1393 | RREG32(GRBM_SOFT_RESET); |
1362 | mdelay(15); | 1394 | mdelay(15); |
@@ -1378,13 +1410,11 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1378 | /* Initialize the ring buffer's read and write pointers */ | 1410 | /* Initialize the ring buffer's read and write pointers */ |
1379 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 1411 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
1380 | WREG32(CP_RB_RPTR_WR, 0); | 1412 | WREG32(CP_RB_RPTR_WR, 0); |
1381 | WREG32(CP_RB_WPTR, 0); | 1413 | rdev->cp.wptr = 0; |
1414 | WREG32(CP_RB_WPTR, rdev->cp.wptr); | ||
1382 | 1415 | ||
1383 | /* set the wb address whether it's enabled or not */ | 1416 | /* set the wb address whether it's enabled or not */ |
1384 | WREG32(CP_RB_RPTR_ADDR, | 1417 | WREG32(CP_RB_RPTR_ADDR, |
1385 | #ifdef __BIG_ENDIAN | ||
1386 | RB_RPTR_SWAP(2) | | ||
1387 | #endif | ||
1388 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | 1418 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); |
1389 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | 1419 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
1390 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | 1420 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
@@ -1403,7 +1433,6 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1403 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 1433 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
1404 | 1434 | ||
1405 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 1435 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
1406 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | ||
1407 | 1436 | ||
1408 | evergreen_cp_start(rdev); | 1437 | evergreen_cp_start(rdev); |
1409 | rdev->cp.ready = true; | 1438 | rdev->cp.ready = true; |
@@ -1567,48 +1596,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1567 | return backend_map; | 1596 | return backend_map; |
1568 | } | 1597 | } |
1569 | 1598 | ||
1570 | static void evergreen_program_channel_remap(struct radeon_device *rdev) | ||
1571 | { | ||
1572 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
1573 | |||
1574 | tmp = RREG32(MC_SHARED_CHMAP); | ||
1575 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
1576 | case 0: | ||
1577 | case 1: | ||
1578 | case 2: | ||
1579 | case 3: | ||
1580 | default: | ||
1581 | /* default mapping */ | ||
1582 | mc_shared_chremap = 0x00fac688; | ||
1583 | break; | ||
1584 | } | ||
1585 | |||
1586 | switch (rdev->family) { | ||
1587 | case CHIP_HEMLOCK: | ||
1588 | case CHIP_CYPRESS: | ||
1589 | case CHIP_BARTS: | ||
1590 | tcp_chan_steer_lo = 0x54763210; | ||
1591 | tcp_chan_steer_hi = 0x0000ba98; | ||
1592 | break; | ||
1593 | case CHIP_JUNIPER: | ||
1594 | case CHIP_REDWOOD: | ||
1595 | case CHIP_CEDAR: | ||
1596 | case CHIP_PALM: | ||
1597 | case CHIP_SUMO: | ||
1598 | case CHIP_SUMO2: | ||
1599 | case CHIP_TURKS: | ||
1600 | case CHIP_CAICOS: | ||
1601 | default: | ||
1602 | tcp_chan_steer_lo = 0x76543210; | ||
1603 | tcp_chan_steer_hi = 0x0000ba98; | ||
1604 | break; | ||
1605 | } | ||
1606 | |||
1607 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
1608 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
1609 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
1610 | } | ||
1611 | |||
1612 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1599 | static void evergreen_gpu_init(struct radeon_device *rdev) |
1613 | { | 1600 | { |
1614 | u32 cc_rb_backend_disable = 0; | 1601 | u32 cc_rb_backend_disable = 0; |
@@ -1865,6 +1852,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1865 | 1852 | ||
1866 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 1853 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
1867 | 1854 | ||
1855 | evergreen_fix_pci_max_read_req_size(rdev); | ||
1856 | |||
1868 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; | 1857 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; |
1869 | 1858 | ||
1870 | cc_gc_shader_pipe_config |= | 1859 | cc_gc_shader_pipe_config |= |
@@ -2047,13 +2036,12 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2047 | rdev->config.evergreen.tile_config |= | 2036 | rdev->config.evergreen.tile_config |= |
2048 | ((gb_addr_config & 0x30000000) >> 28) << 12; | 2037 | ((gb_addr_config & 0x30000000) >> 28) << 12; |
2049 | 2038 | ||
2039 | rdev->config.evergreen.backend_map = gb_backend_map; | ||
2050 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 2040 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
2051 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 2041 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
2052 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 2042 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
2053 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 2043 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
2054 | 2044 | ||
2055 | evergreen_program_channel_remap(rdev); | ||
2056 | |||
2057 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | 2045 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; |
2058 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | 2046 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; |
2059 | 2047 | ||
@@ -2761,6 +2749,9 @@ int evergreen_irq_process(struct radeon_device *rdev) | |||
2761 | return IRQ_NONE; | 2749 | return IRQ_NONE; |
2762 | } | 2750 | } |
2763 | restart_ih: | 2751 | restart_ih: |
2752 | /* Order reading of wptr vs. reading of IH ring data */ | ||
2753 | rmb(); | ||
2754 | |||
2764 | /* display interrupts */ | 2755 | /* display interrupts */ |
2765 | evergreen_irq_ack(rdev); | 2756 | evergreen_irq_ack(rdev); |
2766 | 2757 | ||
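
The rmb() added at restart_ih orders the earlier load of the IH write pointer against the loads of ring entries that follow: without it the CPU could reorder or speculate reads from the IH ring ahead of the wptr read and process stale entries. In outline (the surrounding code is abridged, and the wptr helper name is an assumption from this file's conventions):

    wptr = evergreen_get_ih_wptr(rdev); /* read the index first     */
    rmb();                              /* no ring loads pass this  */
    while (rptr != wptr) {
            /* decode entries the GPU wrote before updating wptr */
    }
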
@@ -3142,21 +3133,23 @@ int evergreen_suspend(struct radeon_device *rdev) | |||
3142 | } | 3133 | } |
3143 | 3134 | ||
3144 | int evergreen_copy_blit(struct radeon_device *rdev, | 3135 | int evergreen_copy_blit(struct radeon_device *rdev, |
3145 | uint64_t src_offset, uint64_t dst_offset, | 3136 | uint64_t src_offset, |
3146 | unsigned num_pages, struct radeon_fence *fence) | 3137 | uint64_t dst_offset, |
3138 | unsigned num_gpu_pages, | ||
3139 | struct radeon_fence *fence) | ||
3147 | { | 3140 | { |
3148 | int r; | 3141 | int r; |
3149 | 3142 | ||
3150 | mutex_lock(&rdev->r600_blit.mutex); | 3143 | mutex_lock(&rdev->r600_blit.mutex); |
3151 | rdev->r600_blit.vb_ib = NULL; | 3144 | rdev->r600_blit.vb_ib = NULL; |
3152 | r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | 3145 | r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
3153 | if (r) { | 3146 | if (r) { |
3154 | if (rdev->r600_blit.vb_ib) | 3147 | if (rdev->r600_blit.vb_ib) |
3155 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 3148 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
3156 | mutex_unlock(&rdev->r600_blit.mutex); | 3149 | mutex_unlock(&rdev->r600_blit.mutex); |
3157 | return r; | 3150 | return r; |
3158 | } | 3151 | } |
3159 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | 3152 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
3160 | evergreen_blit_done_copy(rdev, fence); | 3153 | evergreen_blit_done_copy(rdev, fence); |
3161 | mutex_unlock(&rdev->r600_blit.mutex); | 3154 | mutex_unlock(&rdev->r600_blit.mutex); |
3162 | return 0; | 3155 | return 0; |
@@ -3265,6 +3258,18 @@ int evergreen_init(struct radeon_device *rdev) | |||
3265 | rdev->accel_working = false; | 3258 | rdev->accel_working = false; |
3266 | } | 3259 | } |
3267 | } | 3260 | } |
3261 | |||
3262 | /* Don't start up if the MC ucode is missing on BTC parts. | ||
3263 | * The default clocks and voltages before the MC ucode | ||
3264 | * is loaded are not sufficient for advanced operations. | ||
3265 | */ | ||
3266 | if (ASIC_IS_DCE5(rdev)) { | ||
3267 | if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { | ||
3268 | DRM_ERROR("radeon: MC ucode required for NI+.\n"); | ||
3269 | return -EINVAL; | ||
3270 | } | ||
3271 | } | ||
3272 | |||
3268 | return 0; | 3273 | return 0; |
3269 | } | 3274 | } |
3270 | 3275 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 23d36417158..a134790903d 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
428 | last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | 428 | last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); |
429 | 429 | ||
430 | i = (reg >> 7); | 430 | i = (reg >> 7); |
431 | if (i > last_reg) { | 431 | if (i >= last_reg) { |
432 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 432 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
433 | return -EINVAL; | 433 | return -EINVAL; |
434 | } | 434 | } |
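
The comparison fix is a classic off-by-one: last_reg is ARRAY_SIZE(evergreen_reg_safe_bm) — a count, not a last index — so valid indices run 0 .. last_reg - 1 and i == last_reg must be rejected as well:

    last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);  /* number of entries */
    i = (reg >> 7);
    if (i >= last_reg)      /* the old "i > last_reg" read one entry past the end */
            return -EINVAL;
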
@@ -856,7 +856,6 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
856 | case SQ_PGM_START_PS: | 856 | case SQ_PGM_START_PS: |
857 | case SQ_PGM_START_HS: | 857 | case SQ_PGM_START_HS: |
858 | case SQ_PGM_START_LS: | 858 | case SQ_PGM_START_LS: |
859 | case GDS_ADDR_BASE: | ||
860 | case SQ_CONST_MEM_BASE: | 859 | case SQ_CONST_MEM_BASE: |
861 | case SQ_ALU_CONST_CACHE_GS_0: | 860 | case SQ_ALU_CONST_CACHE_GS_0: |
862 | case SQ_ALU_CONST_CACHE_GS_1: | 861 | case SQ_ALU_CONST_CACHE_GS_1: |
@@ -946,6 +945,34 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
946 | } | 945 | } |
947 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 946 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
948 | break; | 947 | break; |
948 | case SX_MEMORY_EXPORT_BASE: | ||
949 | if (p->rdev->family >= CHIP_CAYMAN) { | ||
950 | dev_warn(p->dev, "bad SET_CONFIG_REG " | ||
951 | "0x%04X\n", reg); | ||
952 | return -EINVAL; | ||
953 | } | ||
954 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
955 | if (r) { | ||
956 | dev_warn(p->dev, "bad SET_CONFIG_REG " | ||
957 | "0x%04X\n", reg); | ||
958 | return -EINVAL; | ||
959 | } | ||
960 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
961 | break; | ||
962 | case CAYMAN_SX_SCATTER_EXPORT_BASE: | ||
963 | if (p->rdev->family < CHIP_CAYMAN) { | ||
964 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
965 | "0x%04X\n", reg); | ||
966 | return -EINVAL; | ||
967 | } | ||
968 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
969 | if (r) { | ||
970 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
971 | "0x%04X\n", reg); | ||
972 | return -EINVAL; | ||
973 | } | ||
974 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
975 | break; | ||
949 | default: | 976 | default: |
950 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 977 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
951 | return -EINVAL; | 978 | return -EINVAL; |
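Both new cases end with the relocation patch used throughout the checker: the register holds a 256-byte-aligned base, so the buffer's GPU address is shifted right by 8 and added into the command word. A self-contained sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ib_word  = 0x10;          /* offset supplied by userspace */
    uint64_t gpu_base = 0x40000000ull; /* where the kernel placed the buffer */

    /* patch the 256-byte-aligned base into the command word */
    ib_word += (uint32_t)((gpu_base >> 8) & 0xffffffff);
    printf("patched word: 0x%08x\n", ib_word);
    return 0;
}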
@@ -1153,6 +1180,34 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1153 | return r; | 1180 | return r; |
1154 | } | 1181 | } |
1155 | break; | 1182 | break; |
1183 | case PACKET3_DISPATCH_DIRECT: | ||
1184 | if (pkt->count != 3) { | ||
1185 | DRM_ERROR("bad DISPATCH_DIRECT\n"); | ||
1186 | return -EINVAL; | ||
1187 | } | ||
1188 | r = evergreen_cs_track_check(p); | ||
1189 | if (r) { | ||
1190 | dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); | ||
1191 | return r; | ||
1192 | } | ||
1193 | break; | ||
1194 | case PACKET3_DISPATCH_INDIRECT: | ||
1195 | if (pkt->count != 1) { | ||
1196 | DRM_ERROR("bad DISPATCH_INDIRECT\n"); | ||
1197 | return -EINVAL; | ||
1198 | } | ||
1199 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
1200 | if (r) { | ||
1201 | DRM_ERROR("bad DISPATCH_INDIRECT\n"); | ||
1202 | return -EINVAL; | ||
1203 | } | ||
1204 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | ||
1205 | r = evergreen_cs_track_check(p); | ||
1206 | if (r) { | ||
1207 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | ||
1208 | return r; | ||
1209 | } | ||
1210 | break; | ||
1156 | case PACKET3_WAIT_REG_MEM: | 1211 | case PACKET3_WAIT_REG_MEM: |
1157 | if (pkt->count != 5) { | 1212 | if (pkt->count != 5) { |
1158 | DRM_ERROR("bad WAIT_REG_MEM\n"); | 1213 | DRM_ERROR("bad WAIT_REG_MEM\n"); |
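The new DISPATCH_DIRECT/DISPATCH_INDIRECT cases follow the parser's usual shape: verify the packet's fixed body length first, resolve a relocation if the packet carries an address, then run the state tracker. A rough userspace model of the length check (the opcode values below are assumptions, not taken from the header):

#include <stdio.h>

struct pkt { unsigned opcode, count; };

enum { DISPATCH_DIRECT = 0x15, DISPATCH_INDIRECT = 0x16 }; /* assumed ids */

static int check_packet(const struct pkt *p)
{
    switch (p->opcode) {
    case DISPATCH_DIRECT:
        return p->count == 3 ? 0 : -1;   /* fixed-size packet body */
    case DISPATCH_INDIRECT:
        return p->count == 1 ? 0 : -1;
    default:
        return -1;                       /* unknown opcode: reject */
    }
}

int main(void)
{
    struct pkt good = { DISPATCH_DIRECT, 3 }, bad = { DISPATCH_DIRECT, 2 };
    printf("%d %d\n", check_packet(&good), check_packet(&bad)); /* 0 -1 */
    return 0;
}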
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b7b2714f0b3..7363d9dec90 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -351,6 +351,7 @@ | |||
351 | #define COLOR_BUFFER_SIZE(x) ((x) << 0) | 351 | #define COLOR_BUFFER_SIZE(x) ((x) << 0) |
352 | #define POSITION_BUFFER_SIZE(x) ((x) << 8) | 352 | #define POSITION_BUFFER_SIZE(x) ((x) << 8) |
353 | #define SMX_BUFFER_SIZE(x) ((x) << 16) | 353 | #define SMX_BUFFER_SIZE(x) ((x) << 16) |
354 | #define SX_MEMORY_EXPORT_BASE 0x9010 | ||
354 | #define SX_MISC 0x28350 | 355 | #define SX_MISC 0x28350 |
355 | 356 | ||
356 | #define CB_PERF_CTR0_SEL_0 0x9A20 | 357 | #define CB_PERF_CTR0_SEL_0 0x9A20 |
@@ -1122,6 +1123,7 @@ | |||
1122 | #define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 | 1123 | #define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 |
1123 | #define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 | 1124 | #define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 |
1124 | #define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 | 1125 | #define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 |
1126 | #define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358 | ||
1125 | /* cayman packet3 addition */ | 1127 | /* cayman packet3 addition */ |
1126 | #define CAYMAN_PACKET3_DEALLOC_STATE 0x14 | 1128 | #define CAYMAN_PACKET3_DEALLOC_STATE 0x14 |
1127 | 1129 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 559dbd41290..8c79ca97753 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev); | |||
39 | extern void evergreen_mc_program(struct radeon_device *rdev); | 39 | extern void evergreen_mc_program(struct radeon_device *rdev); |
40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); | 40 | extern void evergreen_irq_suspend(struct radeon_device *rdev); |
41 | extern int evergreen_mc_init(struct radeon_device *rdev); | 41 | extern int evergreen_mc_init(struct radeon_device *rdev); |
42 | extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); | ||
42 | 43 | ||
43 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | 44 | #define EVERGREEN_PFP_UCODE_SIZE 1120 |
44 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | 45 | #define EVERGREEN_PM4_UCODE_SIZE 1376 |
@@ -568,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
568 | return backend_map; | 569 | return backend_map; |
569 | } | 570 | } |
570 | 571 | ||
571 | static void cayman_program_channel_remap(struct radeon_device *rdev) | ||
572 | { | ||
573 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
574 | |||
575 | tmp = RREG32(MC_SHARED_CHMAP); | ||
576 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
577 | case 0: | ||
578 | case 1: | ||
579 | case 2: | ||
580 | case 3: | ||
581 | default: | ||
582 | /* default mapping */ | ||
583 | mc_shared_chremap = 0x00fac688; | ||
584 | break; | ||
585 | } | ||
586 | |||
587 | switch (rdev->family) { | ||
588 | case CHIP_CAYMAN: | ||
589 | default: | ||
590 | //tcp_chan_steer_lo = 0x54763210 | ||
591 | tcp_chan_steer_lo = 0x76543210; | ||
592 | tcp_chan_steer_hi = 0x0000ba98; | ||
593 | break; | ||
594 | } | ||
595 | |||
596 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
597 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
598 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
599 | } | ||
600 | |||
601 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, | 572 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, |
602 | u32 disable_mask_per_se, | 573 | u32 disable_mask_per_se, |
603 | u32 max_disable_mask_per_se, | 574 | u32 max_disable_mask_per_se, |
@@ -669,6 +640,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
669 | 640 | ||
670 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 641 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
671 | 642 | ||
643 | evergreen_fix_pci_max_read_req_size(rdev); | ||
644 | |||
672 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 645 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
673 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 646 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
674 | 647 | ||
@@ -833,13 +806,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
833 | rdev->config.cayman.tile_config |= | 806 | rdev->config.cayman.tile_config |= |
834 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; | 807 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; |
835 | 808 | ||
809 | rdev->config.cayman.backend_map = gb_backend_map; | ||
836 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 810 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
837 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 811 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
838 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 812 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
839 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 813 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
840 | 814 | ||
841 | cayman_program_channel_remap(rdev); | ||
842 | |||
843 | /* primary versions */ | 815 | /* primary versions */ |
844 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 816 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
845 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 817 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
@@ -1158,6 +1130,7 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1158 | SOFT_RESET_PA | | 1130 | SOFT_RESET_PA | |
1159 | SOFT_RESET_SH | | 1131 | SOFT_RESET_SH | |
1160 | SOFT_RESET_VGT | | 1132 | SOFT_RESET_VGT | |
1133 | SOFT_RESET_SPI | | ||
1161 | SOFT_RESET_SX)); | 1134 | SOFT_RESET_SX)); |
1162 | RREG32(GRBM_SOFT_RESET); | 1135 | RREG32(GRBM_SOFT_RESET); |
1163 | mdelay(15); | 1136 | mdelay(15); |
@@ -1182,7 +1155,8 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1182 | 1155 | ||
1183 | /* Initialize the ring buffer's read and write pointers */ | 1156 | /* Initialize the ring buffer's read and write pointers */ |
1184 | WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); | 1157 | WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); |
1185 | WREG32(CP_RB0_WPTR, 0); | 1158 | rdev->cp.wptr = 0; |
1159 | WREG32(CP_RB0_WPTR, rdev->cp.wptr); | ||
1186 | 1160 | ||
1187 | /* set the wb address whether it's enabled or not */ | 1161 | /* set the wb address whether it's enabled or not */ |
1188 | WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | 1162 | WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); |
@@ -1202,7 +1176,6 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1202 | WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); | 1176 | WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); |
1203 | 1177 | ||
1204 | rdev->cp.rptr = RREG32(CP_RB0_RPTR); | 1178 | rdev->cp.rptr = RREG32(CP_RB0_RPTR); |
1205 | rdev->cp.wptr = RREG32(CP_RB0_WPTR); | ||
1206 | 1179 | ||
1207 | /* ring1 - compute only */ | 1180 | /* ring1 - compute only */ |
1208 | /* Set ring buffer size */ | 1181 | /* Set ring buffer size */ |
@@ -1215,7 +1188,8 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1215 | 1188 | ||
1216 | /* Initialize the ring buffer's read and write pointers */ | 1189 | /* Initialize the ring buffer's read and write pointers */ |
1217 | WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); | 1190 | WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); |
1218 | WREG32(CP_RB1_WPTR, 0); | 1191 | rdev->cp1.wptr = 0; |
1192 | WREG32(CP_RB1_WPTR, rdev->cp1.wptr); | ||
1219 | 1193 | ||
1220 | /* set the wb address whether it's enabled or not */ | 1194 | /* set the wb address whether it's enabled or not */ |
1221 | WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); | 1195 | WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); |
@@ -1227,7 +1201,6 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1227 | WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); | 1201 | WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); |
1228 | 1202 | ||
1229 | rdev->cp1.rptr = RREG32(CP_RB1_RPTR); | 1203 | rdev->cp1.rptr = RREG32(CP_RB1_RPTR); |
1230 | rdev->cp1.wptr = RREG32(CP_RB1_WPTR); | ||
1231 | 1204 | ||
1232 | /* ring2 - compute only */ | 1205 | /* ring2 - compute only */ |
1233 | /* Set ring buffer size */ | 1206 | /* Set ring buffer size */ |
@@ -1240,7 +1213,8 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1240 | 1213 | ||
1241 | /* Initialize the ring buffer's read and write pointers */ | 1214 | /* Initialize the ring buffer's read and write pointers */ |
1242 | WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); | 1215 | WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); |
1243 | WREG32(CP_RB2_WPTR, 0); | 1216 | rdev->cp2.wptr = 0; |
1217 | WREG32(CP_RB2_WPTR, rdev->cp2.wptr); | ||
1244 | 1218 | ||
1245 | /* set the wb address whether it's enabled or not */ | 1219 | /* set the wb address whether it's enabled or not */ |
1246 | WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); | 1220 | WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); |
@@ -1252,7 +1226,6 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1252 | WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); | 1226 | WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); |
1253 | 1227 | ||
1254 | rdev->cp2.rptr = RREG32(CP_RB2_RPTR); | 1228 | rdev->cp2.rptr = RREG32(CP_RB2_RPTR); |
1255 | rdev->cp2.wptr = RREG32(CP_RB2_WPTR); | ||
1256 | 1229 | ||
1257 | /* start the rings */ | 1230 | /* start the rings */ |
1258 | cayman_cp_start(rdev); | 1231 | cayman_cp_start(rdev); |
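All three rings get the same write-pointer fix: instead of writing 0 to the WPTR register and later reading it back (which can return a stale or garbage value around reset/resume), the driver zeroes its cached wptr and programs the hardware from that cache, making the cache the single source of truth. A toy model of the idea, with stand-in names:

#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t wptr; };
static uint32_t hw_wptr_reg = 0xdeadbeef;   /* stale value after reset */

static void wreg(uint32_t v) { hw_wptr_reg = v; }

int main(void)
{
    struct ring cp = { 0 };

    cp.wptr = 0;          /* cache is the single source of truth */
    wreg(cp.wptr);        /* hardware follows the cache */
    printf("cache=%u hw=%u\n", cp.wptr, hw_wptr_reg);
    return 0;
}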
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f2204cb1ccd..764249587f1 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -84,13 +84,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
84 | { | 84 | { |
85 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | 85 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
86 | u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; | 86 | u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; |
87 | int i; | ||
87 | 88 | ||
88 | /* Lock the graphics update lock */ | 89 | /* Lock the graphics update lock */ |
89 | /* update the scanout addresses */ | 90 | /* update the scanout addresses */ |
90 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); | 91 | WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); |
91 | 92 | ||
92 | /* Wait for update_pending to go high. */ | 93 | /* Wait for update_pending to go high. */ |
93 | while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); | 94 | for (i = 0; i < rdev->usec_timeout; i++) { |
95 | if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) | ||
96 | break; | ||
97 | udelay(1); | ||
98 | } | ||
94 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | 99 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); |
95 | 100 | ||
96 | /* Unlock the lock, so double-buffering can take place inside vblank */ | 101 | /* Unlock the lock, so double-buffering can take place inside vblank */ |
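The loop above replaces an unbounded while (!update_pending()); spin: if the flip trigger never fires, the old code hangs the CPU indefinitely, while the new code gives up after rdev->usec_timeout microseconds. A compilable sketch of the bounded-poll pattern, with a faked status read:

#include <stdio.h>

static int update_pending(int i) { return i > 100; }  /* fake hw status */

int main(void)
{
    int i, usec_timeout = 100000;

    for (i = 0; i < usec_timeout; i++) {
        if (update_pending(i))
            break;
        /* udelay(1) in the driver; elided here */
    }
    printf(i < usec_timeout ? "flip latched after %d us\n"
                            : "timed out after %d us\n", i);
    return 0;
}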
@@ -434,6 +439,7 @@ void r100_hpd_init(struct radeon_device *rdev) | |||
434 | default: | 439 | default: |
435 | break; | 440 | break; |
436 | } | 441 | } |
442 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
437 | } | 443 | } |
438 | if (rdev->irq.installed) | 444 | if (rdev->irq.installed) |
439 | r100_irq_set(rdev); | 445 | r100_irq_set(rdev); |
@@ -721,11 +727,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
721 | int r100_copy_blit(struct radeon_device *rdev, | 727 | int r100_copy_blit(struct radeon_device *rdev, |
722 | uint64_t src_offset, | 728 | uint64_t src_offset, |
723 | uint64_t dst_offset, | 729 | uint64_t dst_offset, |
724 | unsigned num_pages, | 730 | unsigned num_gpu_pages, |
725 | struct radeon_fence *fence) | 731 | struct radeon_fence *fence) |
726 | { | 732 | { |
727 | uint32_t cur_pages; | 733 | uint32_t cur_pages; |
728 | uint32_t stride_bytes = PAGE_SIZE; | 734 | uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; |
729 | uint32_t pitch; | 735 | uint32_t pitch; |
730 | uint32_t stride_pixels; | 736 | uint32_t stride_pixels; |
731 | unsigned ndw; | 737 | unsigned ndw; |
@@ -737,7 +743,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
737 | /* radeon pitch is /64 */ | 743 | /* radeon pitch is /64 */ |
738 | pitch = stride_bytes / 64; | 744 | pitch = stride_bytes / 64; |
739 | stride_pixels = stride_bytes / 4; | 745 | stride_pixels = stride_bytes / 4; |
740 | num_loops = DIV_ROUND_UP(num_pages, 8191); | 746 | num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); |
741 | 747 | ||
742 | /* Ask for enough room for blit + flush + fence */ | 748 | /* Ask for enough room for blit + flush + fence */ |
743 | ndw = 64 + (10 * num_loops); | 749 | ndw = 64 + (10 * num_loops); |
@@ -746,12 +752,12 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
746 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); | 752 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); |
747 | return -EINVAL; | 753 | return -EINVAL; |
748 | } | 754 | } |
749 | while (num_pages > 0) { | 755 | while (num_gpu_pages > 0) { |
750 | cur_pages = num_pages; | 756 | cur_pages = num_gpu_pages; |
751 | if (cur_pages > 8191) { | 757 | if (cur_pages > 8191) { |
752 | cur_pages = 8191; | 758 | cur_pages = 8191; |
753 | } | 759 | } |
754 | num_pages -= cur_pages; | 760 | num_gpu_pages -= cur_pages; |
755 | 761 | ||
756 | /* pages are in Y direction - height | 762 | /* pages are in Y direction - height |
757 | page width in X direction - width */ | 763 | page width in X direction - width */ |
@@ -773,8 +779,8 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 779 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
774 | radeon_ring_write(rdev, 0); | 780 | radeon_ring_write(rdev, 0); |
775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 781 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
776 | radeon_ring_write(rdev, num_pages); | 782 | radeon_ring_write(rdev, num_gpu_pages); |
777 | radeon_ring_write(rdev, num_pages); | 783 | radeon_ring_write(rdev, num_gpu_pages); |
778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); | 784 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); |
779 | } | 785 | } |
780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); | 786 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); |
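Apart from the rename, the logic is a classic chunked copy: the blitter handles at most 8191 GPU pages per operation, so DIV_ROUND_UP() sizes the ring reservation and the loop peels off up to 8191 pages at a time. Sketch with illustrative values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned num_gpu_pages = 20000;
    unsigned num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

    printf("loops: %u\n", num_loops);          /* 3 */
    while (num_gpu_pages > 0) {
        unsigned cur = num_gpu_pages > 8191 ? 8191 : num_gpu_pages;
        num_gpu_pages -= cur;
        printf("copy %u pages\n", cur);        /* 8191, 8191, 3618 */
    }
    return 0;
}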
@@ -990,7 +996,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
990 | /* Force read & write ptr to 0 */ | 996 | /* Force read & write ptr to 0 */ |
991 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); | 997 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
992 | WREG32(RADEON_CP_RB_RPTR_WR, 0); | 998 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
993 | WREG32(RADEON_CP_RB_WPTR, 0); | 999 | rdev->cp.wptr = 0; |
1000 | WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); | ||
994 | 1001 | ||
995 | /* set the wb address whether it's enabled or not */ | 1002 | /* set the wb address whether it's enabled or not */ |
996 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | 1003 | WREG32(R_00070C_CP_RB_RPTR_ADDR, |
@@ -1007,9 +1014,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1007 | WREG32(RADEON_CP_RB_CNTL, tmp); | 1014 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1008 | udelay(10); | 1015 | udelay(10); |
1009 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); | 1016 | rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
1010 | rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); | ||
1011 | /* protect against crazy HW on resume */ | ||
1012 | rdev->cp.wptr &= rdev->cp.ptr_mask; | ||
1013 | /* Set cp mode to bus mastering & enable cp*/ | 1017 | /* Set cp mode to bus mastering & enable cp*/ |
1014 | WREG32(RADEON_CP_CSQ_MODE, | 1018 | WREG32(RADEON_CP_CSQ_MODE, |
1015 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1019 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
@@ -2065,6 +2069,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev) | |||
2065 | void r100_bm_disable(struct radeon_device *rdev) | 2069 | void r100_bm_disable(struct radeon_device *rdev) |
2066 | { | 2070 | { |
2067 | u32 tmp; | 2071 | u32 tmp; |
2072 | u16 tmp16; | ||
2068 | 2073 | ||
2069 | /* disable bus mastering */ | 2074 | /* disable bus mastering */ |
2070 | tmp = RREG32(R_000030_BUS_CNTL); | 2075 | tmp = RREG32(R_000030_BUS_CNTL); |
@@ -2075,8 +2080,8 @@ void r100_bm_disable(struct radeon_device *rdev) | |||
2075 | WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); | 2080 | WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); |
2076 | tmp = RREG32(RADEON_BUS_CNTL); | 2081 | tmp = RREG32(RADEON_BUS_CNTL); |
2077 | mdelay(1); | 2082 | mdelay(1); |
2078 | pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); | 2083 | pci_read_config_word(rdev->pdev, 0x4, &tmp16); |
2079 | pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); | 2084 | pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB); |
2080 | mdelay(1); | 2085 | mdelay(1); |
2081 | } | 2086 | } |
2082 | 2087 | ||
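The r100_bm_disable() hunk fixes a type-punning bug: pci_read_config_word() fills 16 bits, and aiming it at a u32 through a (u16 *) cast leaves the upper half stale and reads the wrong half on big-endian machines. The fix reads into a genuine u16. A sketch of the masking that follows the read (offset 0x4 is PCI_COMMAND; bit 2 is bus mastering):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t cmd = 0x0007;          /* pretend PCI_COMMAND value */

    cmd &= 0xFFFB;                  /* clear bit 2: bus mastering */
    printf("new command: 0x%04x\n", cmd);
    return 0;
}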
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index f2405830041..a1f3ba063c2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) | |||
84 | int r200_copy_dma(struct radeon_device *rdev, | 84 | int r200_copy_dma(struct radeon_device *rdev, |
85 | uint64_t src_offset, | 85 | uint64_t src_offset, |
86 | uint64_t dst_offset, | 86 | uint64_t dst_offset, |
87 | unsigned num_pages, | 87 | unsigned num_gpu_pages, |
88 | struct radeon_fence *fence) | 88 | struct radeon_fence *fence) |
89 | { | 89 | { |
90 | uint32_t size; | 90 | uint32_t size; |
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev, | |||
93 | int r = 0; | 93 | int r = 0; |
94 | 94 | ||
95 | /* radeon pitch is /64 */ | 95 | /* radeon pitch is /64 */ |
96 | size = num_pages << PAGE_SHIFT; | 96 | size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; |
97 | num_loops = DIV_ROUND_UP(size, 0x1FFFFF); | 97 | num_loops = DIV_ROUND_UP(size, 0x1FFFFF); |
98 | r = radeon_ring_lock(rdev, num_loops * 4 + 64); | 98 | r = radeon_ring_lock(rdev, num_loops * 4 + 64); |
99 | if (r) { | 99 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index bc54b26cb32..9b62a97742a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -762,13 +762,14 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
762 | struct drm_device *dev = rdev->ddev; | 762 | struct drm_device *dev = rdev->ddev; |
763 | struct drm_connector *connector; | 763 | struct drm_connector *connector; |
764 | 764 | ||
765 | if (ASIC_IS_DCE3(rdev)) { | 765 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
766 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); | 766 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
767 | if (ASIC_IS_DCE32(rdev)) | 767 | |
768 | tmp |= DC_HPDx_EN; | 768 | if (ASIC_IS_DCE3(rdev)) { |
769 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); | ||
770 | if (ASIC_IS_DCE32(rdev)) | ||
771 | tmp |= DC_HPDx_EN; | ||
769 | 772 | ||
770 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
771 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
772 | switch (radeon_connector->hpd.hpd) { | 773 | switch (radeon_connector->hpd.hpd) { |
773 | case RADEON_HPD_1: | 774 | case RADEON_HPD_1: |
774 | WREG32(DC_HPD1_CONTROL, tmp); | 775 | WREG32(DC_HPD1_CONTROL, tmp); |
@@ -798,10 +799,7 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
798 | default: | 799 | default: |
799 | break; | 800 | break; |
800 | } | 801 | } |
801 | } | 802 | } else { |
802 | } else { | ||
803 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
804 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
805 | switch (radeon_connector->hpd.hpd) { | 803 | switch (radeon_connector->hpd.hpd) { |
806 | case RADEON_HPD_1: | 804 | case RADEON_HPD_1: |
807 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); | 805 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); |
@@ -819,6 +817,7 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
819 | break; | 817 | break; |
820 | } | 818 | } |
821 | } | 819 | } |
820 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
822 | } | 821 | } |
823 | if (rdev->irq.installed) | 822 | if (rdev->irq.installed) |
824 | r600_irq_set(rdev); | 823 | r600_irq_set(rdev); |
@@ -1662,6 +1661,7 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1662 | R6XX_MAX_BACKENDS_MASK) >> 16)), | 1661 | R6XX_MAX_BACKENDS_MASK) >> 16)), |
1663 | (cc_rb_backend_disable >> 16)); | 1662 | (cc_rb_backend_disable >> 16)); |
1664 | rdev->config.r600.tile_config = tiling_config; | 1663 | rdev->config.r600.tile_config = tiling_config; |
1664 | rdev->config.r600.backend_map = backend_map; | ||
1665 | tiling_config |= BACKEND_MAP(backend_map); | 1665 | tiling_config |= BACKEND_MAP(backend_map); |
1666 | WREG32(GB_TILING_CONFIG, tiling_config); | 1666 | WREG32(GB_TILING_CONFIG, tiling_config); |
1667 | WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); | 1667 | WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); |
@@ -2208,13 +2208,11 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2208 | /* Initialize the ring buffer's read and write pointers */ | 2208 | /* Initialize the ring buffer's read and write pointers */ |
2209 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 2209 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
2210 | WREG32(CP_RB_RPTR_WR, 0); | 2210 | WREG32(CP_RB_RPTR_WR, 0); |
2211 | WREG32(CP_RB_WPTR, 0); | 2211 | rdev->cp.wptr = 0; |
2212 | WREG32(CP_RB_WPTR, rdev->cp.wptr); | ||
2212 | 2213 | ||
2213 | /* set the wb address whether it's enabled or not */ | 2214 | /* set the wb address whether it's enabled or not */ |
2214 | WREG32(CP_RB_RPTR_ADDR, | 2215 | WREG32(CP_RB_RPTR_ADDR, |
2215 | #ifdef __BIG_ENDIAN | ||
2216 | RB_RPTR_SWAP(2) | | ||
2217 | #endif | ||
2218 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | 2216 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); |
2219 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | 2217 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
2220 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | 2218 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
@@ -2233,7 +2231,6 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2233 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 2231 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
2234 | 2232 | ||
2235 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | 2233 | rdev->cp.rptr = RREG32(CP_RB_RPTR); |
2236 | rdev->cp.wptr = RREG32(CP_RB_WPTR); | ||
2237 | 2234 | ||
2238 | r600_cp_start(rdev); | 2235 | r600_cp_start(rdev); |
2239 | rdev->cp.ready = true; | 2236 | rdev->cp.ready = true; |
@@ -2355,21 +2352,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
2355 | } | 2352 | } |
2356 | 2353 | ||
2357 | int r600_copy_blit(struct radeon_device *rdev, | 2354 | int r600_copy_blit(struct radeon_device *rdev, |
2358 | uint64_t src_offset, uint64_t dst_offset, | 2355 | uint64_t src_offset, |
2359 | unsigned num_pages, struct radeon_fence *fence) | 2356 | uint64_t dst_offset, |
2357 | unsigned num_gpu_pages, | ||
2358 | struct radeon_fence *fence) | ||
2360 | { | 2359 | { |
2361 | int r; | 2360 | int r; |
2362 | 2361 | ||
2363 | mutex_lock(&rdev->r600_blit.mutex); | 2362 | mutex_lock(&rdev->r600_blit.mutex); |
2364 | rdev->r600_blit.vb_ib = NULL; | 2363 | rdev->r600_blit.vb_ib = NULL; |
2365 | r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | 2364 | r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
2366 | if (r) { | 2365 | if (r) { |
2367 | if (rdev->r600_blit.vb_ib) | 2366 | if (rdev->r600_blit.vb_ib) |
2368 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 2367 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
2369 | mutex_unlock(&rdev->r600_blit.mutex); | 2368 | mutex_unlock(&rdev->r600_blit.mutex); |
2370 | return r; | 2369 | return r; |
2371 | } | 2370 | } |
2372 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | 2371 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); |
2373 | r600_blit_done_copy(rdev, fence); | 2372 | r600_blit_done_copy(rdev, fence); |
2374 | mutex_unlock(&rdev->r600_blit.mutex); | 2373 | mutex_unlock(&rdev->r600_blit.mutex); |
2375 | return 0; | 2374 | return 0; |
@@ -2994,10 +2993,6 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2994 | /* RPTR_REARM only works if msi's are enabled */ | 2993 | /* RPTR_REARM only works if msi's are enabled */ |
2995 | if (rdev->msi_enabled) | 2994 | if (rdev->msi_enabled) |
2996 | ih_cntl |= RPTR_REARM; | 2995 | ih_cntl |= RPTR_REARM; |
2997 | |||
2998 | #ifdef __BIG_ENDIAN | ||
2999 | ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT); | ||
3000 | #endif | ||
3001 | WREG32(IH_CNTL, ih_cntl); | 2996 | WREG32(IH_CNTL, ih_cntl); |
3002 | 2997 | ||
3003 | /* force the active interrupt state to all disabled */ | 2998 | /* force the active interrupt state to all disabled */ |
@@ -3308,6 +3303,10 @@ int r600_irq_process(struct radeon_device *rdev) | |||
3308 | if (!rdev->ih.enabled || rdev->shutdown) | 3303 | if (!rdev->ih.enabled || rdev->shutdown) |
3309 | return IRQ_NONE; | 3304 | return IRQ_NONE; |
3310 | 3305 | ||
3306 | /* No MSIs, need a dummy read to flush PCI DMAs */ | ||
3307 | if (!rdev->msi_enabled) | ||
3308 | RREG32(IH_RB_WPTR); | ||
3309 | |||
3311 | wptr = r600_get_ih_wptr(rdev); | 3310 | wptr = r600_get_ih_wptr(rdev); |
3312 | rptr = rdev->ih.rptr; | 3311 | rptr = rdev->ih.rptr; |
3313 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | 3312 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); |
@@ -3320,6 +3319,9 @@ int r600_irq_process(struct radeon_device *rdev) | |||
3320 | } | 3319 | } |
3321 | 3320 | ||
3322 | restart_ih: | 3321 | restart_ih: |
3322 | /* Order reading of wptr vs. reading of IH ring data */ | ||
3323 | rmb(); | ||
3324 | |||
3323 | /* display interrupts */ | 3325 | /* display interrupts */ |
3324 | r600_irq_ack(rdev); | 3326 | r600_irq_ack(rdev); |
3325 | 3327 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index c3ab959bdc7..45fd592f960 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -1802,8 +1802,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1802 | /* Set ring buffer size */ | 1802 | /* Set ring buffer size */ |
1803 | #ifdef __BIG_ENDIAN | 1803 | #ifdef __BIG_ENDIAN |
1804 | RADEON_WRITE(R600_CP_RB_CNTL, | 1804 | RADEON_WRITE(R600_CP_RB_CNTL, |
1805 | RADEON_BUF_SWAP_32BIT | | 1805 | R600_BUF_SWAP_32BIT | |
1806 | RADEON_RB_NO_UPDATE | | 1806 | R600_RB_NO_UPDATE | |
1807 | (dev_priv->ring.rptr_update_l2qw << 8) | | 1807 | (dev_priv->ring.rptr_update_l2qw << 8) | |
1808 | dev_priv->ring.size_l2qw); | 1808 | dev_priv->ring.size_l2qw); |
1809 | #else | 1809 | #else |
@@ -1820,15 +1820,15 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1820 | 1820 | ||
1821 | #ifdef __BIG_ENDIAN | 1821 | #ifdef __BIG_ENDIAN |
1822 | RADEON_WRITE(R600_CP_RB_CNTL, | 1822 | RADEON_WRITE(R600_CP_RB_CNTL, |
1823 | RADEON_BUF_SWAP_32BIT | | 1823 | R600_BUF_SWAP_32BIT | |
1824 | RADEON_RB_NO_UPDATE | | 1824 | R600_RB_NO_UPDATE | |
1825 | RADEON_RB_RPTR_WR_ENA | | 1825 | R600_RB_RPTR_WR_ENA | |
1826 | (dev_priv->ring.rptr_update_l2qw << 8) | | 1826 | (dev_priv->ring.rptr_update_l2qw << 8) | |
1827 | dev_priv->ring.size_l2qw); | 1827 | dev_priv->ring.size_l2qw); |
1828 | #else | 1828 | #else |
1829 | RADEON_WRITE(R600_CP_RB_CNTL, | 1829 | RADEON_WRITE(R600_CP_RB_CNTL, |
1830 | RADEON_RB_NO_UPDATE | | 1830 | R600_RB_NO_UPDATE | |
1831 | RADEON_RB_RPTR_WR_ENA | | 1831 | R600_RB_RPTR_WR_ENA | |
1832 | (dev_priv->ring.rptr_update_l2qw << 8) | | 1832 | (dev_priv->ring.rptr_update_l2qw << 8) | |
1833 | dev_priv->ring.size_l2qw); | 1833 | dev_priv->ring.size_l2qw); |
1834 | #endif | 1834 | #endif |
@@ -1851,13 +1851,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1851 | - ((unsigned long) dev->sg->virtual) | 1851 | - ((unsigned long) dev->sg->virtual) |
1852 | + dev_priv->gart_vm_start; | 1852 | + dev_priv->gart_vm_start; |
1853 | } | 1853 | } |
1854 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, | 1854 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc)); |
1855 | #ifdef __BIG_ENDIAN | 1855 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr)); |
1856 | (2 << 0) | | ||
1857 | #endif | ||
1858 | (rptr_addr & 0xfffffffc)); | ||
1859 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, | ||
1860 | upper_32_bits(rptr_addr)); | ||
1861 | 1856 | ||
1862 | #ifdef __BIG_ENDIAN | 1857 | #ifdef __BIG_ENDIAN |
1863 | RADEON_WRITE(R600_CP_RB_CNTL, | 1858 | RADEON_WRITE(R600_CP_RB_CNTL, |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 909bda8dd55..cf83aa05a68 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
915 | { | 915 | { |
916 | struct r600_cs_track *track = (struct r600_cs_track *)p->track; | 916 | struct r600_cs_track *track = (struct r600_cs_track *)p->track; |
917 | struct radeon_cs_reloc *reloc; | 917 | struct radeon_cs_reloc *reloc; |
918 | u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm); | ||
919 | u32 m, i, tmp, *ib; | 918 | u32 m, i, tmp, *ib; |
920 | int r; | 919 | int r; |
921 | 920 | ||
922 | i = (reg >> 7); | 921 | i = (reg >> 7); |
923 | if (i > last_reg) { | 922 | if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { |
924 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 923 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
925 | return -EINVAL; | 924 | return -EINVAL; |
926 | } | 925 | } |
@@ -1200,6 +1199,15 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
1200 | } | 1199 | } |
1201 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1200 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1202 | break; | 1201 | break; |
1202 | case SX_MEMORY_EXPORT_BASE: | ||
1203 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1204 | if (r) { | ||
1205 | dev_warn(p->dev, "bad SET_CONFIG_REG " | ||
1206 | "0x%04X\n", reg); | ||
1207 | return -EINVAL; | ||
1208 | } | ||
1209 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1210 | break; | ||
1203 | default: | 1211 | default: |
1204 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 1212 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
1205 | return -EINVAL; | 1213 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f5ac7e788d8..c45d92191fd 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe( | |||
196 | frame[0xD] = (right_bar >> 8); | 196 | frame[0xD] = (right_bar >> 8); |
197 | 197 | ||
198 | r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame); | 198 | r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame); |
199 | /* Our header values (type, version, length) should be fine; Intel | ||
200 | * uses the same ones. The checksum function also seems OK, since it | ||
201 | * works for the audio infoframe. However, the calculated value is | ||
202 | * always lower by 2 than what fglrx computes, which breaks display | ||
203 | * entirely on TVs that strictly check the checksum. Hack it manually | ||
204 | * here to work around the issue. */ | ||
205 | frame[0x0] += 2; | ||
199 | 206 | ||
200 | WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0, | 207 | WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0, |
201 | frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); | 208 | frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); |
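For context on what the += 2 hack perturbs: an HDMI infoframe checksum byte is chosen so that the type, version, length, and payload bytes all sum to zero mod 256. A sketch of that calculation (an illustrative helper, not the driver's r600_hdmi_infoframe_checksum()):

#include <stdint.h>
#include <stdio.h>

static uint8_t infoframe_checksum(uint8_t type, uint8_t ver, uint8_t len,
                                  const uint8_t *payload)
{
    unsigned sum = type + ver + len;
    unsigned i;

    for (i = 0; i < len; i++)
        sum += payload[i];
    return (uint8_t)(0x100 - (sum & 0xff));   /* total becomes 0 mod 256 */
}

int main(void)
{
    uint8_t frame[13] = { 0 };   /* 13-byte AVI payload, all zero here */

    printf("checksum: 0x%02x\n",
           infoframe_checksum(0x82, 0x02, 0x0D, frame));   /* 0x6f */
    return 0;
}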
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ef0e0e01691..184628c7e02 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -60,7 +60,7 @@ | |||
60 | * are considered as fatal) | 60 | * are considered as fatal) |
61 | */ | 61 | */ |
62 | 62 | ||
63 | #include <asm/atomic.h> | 63 | #include <linux/atomic.h> |
64 | #include <linux/wait.h> | 64 | #include <linux/wait.h> |
65 | #include <linux/list.h> | 65 | #include <linux/list.h> |
66 | #include <linux/kref.h> | 66 | #include <linux/kref.h> |
@@ -93,6 +93,7 @@ extern int radeon_audio; | |||
93 | extern int radeon_disp_priority; | 93 | extern int radeon_disp_priority; |
94 | extern int radeon_hw_i2c; | 94 | extern int radeon_hw_i2c; |
95 | extern int radeon_pcie_gen2; | 95 | extern int radeon_pcie_gen2; |
96 | extern int radeon_msi; | ||
96 | 97 | ||
97 | /* | 98 | /* |
98 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 99 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -322,6 +323,7 @@ union radeon_gart_table { | |||
322 | 323 | ||
323 | #define RADEON_GPU_PAGE_SIZE 4096 | 324 | #define RADEON_GPU_PAGE_SIZE 4096 |
324 | #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) | 325 | #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) |
326 | #define RADEON_GPU_PAGE_SHIFT 12 | ||
325 | 327 | ||
326 | struct radeon_gart { | 328 | struct radeon_gart { |
327 | dma_addr_t table_addr; | 329 | dma_addr_t table_addr; |
@@ -914,17 +916,17 @@ struct radeon_asic { | |||
914 | int (*copy_blit)(struct radeon_device *rdev, | 916 | int (*copy_blit)(struct radeon_device *rdev, |
915 | uint64_t src_offset, | 917 | uint64_t src_offset, |
916 | uint64_t dst_offset, | 918 | uint64_t dst_offset, |
917 | unsigned num_pages, | 919 | unsigned num_gpu_pages, |
918 | struct radeon_fence *fence); | 920 | struct radeon_fence *fence); |
919 | int (*copy_dma)(struct radeon_device *rdev, | 921 | int (*copy_dma)(struct radeon_device *rdev, |
920 | uint64_t src_offset, | 922 | uint64_t src_offset, |
921 | uint64_t dst_offset, | 923 | uint64_t dst_offset, |
922 | unsigned num_pages, | 924 | unsigned num_gpu_pages, |
923 | struct radeon_fence *fence); | 925 | struct radeon_fence *fence); |
924 | int (*copy)(struct radeon_device *rdev, | 926 | int (*copy)(struct radeon_device *rdev, |
925 | uint64_t src_offset, | 927 | uint64_t src_offset, |
926 | uint64_t dst_offset, | 928 | uint64_t dst_offset, |
927 | unsigned num_pages, | 929 | unsigned num_gpu_pages, |
928 | struct radeon_fence *fence); | 930 | struct radeon_fence *fence); |
929 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); | 931 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); |
930 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); | 932 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); |
@@ -1003,6 +1005,7 @@ struct r600_asic { | |||
1003 | unsigned tiling_npipes; | 1005 | unsigned tiling_npipes; |
1004 | unsigned tiling_group_size; | 1006 | unsigned tiling_group_size; |
1005 | unsigned tile_config; | 1007 | unsigned tile_config; |
1008 | unsigned backend_map; | ||
1006 | struct r100_gpu_lockup lockup; | 1009 | struct r100_gpu_lockup lockup; |
1007 | }; | 1010 | }; |
1008 | 1011 | ||
@@ -1028,6 +1031,7 @@ struct rv770_asic { | |||
1028 | unsigned tiling_npipes; | 1031 | unsigned tiling_npipes; |
1029 | unsigned tiling_group_size; | 1032 | unsigned tiling_group_size; |
1030 | unsigned tile_config; | 1033 | unsigned tile_config; |
1034 | unsigned backend_map; | ||
1031 | struct r100_gpu_lockup lockup; | 1035 | struct r100_gpu_lockup lockup; |
1032 | }; | 1036 | }; |
1033 | 1037 | ||
@@ -1054,6 +1058,7 @@ struct evergreen_asic { | |||
1054 | unsigned tiling_npipes; | 1058 | unsigned tiling_npipes; |
1055 | unsigned tiling_group_size; | 1059 | unsigned tiling_group_size; |
1056 | unsigned tile_config; | 1060 | unsigned tile_config; |
1061 | unsigned backend_map; | ||
1057 | struct r100_gpu_lockup lockup; | 1062 | struct r100_gpu_lockup lockup; |
1058 | }; | 1063 | }; |
1059 | 1064 | ||
@@ -1174,7 +1179,7 @@ struct radeon_device { | |||
1174 | /* Register mmio */ | 1179 | /* Register mmio */ |
1175 | resource_size_t rmmio_base; | 1180 | resource_size_t rmmio_base; |
1176 | resource_size_t rmmio_size; | 1181 | resource_size_t rmmio_size; |
1177 | void *rmmio; | 1182 | void __iomem *rmmio; |
1178 | radeon_rreg_t mc_rreg; | 1183 | radeon_rreg_t mc_rreg; |
1179 | radeon_wreg_t mc_wreg; | 1184 | radeon_wreg_t mc_wreg; |
1180 | radeon_rreg_t pll_rreg; | 1185 | radeon_rreg_t pll_rreg; |
@@ -1251,20 +1256,20 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev); | |||
1251 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 1256 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
1252 | { | 1257 | { |
1253 | if (reg < rdev->rmmio_size) | 1258 | if (reg < rdev->rmmio_size) |
1254 | return readl(((void __iomem *)rdev->rmmio) + reg); | 1259 | return readl((rdev->rmmio) + reg); |
1255 | else { | 1260 | else { |
1256 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | 1261 | writel(reg, (rdev->rmmio) + RADEON_MM_INDEX); |
1257 | return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | 1262 | return readl((rdev->rmmio) + RADEON_MM_DATA); |
1258 | } | 1263 | } |
1259 | } | 1264 | } |
1260 | 1265 | ||
1261 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 1266 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
1262 | { | 1267 | { |
1263 | if (reg < rdev->rmmio_size) | 1268 | if (reg < rdev->rmmio_size) |
1264 | writel(v, ((void __iomem *)rdev->rmmio) + reg); | 1269 | writel(v, (rdev->rmmio) + reg); |
1265 | else { | 1270 | else { |
1266 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | 1271 | writel(reg, (rdev->rmmio) + RADEON_MM_INDEX); |
1267 | writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); | 1272 | writel(v, (rdev->rmmio) + RADEON_MM_DATA); |
1268 | } | 1273 | } |
1269 | } | 1274 | } |
1270 | 1275 | ||
@@ -1296,10 +1301,10 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1296 | /* | 1301 | /* |
1297 | * Registers read & write functions. | 1302 | * Registers read & write functions. |
1298 | */ | 1303 | */ |
1299 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) | 1304 | #define RREG8(reg) readb((rdev->rmmio) + (reg)) |
1300 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) | 1305 | #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) |
1301 | #define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) | 1306 | #define RREG16(reg) readw((rdev->rmmio) + (reg)) |
1302 | #define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) | 1307 | #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) |
1303 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) | 1308 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
1304 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) | 1309 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) |
1305 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) | 1310 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
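Typing rmmio as void __iomem * moves the annotation to the declaration, so every readl()/writel() site drops its cast and sparse can flag direct dereferences of MMIO memory. A compilable sketch with the annotation defined away (it is a sparse attribute only inside the kernel; all names here are stand-ins):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define __iomem   /* sparse annotation; no-op outside the kernel */

struct dev_stub {
    void __iomem *rmmio;              /* was: void *rmmio */
    size_t rmmio_size;
};

static uint32_t readl_stub(const void __iomem *addr)
{
    return *(const volatile uint32_t *)addr;   /* stand-in for readl() */
}

static uint32_t mm_rreg(struct dev_stub *d, uint32_t reg)
{
    return readl_stub((const char *)d->rmmio + reg);   /* no cast dance */
}

int main(void)
{
    uint32_t fake_bar[4] = { 1, 2, 3, 4 };
    struct dev_stub d = { fake_bar, sizeof(fake_bar) };

    printf("reg 8 -> %u\n", mm_rreg(&d, 8));   /* element 2: prints 3 */
    return 0;
}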
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index b2449629537..df8218bb83a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -625,7 +625,7 @@ static struct radeon_asic r600_asic = { | |||
625 | .fence_ring_emit = &r600_fence_ring_emit, | 625 | .fence_ring_emit = &r600_fence_ring_emit, |
626 | .cs_parse = &r600_cs_parse, | 626 | .cs_parse = &r600_cs_parse, |
627 | .copy_blit = &r600_copy_blit, | 627 | .copy_blit = &r600_copy_blit, |
628 | .copy_dma = &r600_copy_blit, | 628 | .copy_dma = NULL, |
629 | .copy = &r600_copy_blit, | 629 | .copy = &r600_copy_blit, |
630 | .get_engine_clock = &radeon_atom_get_engine_clock, | 630 | .get_engine_clock = &radeon_atom_get_engine_clock, |
631 | .set_engine_clock = &radeon_atom_set_engine_clock, | 631 | .set_engine_clock = &radeon_atom_set_engine_clock, |
@@ -672,7 +672,7 @@ static struct radeon_asic rs780_asic = { | |||
672 | .fence_ring_emit = &r600_fence_ring_emit, | 672 | .fence_ring_emit = &r600_fence_ring_emit, |
673 | .cs_parse = &r600_cs_parse, | 673 | .cs_parse = &r600_cs_parse, |
674 | .copy_blit = &r600_copy_blit, | 674 | .copy_blit = &r600_copy_blit, |
675 | .copy_dma = &r600_copy_blit, | 675 | .copy_dma = NULL, |
676 | .copy = &r600_copy_blit, | 676 | .copy = &r600_copy_blit, |
677 | .get_engine_clock = &radeon_atom_get_engine_clock, | 677 | .get_engine_clock = &radeon_atom_get_engine_clock, |
678 | .set_engine_clock = &radeon_atom_set_engine_clock, | 678 | .set_engine_clock = &radeon_atom_set_engine_clock, |
@@ -719,7 +719,7 @@ static struct radeon_asic rv770_asic = { | |||
719 | .fence_ring_emit = &r600_fence_ring_emit, | 719 | .fence_ring_emit = &r600_fence_ring_emit, |
720 | .cs_parse = &r600_cs_parse, | 720 | .cs_parse = &r600_cs_parse, |
721 | .copy_blit = &r600_copy_blit, | 721 | .copy_blit = &r600_copy_blit, |
722 | .copy_dma = &r600_copy_blit, | 722 | .copy_dma = NULL, |
723 | .copy = &r600_copy_blit, | 723 | .copy = &r600_copy_blit, |
724 | .get_engine_clock = &radeon_atom_get_engine_clock, | 724 | .get_engine_clock = &radeon_atom_get_engine_clock, |
725 | .set_engine_clock = &radeon_atom_set_engine_clock, | 725 | .set_engine_clock = &radeon_atom_set_engine_clock, |
@@ -766,7 +766,7 @@ static struct radeon_asic evergreen_asic = { | |||
766 | .fence_ring_emit = &r600_fence_ring_emit, | 766 | .fence_ring_emit = &r600_fence_ring_emit, |
767 | .cs_parse = &evergreen_cs_parse, | 767 | .cs_parse = &evergreen_cs_parse, |
768 | .copy_blit = &evergreen_copy_blit, | 768 | .copy_blit = &evergreen_copy_blit, |
769 | .copy_dma = &evergreen_copy_blit, | 769 | .copy_dma = NULL, |
770 | .copy = &evergreen_copy_blit, | 770 | .copy = &evergreen_copy_blit, |
771 | .get_engine_clock = &radeon_atom_get_engine_clock, | 771 | .get_engine_clock = &radeon_atom_get_engine_clock, |
772 | .set_engine_clock = &radeon_atom_set_engine_clock, | 772 | .set_engine_clock = &radeon_atom_set_engine_clock, |
@@ -813,7 +813,7 @@ static struct radeon_asic sumo_asic = { | |||
813 | .fence_ring_emit = &r600_fence_ring_emit, | 813 | .fence_ring_emit = &r600_fence_ring_emit, |
814 | .cs_parse = &evergreen_cs_parse, | 814 | .cs_parse = &evergreen_cs_parse, |
815 | .copy_blit = &evergreen_copy_blit, | 815 | .copy_blit = &evergreen_copy_blit, |
816 | .copy_dma = &evergreen_copy_blit, | 816 | .copy_dma = NULL, |
817 | .copy = &evergreen_copy_blit, | 817 | .copy = &evergreen_copy_blit, |
818 | .get_engine_clock = &radeon_atom_get_engine_clock, | 818 | .get_engine_clock = &radeon_atom_get_engine_clock, |
819 | .set_engine_clock = &radeon_atom_set_engine_clock, | 819 | .set_engine_clock = &radeon_atom_set_engine_clock, |
@@ -860,7 +860,7 @@ static struct radeon_asic btc_asic = { | |||
860 | .fence_ring_emit = &r600_fence_ring_emit, | 860 | .fence_ring_emit = &r600_fence_ring_emit, |
861 | .cs_parse = &evergreen_cs_parse, | 861 | .cs_parse = &evergreen_cs_parse, |
862 | .copy_blit = &evergreen_copy_blit, | 862 | .copy_blit = &evergreen_copy_blit, |
863 | .copy_dma = &evergreen_copy_blit, | 863 | .copy_dma = NULL, |
864 | .copy = &evergreen_copy_blit, | 864 | .copy = &evergreen_copy_blit, |
865 | .get_engine_clock = &radeon_atom_get_engine_clock, | 865 | .get_engine_clock = &radeon_atom_get_engine_clock, |
866 | .set_engine_clock = &radeon_atom_set_engine_clock, | 866 | .set_engine_clock = &radeon_atom_set_engine_clock, |
@@ -907,7 +907,7 @@ static struct radeon_asic cayman_asic = { | |||
907 | .fence_ring_emit = &r600_fence_ring_emit, | 907 | .fence_ring_emit = &r600_fence_ring_emit, |
908 | .cs_parse = &evergreen_cs_parse, | 908 | .cs_parse = &evergreen_cs_parse, |
909 | .copy_blit = &evergreen_copy_blit, | 909 | .copy_blit = &evergreen_copy_blit, |
910 | .copy_dma = &evergreen_copy_blit, | 910 | .copy_dma = NULL, |
911 | .copy = &evergreen_copy_blit, | 911 | .copy = &evergreen_copy_blit, |
912 | .get_engine_clock = &radeon_atom_get_engine_clock, | 912 | .get_engine_clock = &radeon_atom_get_engine_clock, |
913 | .set_engine_clock = &radeon_atom_set_engine_clock, | 913 | .set_engine_clock = &radeon_atom_set_engine_clock, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d7a0d7c6a9..3dedaa07aac 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); | |||
75 | int r100_copy_blit(struct radeon_device *rdev, | 75 | int r100_copy_blit(struct radeon_device *rdev, |
76 | uint64_t src_offset, | 76 | uint64_t src_offset, |
77 | uint64_t dst_offset, | 77 | uint64_t dst_offset, |
78 | unsigned num_pages, | 78 | unsigned num_gpu_pages, |
79 | struct radeon_fence *fence); | 79 | struct radeon_fence *fence); |
80 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | 80 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
81 | uint32_t tiling_flags, uint32_t pitch, | 81 | uint32_t tiling_flags, uint32_t pitch, |
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | |||
143 | extern int r200_copy_dma(struct radeon_device *rdev, | 143 | extern int r200_copy_dma(struct radeon_device *rdev, |
144 | uint64_t src_offset, | 144 | uint64_t src_offset, |
145 | uint64_t dst_offset, | 145 | uint64_t dst_offset, |
146 | unsigned num_pages, | 146 | unsigned num_gpu_pages, |
147 | struct radeon_fence *fence); | 147 | struct radeon_fence *fence); |
148 | void r200_set_safe_registers(struct radeon_device *rdev); | 148 | void r200_set_safe_registers(struct radeon_device *rdev); |
149 | 149 | ||
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | |||
311 | int r600_ring_test(struct radeon_device *rdev); | 311 | int r600_ring_test(struct radeon_device *rdev); |
312 | int r600_copy_blit(struct radeon_device *rdev, | 312 | int r600_copy_blit(struct radeon_device *rdev, |
313 | uint64_t src_offset, uint64_t dst_offset, | 313 | uint64_t src_offset, uint64_t dst_offset, |
314 | unsigned num_pages, struct radeon_fence *fence); | 314 | unsigned num_gpu_pages, struct radeon_fence *fence); |
315 | void r600_hpd_init(struct radeon_device *rdev); | 315 | void r600_hpd_init(struct radeon_device *rdev); |
316 | void r600_hpd_fini(struct radeon_device *rdev); | 316 | void r600_hpd_fini(struct radeon_device *rdev); |
317 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 317 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev); | |||
403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
404 | int evergreen_copy_blit(struct radeon_device *rdev, | 404 | int evergreen_copy_blit(struct radeon_device *rdev, |
405 | uint64_t src_offset, uint64_t dst_offset, | 405 | uint64_t src_offset, uint64_t dst_offset, |
406 | unsigned num_pages, struct radeon_fence *fence); | 406 | unsigned num_gpu_pages, struct radeon_fence *fence); |
407 | void evergreen_hpd_init(struct radeon_device *rdev); | 407 | void evergreen_hpd_init(struct radeon_device *rdev); |
408 | void evergreen_hpd_fini(struct radeon_device *rdev); | 408 | void evergreen_hpd_fini(struct radeon_device *rdev); |
409 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 409 | bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index bf2b61584cd..a098edcf662 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -85,6 +85,18 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
85 | for (i = 0; i < num_indices; i++) { | 85 | for (i = 0; i < num_indices; i++) { |
86 | gpio = &i2c_info->asGPIO_Info[i]; | 86 | gpio = &i2c_info->asGPIO_Info[i]; |
87 | 87 | ||
88 | /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ | ||
89 | if ((rdev->family == CHIP_R420) || | ||
90 | (rdev->family == CHIP_R423) || | ||
91 | (rdev->family == CHIP_RV410)) { | ||
92 | if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || | ||
93 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || | ||
94 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { | ||
95 | gpio->ucClkMaskShift = 0x19; | ||
96 | gpio->ucDataMaskShift = 0x18; | ||
97 | } | ||
98 | } | ||
99 | |||
88 | /* some evergreen boards have bad data for this entry */ | 100 | /* some evergreen boards have bad data for this entry */ |
89 | if (ASIC_IS_DCE4(rdev)) { | 101 | if (ASIC_IS_DCE4(rdev)) { |
90 | if ((i == 7) && | 102 | if ((i == 7) && |
@@ -169,6 +181,18 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
169 | gpio = &i2c_info->asGPIO_Info[i]; | 181 | gpio = &i2c_info->asGPIO_Info[i]; |
170 | i2c.valid = false; | 182 | i2c.valid = false; |
171 | 183 | ||
184 | /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ | ||
185 | if ((rdev->family == CHIP_R420) || | ||
186 | (rdev->family == CHIP_R423) || | ||
187 | (rdev->family == CHIP_RV410)) { | ||
188 | if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || | ||
189 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || | ||
190 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { | ||
191 | gpio->ucClkMaskShift = 0x19; | ||
192 | gpio->ucDataMaskShift = 0x18; | ||
193 | } | ||
194 | } | ||
195 | |||
172 | /* some evergreen boards have bad data for this entry */ | 196 | /* some evergreen boards have bad data for this entry */ |
173 | if (ASIC_IS_DCE4(rdev)) { | 197 | if (ASIC_IS_DCE4(rdev)) { |
174 | if ((i == 7) && | 198 | if ((i == 7) && |
@@ -2544,7 +2568,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
2544 | 2568 | ||
2545 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 2569 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
2546 | rdev->pm.current_clock_mode_index = 0; | 2570 | rdev->pm.current_clock_mode_index = 0; |
2547 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 2571 | if (rdev->pm.default_power_state_index >= 0) |
2572 | rdev->pm.current_vddc = | ||
2573 | rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | ||
2574 | else | ||
2575 | rdev->pm.current_vddc = 0; | ||
2548 | } | 2576 | } |
2549 | 2577 | ||
2550 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) | 2578 | void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) |
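The power-mode hunk guards an array access: default_power_state_index is initialized to -1 and stays there when no usable state is parsed from the BIOS tables, so indexing power_state[] with it unconditionally read out of bounds. Reduced model with illustrative structures:

#include <stdio.h>

struct state { int voltage; };

int main(void)
{
    struct state power_state[2] = { { 900 }, { 1100 } };
    int default_idx = -1;   /* nothing parsed from the BIOS tables */
    int current_vddc;

    if (default_idx >= 0)
        current_vddc = power_state[default_idx].voltage;
    else
        current_vddc = 0;   /* was: power_state[-1], out of bounds */

    printf("vddc=%d\n", current_vddc);
    return 0;
}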
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 2d48e7a1474..b6e18c8db9f 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -96,7 +96,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) | |||
96 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device | 96 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device |
97 | * tree. Hopefully, ATI OF driver is kind enough to fill these | 97 | * tree. Hopefully, ATI OF driver is kind enough to fill these |
98 | */ | 98 | */ |
99 | static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | 99 | static bool radeon_read_clocks_OF(struct drm_device *dev) |
100 | { | 100 | { |
101 | struct radeon_device *rdev = dev->dev_private; | 101 | struct radeon_device *rdev = dev->dev_private; |
102 | struct device_node *dp = rdev->pdev->dev.of_node; | 102 | struct device_node *dp = rdev->pdev->dev.of_node; |
@@ -166,7 +166,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | |||
166 | return true; | 166 | return true; |
167 | } | 167 | } |
168 | #else | 168 | #else |
169 | static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) | 169 | static bool radeon_read_clocks_OF(struct drm_device *dev) |
170 | { | 170 | { |
171 | return false; | 171 | return false; |
172 | } | 172 | } |
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
219 | } else { | 219 | } else { |
220 | DRM_INFO("Using generic clock info\n"); | 220 | DRM_INFO("Using generic clock info\n"); |
221 | 221 | ||
222 | /* may need to be per card */ | ||
223 | rdev->clock.max_pixel_clock = 35000; | ||
224 | |||
222 | if (rdev->flags & RADEON_IS_IGP) { | 225 | if (rdev->flags & RADEON_IS_IGP) { |
223 | p1pll->reference_freq = 1432; | 226 | p1pll->reference_freq = 1432; |
224 | p2pll->reference_freq = 1432; | 227 | p2pll->reference_freq = 1432; |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index e4594676a07..8bf83c4b414 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -620,8 +620,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde | |||
620 | i2c.y_data_mask = 0x80; | 620 | i2c.y_data_mask = 0x80; |
621 | } else { | 621 | } else { |
622 | /* default masks for ddc pads */ | 622 | /* default masks for ddc pads */ |
623 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | 623 | i2c.mask_clk_mask = RADEON_GPIO_MASK_1; |
624 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 624 | i2c.mask_data_mask = RADEON_GPIO_MASK_0; |
625 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 625 | i2c.a_clk_mask = RADEON_GPIO_A_1; |
626 | i2c.a_data_mask = RADEON_GPIO_A_0; | 626 | i2c.a_data_mask = RADEON_GPIO_A_0; |
627 | i2c.en_clk_mask = RADEON_GPIO_EN_1; | 627 | i2c.en_clk_mask = RADEON_GPIO_EN_1; |
@@ -779,7 +779,8 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) | |||
779 | } | 779 | } |
780 | } | 780 | } |
781 | } | 781 | } |
782 | } else if (rdev->family >= CHIP_R200) { | 782 | } else if ((rdev->family == CHIP_R200) || |
783 | (rdev->family >= CHIP_R300)) { | ||
783 | /* 0x68 */ | 784 | /* 0x68 */ |
784 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | 785 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); |
785 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); | 786 | rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); |
@@ -2556,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2556 | u16 offset, misc, misc2 = 0; | 2557 | u16 offset, misc, misc2 = 0; |
2557 | u8 rev, blocks, tmp; | 2558 | u8 rev, blocks, tmp; |
2558 | int state_index = 0; | 2559 | int state_index = 0; |
2560 | struct radeon_i2c_bus_rec i2c_bus; | ||
2559 | 2561 | ||
2560 | rdev->pm.default_power_state_index = -1; | 2562 | rdev->pm.default_power_state_index = -1; |
2561 | 2563 | ||
@@ -2574,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2574 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); | 2576 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); |
2575 | if (offset) { | 2577 | if (offset) { |
2576 | u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; | 2578 | u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; |
2577 | struct radeon_i2c_bus_rec i2c_bus; | ||
2578 | 2579 | ||
2579 | rev = RBIOS8(offset); | 2580 | rev = RBIOS8(offset); |
2580 | 2581 | ||
@@ -2616,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2616 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | 2617 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); |
2617 | } | 2618 | } |
2618 | } | 2619 | } |
2620 | } else { | ||
2621 | /* boards with a thermal chip, but no overdrive table */ | ||
2622 | |||
2623 | /* Asus 9600xt has an f75375 on the monid bus */ | ||
2624 | if ((dev->pdev->device == 0x4152) && | ||
2625 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
2626 | (dev->pdev->subsystem_device == 0xc002)) { | ||
2627 | i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | ||
2628 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | ||
2629 | if (rdev->pm.i2c_bus) { | ||
2630 | struct i2c_board_info info = { }; | ||
2631 | const char *name = "f75375"; | ||
2632 | info.addr = 0x28; | ||
2633 | strlcpy(info.type, name, sizeof(info.type)); | ||
2634 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
2635 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | ||
2636 | name, info.addr); | ||
2637 | } | ||
2638 | } | ||
2619 | } | 2639 | } |
2620 | 2640 | ||
2621 | if (rdev->flags & RADEON_IS_MOBILITY) { | 2641 | if (rdev->flags & RADEON_IS_MOBILITY) { |
@@ -3278,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
3278 | rdev->pdev->subsystem_device == 0x30a4) | 3298 | rdev->pdev->subsystem_device == 0x30a4) |
3279 | return; | 3299 | return; |
3280 | 3300 | ||
3301 | /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume | ||
3302 | * - it hangs on resume inside the dynclk 1 table. | ||
3303 | */ | ||
3304 | if (rdev->family == CHIP_RS480 && | ||
3305 | rdev->pdev->subsystem_vendor == 0x103c && | ||
3306 | rdev->pdev->subsystem_device == 0x30ae) | ||
3307 | return; | ||
3308 | |||
3281 | /* DYN CLK 1 */ | 3309 | /* DYN CLK 1 */ |
3282 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3310 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
3283 | if (table) | 3311 | if (table) |
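[Editor's note] Both combios additions above follow the same pattern: scope a fix to one OEM board by matching PCI subsystem IDs (0x103c is HP/Compaq, 0x1043 is Asus). A small table-driven version of that idea, illustrative only, since the driver deliberately open-codes each check:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct board_quirk {
            uint16_t subsystem_vendor;
            uint16_t subsystem_device;
    };

    /* boards that must skip the DYN CLK 1 table (IDs from the hunk above) */
    static const struct board_quirk skip_dynclk[] = {
            { 0x103c, 0x30ae },     /* Compaq Presario V5245EU (RS480) */
            /* further boards would be added here as they are reported */
    };

    static bool board_has_quirk(const struct board_quirk *q, size_t n,
                                uint16_t sv, uint16_t sd)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (q[i].subsystem_vendor == sv &&
                        q[i].subsystem_device == sd)
                            return true;
            return false;
    }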
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 9792d4ffdc8..a58b4522404 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -44,8 +44,6 @@ extern void | |||
44 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | 44 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, |
45 | struct drm_connector *drm_connector); | 45 | struct drm_connector *drm_connector); |
46 | 46 | ||
47 | bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); | ||
48 | |||
49 | void radeon_connector_hotplug(struct drm_connector *connector) | 47 | void radeon_connector_hotplug(struct drm_connector *connector) |
50 | { | 48 | { |
51 | struct drm_device *dev = connector->dev; | 49 | struct drm_device *dev = connector->dev; |
@@ -60,18 +58,20 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
60 | 58 | ||
61 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | 59 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); |
62 | 60 | ||
63 | /* powering up/down the eDP panel generates hpd events which | 61 | /* if the connector is already off, don't turn it back on */ |
64 | * can interfere with modesetting. | 62 | if (connector->dpms != DRM_MODE_DPMS_ON) |
65 | */ | ||
66 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
67 | return; | 63 | return; |
68 | 64 | ||
69 | /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ | 65 | /* just deal with DP (not eDP) here. */ |
70 | if (rdev->family >= CHIP_R600) { | 66 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | 67 | int saved_dpms = connector->dpms; |
72 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 68 | |
73 | else | 69 | /* Only turn off the display if it's physically disconnected */ |
70 | if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
74 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 71 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
72 | else if (radeon_dp_needs_link_train(radeon_connector)) | ||
73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
74 | connector->dpms = saved_dpms; | ||
75 | } | 75 | } |
76 | } | 76 | } |
77 | 77 | ||
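[Editor's note] The rewritten hotplug handler above is careful about two things: it never powers a connector back on that the user had turned off, and it restores connector->dpms afterwards because drm_helper_connector_dpms() overwrites that field as a side effect. Condensed, with the reasoning as comments (fragment, not the literal driver code):

    if (connector->dpms != DRM_MODE_DPMS_ON)
            return;         /* user disabled the output; a hotplug event
                               must not silently re-enable it */

    if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
            int saved_dpms = connector->dpms;

            if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
                    drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
            else if (radeon_dp_needs_link_train(radeon_connector))
                    drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);

            connector->dpms = saved_dpms;   /* the helper clobbered it */
    }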
@@ -430,16 +430,73 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
430 | return 0; | 430 | return 0; |
431 | } | 431 | } |
432 | 432 | ||
433 | /* | ||
434 | * Some integrated ATI Radeon chipset implementations (e. g. | ||
435 | * Asus M2A-VM HDMI) may indicate the availability of a DDC, | ||
436 | * even when there's no monitor connected. For these connectors | ||
437 | * following DDC probe extension will be applied: check also for the | ||
438 | * availability of EDID with at least a correct EDID header. Only then, | ||
439 | * DDC is assumed to be available. This prevents drm_get_edid() and | ||
440 | * drm_edid_block_valid() from periodically dumping data and kernel | ||
441 | * errors into the logs and onto the terminal. | ||
442 | */ | ||
443 | static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, | ||
444 | uint32_t supported_device, | ||
445 | int connector_type) | ||
446 | { | ||
447 | /* Asus M2A-VM HDMI board sends data to i2c bus even |||
448 | * if HDMI add-on card is not plugged in or HDMI is disabled in | ||
449 | * BIOS. Valid DDC can only be assumed, if also a valid EDID header | ||
450 | * can be retrieved via i2c bus during DDC probe */ | ||
451 | if ((dev->pdev->device == 0x791e) && | ||
452 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
453 | (dev->pdev->subsystem_device == 0x826d)) { | ||
454 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
455 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
456 | return true; | ||
457 | } | ||
458 | /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus | ||
459 | * for a DVI connector that is not implemented */ | ||
460 | if ((dev->pdev->device == 0x796e) && | ||
461 | (dev->pdev->subsystem_vendor == 0x1019) && | ||
462 | (dev->pdev->subsystem_device == 0x2615)) { | ||
463 | if ((connector_type == DRM_MODE_CONNECTOR_DVID) && | ||
464 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
465 | return true; | ||
466 | } | ||
467 | /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 | ||
468 | * (RS690M) sends data to i2c bus for a HDMI connector that | ||
469 | * is not implemented */ | ||
470 | if ((dev->pdev->device == 0x791f) && | ||
471 | (dev->pdev->subsystem_vendor == 0x1179) && | ||
472 | (dev->pdev->subsystem_device == 0xff68)) { | ||
473 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
474 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
475 | return true; | ||
476 | } | ||
477 | |||
478 | /* Default: no EDID header probe required for DDC probing */ | ||
479 | return false; | ||
480 | } | ||
481 | |||
433 | static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | 482 | static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, |
434 | struct drm_connector *connector) | 483 | struct drm_connector *connector) |
435 | { | 484 | { |
436 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 485 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
437 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | 486 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
487 | struct drm_display_mode *t, *mode; | ||
488 | |||
489 | /* If the EDID preferred mode doesn't match the native mode, use it */ | ||
490 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | ||
491 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | ||
492 | if (mode->hdisplay != native_mode->hdisplay || | ||
493 | mode->vdisplay != native_mode->vdisplay) | ||
494 | memcpy(native_mode, mode, sizeof(*mode)); | ||
495 | } | ||
496 | } | ||
438 | 497 | ||
439 | /* Try to get native mode details from EDID if necessary */ | 498 | /* Try to get native mode details from EDID if necessary */ |
440 | if (!native_mode->clock) { | 499 | if (!native_mode->clock) { |
441 | struct drm_display_mode *t, *mode; | ||
442 | |||
443 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { | 500 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { |
444 | if (mode->hdisplay == native_mode->hdisplay && | 501 | if (mode->hdisplay == native_mode->hdisplay && |
445 | mode->vdisplay == native_mode->vdisplay) { | 502 | mode->vdisplay == native_mode->vdisplay) { |
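[Editor's note] radeon_connector_needs_extended_probe() above whitelists three boards (Asus M2A-VM HDMI, ECS A740GM-M, Toshiba Satellite L300D) whose i2c lines answer at the DDC address even with nothing attached; for those, radeon_ddc_probe() (see radeon_i2c.c later in this diff) additionally reads the start of the would-be EDID and scores it against the fixed 8-byte signature. A runnable sketch of that scoring, mirroring what drm_edid_header_is_valid() computes:

    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t edid_header[8] = {
            0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
    };

    /* count matching leading header bytes */
    static int edid_header_score(const uint8_t *buf)
    {
            int i, score = 0;

            for (i = 0; i < 8; i++)
                    if (buf[i] == edid_header[i])
                            score++;
            return score;
    }

    int main(void)
    {
            uint8_t good[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
            uint8_t junk[8] = { 0 };

            /* the driver accepts a score >= 6: drm_edid_block_valid()
             * can repair the last two header bytes */
            printf("good=%d junk=%d\n",
                   edid_header_score(good), edid_header_score(junk));
            return 0;
    }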
@@ -450,6 +507,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
450 | } | 507 | } |
451 | } | 508 | } |
452 | } | 509 | } |
510 | |||
453 | if (!native_mode->clock) { | 511 | if (!native_mode->clock) { |
454 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); | 512 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); |
455 | radeon_encoder->rmx_type = RMX_OFF; | 513 | radeon_encoder->rmx_type = RMX_OFF; |
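[Editor's note] The radeon_fixup_lvds_native_mode() change in the same region lets an EDID mode flagged DRM_MODE_TYPE_PREFERRED replace the VBIOS-derived panel mode whenever the resolutions disagree, rather than consulting EDID only when no native clock is known. Condensed (fragment):

    /* if the EDID preferred mode doesn't match the native mode, use it */
    list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
            if ((mode->type & DRM_MODE_TYPE_PREFERRED) &&
                (mode->hdisplay != native_mode->hdisplay ||
                 mode->vdisplay != native_mode->vdisplay))
                    memcpy(native_mode, mode, sizeof(*mode));
    }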
@@ -661,8 +719,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
661 | ret = connector_status_disconnected; | 719 | ret = connector_status_disconnected; |
662 | 720 | ||
663 | if (radeon_connector->ddc_bus) | 721 | if (radeon_connector->ddc_bus) |
664 | dret = radeon_ddc_probe(radeon_connector); | 722 | dret = radeon_ddc_probe(radeon_connector, |
723 | radeon_connector->requires_extended_probe); | ||
665 | if (dret) { | 724 | if (dret) { |
725 | radeon_connector->detected_by_load = false; | ||
666 | if (radeon_connector->edid) { | 726 | if (radeon_connector->edid) { |
667 | kfree(radeon_connector->edid); | 727 | kfree(radeon_connector->edid); |
668 | radeon_connector->edid = NULL; | 728 | radeon_connector->edid = NULL; |
@@ -689,12 +749,21 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
689 | } else { | 749 | } else { |
690 | 750 | ||
691 | /* if we aren't forcing don't do destructive polling */ | 751 | /* if we aren't forcing don't do destructive polling */ |
692 | if (!force) | 752 | if (!force) { |
693 | return connector->status; | 753 | /* only return the previous status if we last |
754 | * detected a monitor via load. | ||
755 | */ | ||
756 | if (radeon_connector->detected_by_load) | ||
757 | return connector->status; | ||
758 | else | ||
759 | return ret; | ||
760 | } | ||
694 | 761 | ||
695 | if (radeon_connector->dac_load_detect && encoder) { | 762 | if (radeon_connector->dac_load_detect && encoder) { |
696 | encoder_funcs = encoder->helper_private; | 763 | encoder_funcs = encoder->helper_private; |
697 | ret = encoder_funcs->detect(encoder, connector); | 764 | ret = encoder_funcs->detect(encoder, connector); |
765 | if (ret != connector_status_disconnected) | ||
766 | radeon_connector->detected_by_load = true; | ||
698 | } | 767 | } |
699 | } | 768 | } |
700 | 769 | ||
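[Editor's note] The new detected_by_load flag exists because the two detection methods have different lifetimes: a DDC probe is cheap and safe to repeat from the periodic poll, while DAC load detection is destructive and only runs on forced detects. The cached connector->status is therefore only trustworthy from the poll path if it originally came from load detection; otherwise a vanished DDC response should downgrade the status immediately. The flow, condensed from the hunk (fragment):

    if (!force) {
            /* non-forced poll: DDC said nothing is connected */
            if (radeon_connector->detected_by_load)
                    return connector->status;   /* trust earlier load detect */
            return ret;                         /* else report what DDC saw */
    }

    /* forced path: load detection may run; remember where status came from */
    ret = encoder_funcs->detect(encoder, connector);
    if (ret != connector_status_disconnected)
            radeon_connector->detected_by_load = true;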
@@ -833,8 +902,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
833 | bool dret = false; | 902 | bool dret = false; |
834 | 903 | ||
835 | if (radeon_connector->ddc_bus) | 904 | if (radeon_connector->ddc_bus) |
836 | dret = radeon_ddc_probe(radeon_connector); | 905 | dret = radeon_ddc_probe(radeon_connector, |
906 | radeon_connector->requires_extended_probe); | ||
837 | if (dret) { | 907 | if (dret) { |
908 | radeon_connector->detected_by_load = false; | ||
838 | if (radeon_connector->edid) { | 909 | if (radeon_connector->edid) { |
839 | kfree(radeon_connector->edid); | 910 | kfree(radeon_connector->edid); |
840 | radeon_connector->edid = NULL; | 911 | radeon_connector->edid = NULL; |
@@ -897,8 +968,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
897 | if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) | 968 | if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) |
898 | goto out; | 969 | goto out; |
899 | 970 | ||
971 | /* DVI-D and HDMI-A are digital only */ | ||
972 | if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) || | ||
973 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA)) | ||
974 | goto out; | ||
975 | |||
976 | /* if we aren't forcing don't do destructive polling */ | ||
900 | if (!force) { | 977 | if (!force) { |
901 | ret = connector->status; | 978 | /* only return the previous status if we last |
979 | * detected a monitor via load. | ||
980 | */ | ||
981 | if (radeon_connector->detected_by_load) | ||
982 | ret = connector->status; | ||
902 | goto out; | 983 | goto out; |
903 | } | 984 | } |
904 | 985 | ||
@@ -923,6 +1004,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
923 | if (ret == connector_status_connected) { | 1004 | if (ret == connector_status_connected) { |
924 | radeon_connector->use_digital = false; | 1005 | radeon_connector->use_digital = false; |
925 | } | 1006 | } |
1007 | if (ret != connector_status_disconnected) | ||
1008 | radeon_connector->detected_by_load = true; | ||
926 | } | 1009 | } |
927 | break; | 1010 | break; |
928 | } | 1011 | } |
@@ -1119,7 +1202,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) | |||
1119 | } | 1202 | } |
1120 | } else { | 1203 | } else { |
1121 | /* need to setup ddc on the bridge */ | 1204 | /* need to setup ddc on the bridge */ |
1122 | if (radeon_connector_encoder_is_dp_bridge(connector)) { | 1205 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != |
1206 | ENCODER_OBJECT_ID_NONE) { | ||
1123 | if (encoder) | 1207 | if (encoder) |
1124 | radeon_atom_ext_encoder_setup_ddc(encoder); | 1208 | radeon_atom_ext_encoder_setup_ddc(encoder); |
1125 | } | 1209 | } |
@@ -1129,13 +1213,12 @@ static int radeon_dp_get_modes(struct drm_connector *connector) | |||
1129 | return ret; | 1213 | return ret; |
1130 | } | 1214 | } |
1131 | 1215 | ||
1132 | bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) | 1216 | u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) |
1133 | { | 1217 | { |
1134 | struct drm_mode_object *obj; | 1218 | struct drm_mode_object *obj; |
1135 | struct drm_encoder *encoder; | 1219 | struct drm_encoder *encoder; |
1136 | struct radeon_encoder *radeon_encoder; | 1220 | struct radeon_encoder *radeon_encoder; |
1137 | int i; | 1221 | int i; |
1138 | bool found = false; | ||
1139 | 1222 | ||
1140 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 1223 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
1141 | if (connector->encoder_ids[i] == 0) | 1224 | if (connector->encoder_ids[i] == 0) |
@@ -1151,14 +1234,13 @@ bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) | |||
1151 | switch (radeon_encoder->encoder_id) { | 1234 | switch (radeon_encoder->encoder_id) { |
1152 | case ENCODER_OBJECT_ID_TRAVIS: | 1235 | case ENCODER_OBJECT_ID_TRAVIS: |
1153 | case ENCODER_OBJECT_ID_NUTMEG: | 1236 | case ENCODER_OBJECT_ID_NUTMEG: |
1154 | found = true; | 1237 | return radeon_encoder->encoder_id; |
1155 | break; | ||
1156 | default: | 1238 | default: |
1157 | break; | 1239 | break; |
1158 | } | 1240 | } |
1159 | } | 1241 | } |
1160 | 1242 | ||
1161 | return found; | 1243 | return ENCODER_OBJECT_ID_NONE; |
1162 | } | 1244 | } |
1163 | 1245 | ||
1164 | bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) | 1246 | bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) |
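[Editor's note] Changing the is_dp_bridge() helpers from bool to a u16 encoder object id keeps the old yes/no use intact (compare against ENCODER_OBJECT_ID_NONE) while letting new call sites distinguish the NUTMEG (DP to VGA) from the TRAVIS (DP to LVDS) bridge. Typical call-site shape (sketch):

    u16 bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);

    if (bridge == ENCODER_OBJECT_ID_NONE)
            return;                         /* no external DP bridge here */

    /* old boolean use-case: "there is a bridge, route DDC through it" */
    radeon_atom_ext_encoder_setup_ddc(encoder);

    /* new use-case: behaviour can depend on which bridge is present */
    switch (bridge) {
    case ENCODER_OBJECT_ID_NUTMEG:          /* DP to VGA */
    case ENCODER_OBJECT_ID_TRAVIS:          /* DP to LVDS */
            break;
    }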
@@ -1235,12 +1317,25 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1235 | if (!radeon_dig_connector->edp_on) | 1317 | if (!radeon_dig_connector->edp_on) |
1236 | atombios_set_edp_panel_power(connector, | 1318 | atombios_set_edp_panel_power(connector, |
1237 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | 1319 | ATOM_TRANSMITTER_ACTION_POWER_OFF); |
1238 | } else { | 1320 | } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != |
1239 | /* need to setup ddc on the bridge */ | 1321 | ENCODER_OBJECT_ID_NONE) { |
1240 | if (radeon_connector_encoder_is_dp_bridge(connector)) { | 1322 | /* DP bridges are always DP */ |
1241 | if (encoder) | 1323 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1242 | radeon_atom_ext_encoder_setup_ddc(encoder); | 1324 | /* get the DPCD from the bridge */ |
1325 | radeon_dp_getdpcd(radeon_connector); | ||
1326 | |||
1327 | if (encoder) { | ||
1328 | /* setup ddc on the bridge */ | ||
1329 | radeon_atom_ext_encoder_setup_ddc(encoder); | ||
1330 | if (radeon_ddc_probe(radeon_connector, | ||
1331 | radeon_connector->requires_extended_probe)) /* try DDC */ | ||
1332 | ret = connector_status_connected; | ||
1333 | else if (radeon_connector->dac_load_detect) { /* try load detection */ | ||
1334 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
1335 | ret = encoder_funcs->detect(encoder, connector); | ||
1336 | } | ||
1243 | } | 1337 | } |
1338 | } else { | ||
1244 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); | 1339 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
1245 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { | 1340 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
1246 | ret = connector_status_connected; | 1341 | ret = connector_status_connected; |
@@ -1251,20 +1346,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1251 | if (radeon_dp_getdpcd(radeon_connector)) | 1346 | if (radeon_dp_getdpcd(radeon_connector)) |
1252 | ret = connector_status_connected; | 1347 | ret = connector_status_connected; |
1253 | } else { | 1348 | } else { |
1254 | if (radeon_ddc_probe(radeon_connector)) | 1349 | if (radeon_ddc_probe(radeon_connector, |
1350 | radeon_connector->requires_extended_probe)) | ||
1255 | ret = connector_status_connected; | 1351 | ret = connector_status_connected; |
1256 | } | 1352 | } |
1257 | } | 1353 | } |
1258 | |||
1259 | if ((ret == connector_status_disconnected) && | ||
1260 | radeon_connector->dac_load_detect) { | ||
1261 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
1262 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
1263 | if (encoder) { | ||
1264 | encoder_funcs = encoder->helper_private; | ||
1265 | ret = encoder_funcs->detect(encoder, connector); | ||
1266 | } | ||
1267 | } | ||
1268 | } | 1354 | } |
1269 | 1355 | ||
1270 | radeon_connector_update_scratch_regs(connector, ret); | 1356 | radeon_connector_update_scratch_regs(connector, ret); |
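[Editor's note] For connectors behind an external DP bridge, detection now has a fixed precedence: force the sink type to DisplayPort, fetch the DPCD through the bridge, then decide connectivity by DDC probe, with DAC load detection as the fallback (e.g. for a VGA monitor behind a NUTMEG). The old catch-all load-detect block at the end of radeon_dp_detect() is dropped because this path now covers it. Order of operations, condensed (fragment):

    radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
    radeon_dp_getdpcd(radeon_connector);          /* DPCD via the bridge */

    radeon_atom_ext_encoder_setup_ddc(encoder);   /* route DDC through it */
    if (radeon_ddc_probe(radeon_connector,
                         radeon_connector->requires_extended_probe))
            ret = connector_status_connected;     /* a monitor answered */
    else if (radeon_connector->dac_load_detect)
            ret = encoder_funcs->detect(encoder, connector);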
@@ -1406,6 +1492,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1406 | radeon_connector->shared_ddc = shared_ddc; | 1492 | radeon_connector->shared_ddc = shared_ddc; |
1407 | radeon_connector->connector_object_id = connector_object_id; | 1493 | radeon_connector->connector_object_id = connector_object_id; |
1408 | radeon_connector->hpd = *hpd; | 1494 | radeon_connector->hpd = *hpd; |
1495 | radeon_connector->requires_extended_probe = | ||
1496 | radeon_connector_needs_extended_probe(rdev, supported_device, | ||
1497 | connector_type); | ||
1409 | radeon_connector->router = *router; | 1498 | radeon_connector->router = *router; |
1410 | if (router->ddc_valid || router->cd_valid) { | 1499 | if (router->ddc_valid || router->cd_valid) { |
1411 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1500 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
@@ -1752,6 +1841,9 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1752 | radeon_connector->devices = supported_device; | 1841 | radeon_connector->devices = supported_device; |
1753 | radeon_connector->connector_object_id = connector_object_id; | 1842 | radeon_connector->connector_object_id = connector_object_id; |
1754 | radeon_connector->hpd = *hpd; | 1843 | radeon_connector->hpd = *hpd; |
1844 | radeon_connector->requires_extended_probe = | ||
1845 | radeon_connector_needs_extended_probe(rdev, supported_device, | ||
1846 | connector_type); | ||
1755 | switch (connector_type) { | 1847 | switch (connector_type) { |
1756 | case DRM_MODE_CONNECTOR_VGA: | 1848 | case DRM_MODE_CONNECTOR_VGA: |
1757 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1849 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 75867792a4e..045ec59478f 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -2115,7 +2115,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
2115 | 2115 | ||
2116 | if (drm_pci_device_is_agp(dev)) | 2116 | if (drm_pci_device_is_agp(dev)) |
2117 | dev_priv->flags |= RADEON_IS_AGP; | 2117 | dev_priv->flags |= RADEON_IS_AGP; |
2118 | else if (drm_pci_device_is_pcie(dev)) | 2118 | else if (pci_is_pcie(dev->pdev)) |
2119 | dev_priv->flags |= RADEON_IS_PCIE; | 2119 | dev_priv->flags |= RADEON_IS_PCIE; |
2120 | else | 2120 | else |
2121 | dev_priv->flags |= RADEON_IS_PCI; | 2121 | dev_priv->flags |= RADEON_IS_PCI; |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e..fde25c0d65a 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
208 | int xorigin = 0, yorigin = 0; | 208 | int xorigin = 0, yorigin = 0; |
209 | int w = radeon_crtc->cursor_width; | 209 | int w = radeon_crtc->cursor_width; |
210 | 210 | ||
211 | if (x < 0) | ||
212 | xorigin = -x + 1; | ||
213 | if (y < 0) | ||
214 | yorigin = -y + 1; | ||
215 | if (xorigin >= CURSOR_WIDTH) | ||
216 | xorigin = CURSOR_WIDTH - 1; | ||
217 | if (yorigin >= CURSOR_HEIGHT) | ||
218 | yorigin = CURSOR_HEIGHT - 1; | ||
219 | |||
220 | if (ASIC_IS_AVIVO(rdev)) { | 211 | if (ASIC_IS_AVIVO(rdev)) { |
221 | int i = 0; | ||
222 | struct drm_crtc *crtc_p; | ||
223 | |||
224 | /* avivo cursors are offset into the total surface */ | 212 | /* avivo cursors are offset into the total surface */ |
225 | x += crtc->x; | 213 | x += crtc->x; |
226 | y += crtc->y; | 214 | y += crtc->y; |
227 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | 215 | } |
216 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
217 | |||
218 | if (x < 0) { | ||
219 | xorigin = min(-x, CURSOR_WIDTH - 1); | ||
220 | x = 0; | ||
221 | } | ||
222 | if (y < 0) { | ||
223 | yorigin = min(-y, CURSOR_HEIGHT - 1); | ||
224 | y = 0; | ||
225 | } | ||
226 | |||
227 | if (ASIC_IS_AVIVO(rdev)) { | ||
228 | int i = 0; | ||
229 | struct drm_crtc *crtc_p; | ||
228 | 230 | ||
229 | /* avivo cursor image can't end on 128 pixel boundary or | 231 | /* avivo cursor image can't end on 128 pixel boundary or |
230 | * go past the end of the frame if both crtcs are enabled | 232 | * go past the end of the frame if both crtcs are enabled |
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
253 | 255 | ||
254 | radeon_lock_cursor(crtc, true); | 256 | radeon_lock_cursor(crtc, true); |
255 | if (ASIC_IS_DCE4(rdev)) { | 257 | if (ASIC_IS_DCE4(rdev)) { |
256 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, | 258 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
257 | ((xorigin ? 0 : x) << 16) | | ||
258 | (yorigin ? 0 : y)); | ||
259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, | 260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, |
261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
262 | } else if (ASIC_IS_AVIVO(rdev)) { | 262 | } else if (ASIC_IS_AVIVO(rdev)) { |
263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, | 263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
264 | ((xorigin ? 0 : x) << 16) | | ||
265 | (yorigin ? 0 : y)); | ||
266 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 264 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
267 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, | 265 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, |
268 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 266 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
276 | | yorigin)); | 274 | | yorigin)); |
277 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, | 275 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, |
278 | (RADEON_CUR_LOCK | 276 | (RADEON_CUR_LOCK |
279 | | ((xorigin ? 0 : x) << 16) | 277 | | (x << 16) |
280 | | (yorigin ? 0 : y))); | 278 | | y)); |
281 | /* offset is from DISP(2)_BASE_ADDRESS */ | 279 | /* offset is from DISP(2)_BASE_ADDRESS */ |
282 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | 280 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + |
283 | (yorigin * 256))); | 281 | (yorigin * 256))); |
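[Editor's note] The cursor rework replaces the old '-x + 1' arithmetic with a proper clamp: a cursor partially above or to the left of the screen keeps position (0,0) and instead moves its hotspot into the image, capped at the hardware cursor dimensions, after which the position registers can simply take (x << 16) | y. A runnable sketch of the clamping math (CURSOR_WIDTH/HEIGHT values assumed):

    #include <stdio.h>

    #define CURSOR_WIDTH  64
    #define CURSOR_HEIGHT 64

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* negative coordinates become a hotspot offset into the cursor image */
    static void clamp_cursor(int *x, int *y, int *xorigin, int *yorigin)
    {
            *xorigin = *yorigin = 0;
            if (*x < 0) {
                    *xorigin = min_int(-*x, CURSOR_WIDTH - 1);
                    *x = 0;
            }
            if (*y < 0) {
                    *yorigin = min_int(-*y, CURSOR_HEIGHT - 1);
                    *y = 0;
            }
    }

    int main(void)
    {
            int x = -10, y = 5, xo, yo;

            clamp_cursor(&x, &y, &xo, &yo);
            printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xo, yo);
            return 0;
    }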
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 7cfaa7e2f3b..50d105a1c74 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <drm/radeon_drm.h> | 32 | #include <drm/radeon_drm.h> |
33 | #include <linux/vgaarb.h> | 33 | #include <linux/vgaarb.h> |
34 | #include <linux/vga_switcheroo.h> | 34 | #include <linux/vga_switcheroo.h> |
35 | #include <linux/efi.h> | ||
35 | #include "radeon_reg.h" | 36 | #include "radeon_reg.h" |
36 | #include "radeon.h" | 37 | #include "radeon.h" |
37 | #include "atom.h" | 38 | #include "atom.h" |
@@ -223,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
223 | if (radeon_no_wb == 1) | 224 | if (radeon_no_wb == 1) |
224 | rdev->wb.enabled = false; | 225 | rdev->wb.enabled = false; |
225 | else { | 226 | else { |
226 | /* often unreliable on AGP */ | ||
227 | if (rdev->flags & RADEON_IS_AGP) { | 227 | if (rdev->flags & RADEON_IS_AGP) { |
228 | /* often unreliable on AGP */ | ||
229 | rdev->wb.enabled = false; | ||
230 | } else if (rdev->family < CHIP_R300) { | ||
231 | /* often unreliable on pre-r300 */ | ||
228 | rdev->wb.enabled = false; | 232 | rdev->wb.enabled = false; |
229 | } else { | 233 | } else { |
230 | rdev->wb.enabled = true; | 234 | rdev->wb.enabled = true; |
@@ -300,6 +304,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
300 | mc->mc_vram_size = mc->aper_size; | 304 | mc->mc_vram_size = mc->aper_size; |
301 | } | 305 | } |
302 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 306 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
307 | if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) | ||
308 | mc->real_vram_size = radeon_vram_limit; | ||
303 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | 309 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", |
304 | mc->mc_vram_size >> 20, mc->vram_start, | 310 | mc->mc_vram_size >> 20, mc->vram_start, |
305 | mc->vram_end, mc->real_vram_size >> 20); | 311 | mc->vram_end, mc->real_vram_size >> 20); |
@@ -348,6 +354,9 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
348 | { | 354 | { |
349 | uint32_t reg; | 355 | uint32_t reg; |
350 | 356 | ||
357 | if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) | ||
358 | return false; | ||
359 | |||
351 | /* first check CRTCs */ | 360 | /* first check CRTCs */ |
352 | if (ASIC_IS_DCE41(rdev)) { | 361 | if (ASIC_IS_DCE41(rdev)) { |
353 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | 362 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
@@ -704,8 +713,9 @@ int radeon_device_init(struct radeon_device *rdev, | |||
704 | rdev->gpu_lockup = false; | 713 | rdev->gpu_lockup = false; |
705 | rdev->accel_working = false; | 714 | rdev->accel_working = false; |
706 | 715 | ||
707 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", | 716 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", |
708 | radeon_family_name[rdev->family], pdev->vendor, pdev->device); | 717 | radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
718 | pdev->subsystem_vendor, pdev->subsystem_device); | ||
709 | 719 | ||
710 | /* mutex initialization are all done here so we | 720 | /* mutex initialization are all done here so we |
711 | * can recall function without having locking issues */ | 721 | * can recall function without having locking issues */ |
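[Editor's note] Two independent robustness tweaks land in radeon_device.c above: radeon_card_posted() stops trusting the posted-looking state Apple's EFI leaves behind (forcing the full ATOM post path instead), and the radeon_vram_limit module parameter now actually clamps real_vram_size once the memory-controller layout is known. The clamp, with intent spelled out (fragment; the hunk compares the parameter directly against real_vram_size, so both carry the same units):

    /* debugging aid: pretend the board has less usable VRAM than it
     * reports, without touching the MC aperture layout itself */
    if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
            mc->real_vram_size = radeon_vram_limit;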
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 292f73f0ddb..07ac48162a1 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) | |||
282 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); | 282 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); |
283 | work = radeon_crtc->unpin_work; | 283 | work = radeon_crtc->unpin_work; |
284 | if (work == NULL || | 284 | if (work == NULL || |
285 | !radeon_fence_signaled(work->fence)) { | 285 | (work->fence && !radeon_fence_signaled(work->fence))) { |
286 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); | 286 | spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
287 | return; | 287 | return; |
288 | } | 288 | } |
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
348 | struct radeon_framebuffer *new_radeon_fb; | 348 | struct radeon_framebuffer *new_radeon_fb; |
349 | struct drm_gem_object *obj; | 349 | struct drm_gem_object *obj; |
350 | struct radeon_bo *rbo; | 350 | struct radeon_bo *rbo; |
351 | struct radeon_fence *fence; | ||
352 | struct radeon_unpin_work *work; | 351 | struct radeon_unpin_work *work; |
353 | unsigned long flags; | 352 | unsigned long flags; |
354 | u32 tiling_flags, pitch_pixels; | 353 | u32 tiling_flags, pitch_pixels; |
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
359 | if (work == NULL) | 358 | if (work == NULL) |
360 | return -ENOMEM; | 359 | return -ENOMEM; |
361 | 360 | ||
362 | r = radeon_fence_create(rdev, &fence); | ||
363 | if (unlikely(r != 0)) { | ||
364 | kfree(work); | ||
365 | DRM_ERROR("flip queue: failed to create fence.\n"); | ||
366 | return -ENOMEM; | ||
367 | } | ||
368 | work->event = event; | 361 | work->event = event; |
369 | work->rdev = rdev; | 362 | work->rdev = rdev; |
370 | work->crtc_id = radeon_crtc->crtc_id; | 363 | work->crtc_id = radeon_crtc->crtc_id; |
371 | work->fence = radeon_fence_ref(fence); | ||
372 | old_radeon_fb = to_radeon_framebuffer(crtc->fb); | 364 | old_radeon_fb = to_radeon_framebuffer(crtc->fb); |
373 | new_radeon_fb = to_radeon_framebuffer(fb); | 365 | new_radeon_fb = to_radeon_framebuffer(fb); |
374 | /* schedule unpin of the old buffer */ | 366 | /* schedule unpin of the old buffer */ |
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
377 | drm_gem_object_reference(obj); | 369 | drm_gem_object_reference(obj); |
378 | rbo = gem_to_radeon_bo(obj); | 370 | rbo = gem_to_radeon_bo(obj); |
379 | work->old_rbo = rbo; | 371 | work->old_rbo = rbo; |
372 | obj = new_radeon_fb->obj; | ||
373 | rbo = gem_to_radeon_bo(obj); | ||
374 | if (rbo->tbo.sync_obj) | ||
375 | work->fence = radeon_fence_ref(rbo->tbo.sync_obj); | ||
380 | INIT_WORK(&work->work, radeon_unpin_work_func); | 376 | INIT_WORK(&work->work, radeon_unpin_work_func); |
381 | 377 | ||
382 | /* We borrow the event spin lock for protecting unpin_work */ | 378 | /* We borrow the event spin lock for protecting unpin_work */ |
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
391 | spin_unlock_irqrestore(&dev->event_lock, flags); | 387 | spin_unlock_irqrestore(&dev->event_lock, flags); |
392 | 388 | ||
393 | /* pin the new buffer */ | 389 | /* pin the new buffer */ |
394 | obj = new_radeon_fb->obj; | ||
395 | rbo = gem_to_radeon_bo(obj); | ||
396 | |||
397 | DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", | 390 | DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", |
398 | work->old_rbo, rbo); | 391 | work->old_rbo, rbo); |
399 | 392 | ||
@@ -461,37 +454,18 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
461 | goto pflip_cleanup1; | 454 | goto pflip_cleanup1; |
462 | } | 455 | } |
463 | 456 | ||
464 | /* 32 ought to cover us */ | ||
465 | r = radeon_ring_lock(rdev, 32); | ||
466 | if (r) { | ||
467 | DRM_ERROR("failed to lock the ring before flip\n"); | ||
468 | goto pflip_cleanup2; | ||
469 | } | ||
470 | |||
471 | /* emit the fence */ | ||
472 | radeon_fence_emit(rdev, fence); | ||
473 | /* set the proper interrupt */ | 457 | /* set the proper interrupt */ |
474 | radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); | 458 | radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); |
475 | /* fire the ring */ | ||
476 | radeon_ring_unlock_commit(rdev); | ||
477 | 459 | ||
478 | return 0; | 460 | return 0; |
479 | 461 | ||
480 | pflip_cleanup2: | ||
481 | drm_vblank_put(dev, radeon_crtc->crtc_id); | ||
482 | |||
483 | pflip_cleanup1: | 462 | pflip_cleanup1: |
484 | r = radeon_bo_reserve(rbo, false); | 463 | if (unlikely(radeon_bo_reserve(rbo, false) != 0)) { |
485 | if (unlikely(r != 0)) { | ||
486 | DRM_ERROR("failed to reserve new rbo in error path\n"); | 464 | DRM_ERROR("failed to reserve new rbo in error path\n"); |
487 | goto pflip_cleanup; | 465 | goto pflip_cleanup; |
488 | } | 466 | } |
489 | r = radeon_bo_unpin(rbo); | 467 | if (unlikely(radeon_bo_unpin(rbo) != 0)) { |
490 | if (unlikely(r != 0)) { | ||
491 | radeon_bo_unreserve(rbo); | ||
492 | r = -EINVAL; | ||
493 | DRM_ERROR("failed to unpin new rbo in error path\n"); | 468 | DRM_ERROR("failed to unpin new rbo in error path\n"); |
494 | goto pflip_cleanup; | ||
495 | } | 469 | } |
496 | radeon_bo_unreserve(rbo); | 470 | radeon_bo_unreserve(rbo); |
497 | 471 | ||
@@ -499,9 +473,9 @@ pflip_cleanup: | |||
499 | spin_lock_irqsave(&dev->event_lock, flags); | 473 | spin_lock_irqsave(&dev->event_lock, flags); |
500 | radeon_crtc->unpin_work = NULL; | 474 | radeon_crtc->unpin_work = NULL; |
501 | unlock_free: | 475 | unlock_free: |
502 | drm_gem_object_unreference_unlocked(old_radeon_fb->obj); | ||
503 | spin_unlock_irqrestore(&dev->event_lock, flags); | 476 | spin_unlock_irqrestore(&dev->event_lock, flags); |
504 | radeon_fence_unref(&fence); | 477 | drm_gem_object_unreference_unlocked(old_radeon_fb->obj); |
478 | radeon_fence_unref(&work->fence); | ||
505 | kfree(work); | 479 | kfree(work); |
506 | 480 | ||
507 | return r; | 481 | return r; |
@@ -733,16 +707,22 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
733 | radeon_router_select_ddc_port(radeon_connector); | 707 | radeon_router_select_ddc_port(radeon_connector); |
734 | 708 | ||
735 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 709 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
736 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 710 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || |
711 | (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != | ||
712 | ENCODER_OBJECT_ID_NONE)) { | ||
737 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | 713 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
714 | |||
738 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || | 715 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || |
739 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) | 716 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) |
740 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); | 717 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
741 | } | 718 | &dig->dp_i2c_bus->adapter); |
742 | if (!radeon_connector->ddc_bus) | 719 | else if (radeon_connector->ddc_bus && !radeon_connector->edid) |
743 | return -1; | 720 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
744 | if (!radeon_connector->edid) { | 721 | &radeon_connector->ddc_bus->adapter); |
745 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 722 | } else { |
723 | if (radeon_connector->ddc_bus && !radeon_connector->edid) | ||
724 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, | ||
725 | &radeon_connector->ddc_bus->adapter); | ||
746 | } | 726 | } |
747 | 727 | ||
748 | if (!radeon_connector->edid) { | 728 | if (!radeon_connector->edid) { |
@@ -777,8 +757,17 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
777 | if (!radeon_connector->ddc_bus) | 757 | if (!radeon_connector->ddc_bus) |
778 | return -1; | 758 | return -1; |
779 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); | 759 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); |
760 | /* Log EDID retrieval status here. In particular with regard to | ||
761 | * connectors with the requires_extended_probe flag set, which will |||
762 | * prevent radeon_dvi_detect() from fetching an EDID on this connector |||
763 | * as long as no valid EDID header is found */ |||
780 | if (edid) { | 764 | if (edid) { |
765 | DRM_INFO("Radeon display connector %s: Found valid EDID", | ||
766 | drm_get_connector_name(connector)); | ||
781 | kfree(edid); | 767 | kfree(edid); |
768 | } else { | ||
769 | DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID", | ||
770 | drm_get_connector_name(connector)); | ||
782 | } | 771 | } |
783 | return ret; | 772 | return ret; |
784 | } | 773 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 73dfbe8e5f9..c12b0775d68 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -51,9 +51,10 @@ | |||
51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query | 51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query |
52 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query | 52 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query |
53 | * 2.10.0 - fusion 2D tiling | 53 | * 2.10.0 - fusion 2D tiling |
54 | * 2.11.0 - backend map, initial compute support for the CS checker | ||
54 | */ | 55 | */ |
55 | #define KMS_DRIVER_MAJOR 2 | 56 | #define KMS_DRIVER_MAJOR 2 |
56 | #define KMS_DRIVER_MINOR 10 | 57 | #define KMS_DRIVER_MINOR 11 |
57 | #define KMS_DRIVER_PATCHLEVEL 0 | 58 | #define KMS_DRIVER_PATCHLEVEL 0 |
58 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 59 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
59 | int radeon_driver_unload_kms(struct drm_device *dev); | 60 | int radeon_driver_unload_kms(struct drm_device *dev); |
@@ -117,6 +118,7 @@ int radeon_audio = 0; | |||
117 | int radeon_disp_priority = 0; | 118 | int radeon_disp_priority = 0; |
118 | int radeon_hw_i2c = 0; | 119 | int radeon_hw_i2c = 0; |
119 | int radeon_pcie_gen2 = 0; | 120 | int radeon_pcie_gen2 = 0; |
121 | int radeon_msi = -1; | ||
120 | 122 | ||
121 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 123 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
122 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 124 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -163,6 +165,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); | |||
163 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); | 165 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); |
164 | module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); | 166 | module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); |
165 | 167 | ||
168 | MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); | ||
169 | module_param_named(msi, radeon_msi, int, 0444); | ||
170 | |||
166 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) | 171 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) |
167 | { | 172 | { |
168 | drm_radeon_private_t *dev_priv = dev->dev_private; | 173 | drm_radeon_private_t *dev_priv = dev->dev_private; |
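[Editor's note] The new msi module parameter added above gives users an escape hatch around the quirk logic introduced in radeon_irq_kms.c further down: msi=1 forces message-signaled interrupts on, msi=0 forces them off, and the default of -1 leaves the decision to radeon_msi_ok(). For a modular driver that is, for example, 'modprobe radeon msi=0'; for a built-in driver the same option is spelled 'radeon.msi=0' on the kernel command line.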
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index b293487e5aa..9838865e223 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -266,7 +266,7 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder | |||
266 | return NULL; | 266 | return NULL; |
267 | } | 267 | } |
268 | 268 | ||
269 | bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder) | 269 | u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) |
270 | { | 270 | { |
271 | struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); | 271 | struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); |
272 | 272 | ||
@@ -368,7 +368,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
368 | 368 | ||
369 | if (ASIC_IS_DCE3(rdev) && | 369 | if (ASIC_IS_DCE3(rdev) && |
370 | ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | 370 | ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || |
371 | radeon_encoder_is_dp_bridge(encoder))) { | 371 | (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { |
372 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 372 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
373 | radeon_dp_set_link_config(connector, mode); | 373 | radeon_dp_set_link_config(connector, mode); |
374 | } | 374 | } |
@@ -658,7 +658,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
658 | struct radeon_connector_atom_dig *dig_connector; | 658 | struct radeon_connector_atom_dig *dig_connector; |
659 | 659 | ||
660 | /* dp bridges are always DP */ | 660 | /* dp bridges are always DP */ |
661 | if (radeon_encoder_is_dp_bridge(encoder)) | 661 | if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) |
662 | return ATOM_ENCODER_MODE_DP; | 662 | return ATOM_ENCODER_MODE_DP; |
663 | 663 | ||
664 | /* DVO is always DVO */ | 664 | /* DVO is always DVO */ |
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1507 | switch (mode) { | 1507 | switch (mode) { |
1508 | case DRM_MODE_DPMS_ON: | 1508 | case DRM_MODE_DPMS_ON: |
1509 | args.ucAction = ATOM_ENABLE; | 1509 | args.ucAction = ATOM_ENABLE; |
1510 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1510 | /* workaround for DVOOutputControl on some RS690 systems */ |
1511 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { | ||
1512 | u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); | ||
1513 | WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); | ||
1514 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1515 | WREG32(RADEON_BIOS_3_SCRATCH, reg); | ||
1516 | } else | ||
1517 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1511 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 1518 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
1512 | args.ucAction = ATOM_LCD_BLON; | 1519 | args.ucAction = ATOM_LCD_BLON; |
1513 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1520 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
@@ -1631,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1631 | break; | 1638 | break; |
1632 | case 2: | 1639 | case 2: |
1633 | args.v2.ucCRTC = radeon_crtc->crtc_id; | 1640 | args.v2.ucCRTC = radeon_crtc->crtc_id; |
1634 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | 1641 | if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) { |
1642 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1643 | |||
1644 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | ||
1645 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
1646 | else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) | ||
1647 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; | ||
1648 | else | ||
1649 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
1650 | } else | ||
1651 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
1635 | switch (radeon_encoder->encoder_id) { | 1652 | switch (radeon_encoder->encoder_id) { |
1636 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1653 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1637 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1654 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
@@ -1748,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1748 | /* DCE4/5 */ | 1765 | /* DCE4/5 */ |
1749 | if (ASIC_IS_DCE4(rdev)) { | 1766 | if (ASIC_IS_DCE4(rdev)) { |
1750 | dig = radeon_encoder->enc_priv; | 1767 | dig = radeon_encoder->enc_priv; |
1751 | if (ASIC_IS_DCE41(rdev)) | 1768 | if (ASIC_IS_DCE41(rdev)) { |
1752 | return radeon_crtc->crtc_id; | 1769 | /* ontario follows DCE4 */ |
1753 | else { | 1770 | if (rdev->family == CHIP_PALM) { |
1771 | if (dig->linkb) | ||
1772 | return 1; | ||
1773 | else | ||
1774 | return 0; | ||
1775 | } else | ||
1776 | /* llano follows DCE3.2 */ | ||
1777 | return radeon_crtc->crtc_id; | ||
1778 | } else { | ||
1754 | switch (radeon_encoder->encoder_id) { | 1779 | switch (radeon_encoder->encoder_id) { |
1755 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1780 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1756 | if (dig->linkb) | 1781 | if (dig->linkb) |
@@ -2074,7 +2099,8 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
2074 | 2099 | ||
2075 | if ((radeon_encoder->active_device & | 2100 | if ((radeon_encoder->active_device & |
2076 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | 2101 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || |
2077 | radeon_encoder_is_dp_bridge(encoder)) { | 2102 | (radeon_encoder_get_dp_bridge_encoder_id(encoder) != |
2103 | ENCODER_OBJECT_ID_NONE)) { | ||
2078 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 2104 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
2079 | if (dig) | 2105 | if (dig) |
2080 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | 2106 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); |
@@ -2323,6 +2349,9 @@ radeon_add_atom_encoder(struct drm_device *dev, | |||
2323 | default: | 2349 | default: |
2324 | encoder->possible_crtcs = 0x3; | 2350 | encoder->possible_crtcs = 0x3; |
2325 | break; | 2351 | break; |
2352 | case 4: | ||
2353 | encoder->possible_crtcs = 0xf; | ||
2354 | break; | ||
2326 | case 6: | 2355 | case 6: |
2327 | encoder->possible_crtcs = 0x3f; | 2356 | encoder->possible_crtcs = 0x3f; |
2328 | break; | 2357 | break; |
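[Editor's note] The DPMS-on workaround above brackets the ATOM table call with a save/mask/restore of a BIOS scratch register: per its comment, DVOOutputControl misbehaves on some RS690 systems when the DFP2I-active bit is set, so the bit is hidden from the table and put back afterwards. The shape of the workaround (fragment):

    u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);                    /* save */
    WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); /* hide */
    atom_execute_table(rdev->mode_info.atom_context, index,
                       (uint32_t *)&args);                      /* run table */
    WREG32(RADEON_BIOS_3_SCRATCH, reg);                         /* restore */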
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 021d2b6b556..7fd4e3e5ad5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -29,7 +29,7 @@ | |||
29 | * Dave Airlie | 29 | * Dave Airlie |
30 | */ | 30 | */ |
31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
32 | #include <asm/atomic.h> | 32 | #include <linux/atomic.h> |
33 | #include <linux/wait.h> | 33 | #include <linux/wait.h> |
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <linux/kref.h> | 35 | #include <linux/kref.h> |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 781196db792..6c111c1fa3f 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -32,17 +32,17 @@ | |||
32 | * radeon_ddc_probe | 32 | * radeon_ddc_probe |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | 35 | bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe) |
36 | { | 36 | { |
37 | u8 out_buf[] = { 0x0, 0x0}; | 37 | u8 out = 0x0; |
38 | u8 buf[2]; | 38 | u8 buf[8]; |
39 | int ret; | 39 | int ret; |
40 | struct i2c_msg msgs[] = { | 40 | struct i2c_msg msgs[] = { |
41 | { | 41 | { |
42 | .addr = 0x50, | 42 | .addr = 0x50, |
43 | .flags = 0, | 43 | .flags = 0, |
44 | .len = 1, | 44 | .len = 1, |
45 | .buf = out_buf, | 45 | .buf = &out, |
46 | }, | 46 | }, |
47 | { | 47 | { |
48 | .addr = 0x50, | 48 | .addr = 0x50, |
@@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
52 | } | 52 | } |
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* Read 8 bytes from i2c for extended probe of EDID header */ | ||
56 | if (requires_extended_probe) | ||
57 | msgs[1].len = 8; | ||
58 | |||
55 | /* on hw with routers, select right port */ | 59 | /* on hw with routers, select right port */ |
56 | if (radeon_connector->router.ddc_valid) | 60 | if (radeon_connector->router.ddc_valid) |
57 | radeon_router_select_ddc_port(radeon_connector); | 61 | radeon_router_select_ddc_port(radeon_connector); |
58 | 62 | ||
59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); | 63 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
60 | if (ret == 2) | 64 | if (ret != 2) |
61 | return true; | 65 | /* Couldn't find an accessible DDC on this connector */ |
62 | 66 | return false; | |
63 | return false; | 67 | if (requires_extended_probe) { |
68 | /* Also probe for a valid EDID header ||
69 | * EDID header starts with: | ||
70 | * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. | ||
71 | * Only the first 6 bytes must be valid as | ||
72 | * drm_edid_block_valid() can fix the last 2 bytes */ | ||
73 | if (drm_edid_header_is_valid(buf) < 6) { | ||
74 | /* Couldn't find an accessible EDID on this | ||
75 | * connector */ | ||
76 | return false; | ||
77 | } | ||
78 | } | ||
79 | return true; | ||
64 | } | 80 | } |
65 | 81 | ||
66 | /* bit banging i2c */ | 82 | /* bit banging i2c */ |
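[Editor's note] The probe keeps the classic two-message DDC handshake (write a zero offset, then read back from address 0x50) and simply lengthens the read when the extended probe is requested, so the EDID signature arrives in the same transaction. Its shape, condensed (fragment; the read message's I2C_M_RD flag is the usual i2c convention, set in the unmodified part of the function):

    u8 out = 0x0;
    u8 buf[8];
    struct i2c_msg msgs[] = {
            { .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
            { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = buf  },
    };

    if (requires_extended_probe)
            msgs[1].len = 8;        /* cover the 8-byte EDID header */

    /* both messages must complete for the DDC to count as present */
    if (i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2) != 2)
            return false;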
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 9ec830c77af..5feb6e9edd8 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -108,6 +108,52 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
108 | radeon_irq_set(rdev); | 108 | radeon_irq_set(rdev); |
109 | } | 109 | } |
110 | 110 | ||
111 | static bool radeon_msi_ok(struct radeon_device *rdev) | ||
112 | { | ||
113 | /* RV370/RV380 was first asic with MSI support */ | ||
114 | if (rdev->family < CHIP_RV380) | ||
115 | return false; | ||
116 | |||
117 | /* MSIs don't work on AGP */ | ||
118 | if (rdev->flags & RADEON_IS_AGP) | ||
119 | return false; | ||
120 | |||
121 | /* force MSI on */ | ||
122 | if (radeon_msi == 1) | ||
123 | return true; | ||
124 | else if (radeon_msi == 0) | ||
125 | return false; | ||
126 | |||
127 | /* Quirks */ | ||
128 | /* HP RS690 only seems to work with MSIs. */ | ||
129 | if ((rdev->pdev->device == 0x791f) && | ||
130 | (rdev->pdev->subsystem_vendor == 0x103c) && | ||
131 | (rdev->pdev->subsystem_device == 0x30c2)) | ||
132 | return true; | ||
133 | |||
134 | /* Dell RS690 only seems to work with MSIs. */ | ||
135 | if ((rdev->pdev->device == 0x791f) && | ||
136 | (rdev->pdev->subsystem_vendor == 0x1028) && | ||
137 | (rdev->pdev->subsystem_device == 0x01fc)) | ||
138 | return true; | ||
139 | |||
140 | /* Dell RS690 only seems to work with MSIs. */ | ||
141 | if ((rdev->pdev->device == 0x791f) && | ||
142 | (rdev->pdev->subsystem_vendor == 0x1028) && | ||
143 | (rdev->pdev->subsystem_device == 0x01fd)) | ||
144 | return true; | ||
145 | |||
146 | if (rdev->flags & RADEON_IS_IGP) { | ||
147 | /* APUs work fine with MSIs */ | ||
148 | if (rdev->family >= CHIP_PALM) | ||
149 | return true; | ||
150 | /* lots of IGPs have problems with MSIs */ | ||
151 | return false; | ||
152 | } | ||
153 | |||
154 | return true; | ||
155 | } | ||
156 | |||
111 | int radeon_irq_kms_init(struct radeon_device *rdev) | 157 | int radeon_irq_kms_init(struct radeon_device *rdev) |
112 | { | 158 | { |
113 | int i; | 159 | int i; |
@@ -124,12 +170,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
124 | } | 170 | } |
125 | /* enable msi */ | 171 | /* enable msi */ |
126 | rdev->msi_enabled = 0; | 172 | rdev->msi_enabled = 0; |
127 | /* MSIs don't seem to work reliably on all IGP | 173 | |
128 | * chips. Disable MSI on them for now. | 174 | if (radeon_msi_ok(rdev)) { |
129 | */ | ||
130 | if ((rdev->family >= CHIP_RV380) && | ||
131 | ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && | ||
132 | (!(rdev->flags & RADEON_IS_AGP))) { | ||
133 | int ret = pci_enable_msi(rdev->pdev); | 175 | int ret = pci_enable_msi(rdev->pdev); |
134 | if (!ret) { | 176 | if (!ret) { |
135 | rdev->msi_enabled = 1; | 177 | rdev->msi_enabled = 1; |
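[Editor's note] radeon_irq_kms_init() now delegates the whole MSI decision to radeon_msi_ok() above, whose ordering is the interesting part: hard hardware exclusions first (pre-RV380 chips and AGP never get MSIs), then the explicit radeon_msi module parameter, then boards known to require MSIs, then the conservative IGP default. The caller side, condensed (fragment):

    if (radeon_msi_ok(rdev)) {
            /* pci_enable_msi() returns 0 on success */
            if (!pci_enable_msi(rdev->pdev))
                    rdev->msi_enabled = 1;
    }
    /* otherwise stay on legacy INTx interrupts */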
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index bd58af65858..be2c1224e68 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -60,7 +60,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
60 | /* update BUS flag */ | 60 | /* update BUS flag */ |
61 | if (drm_pci_device_is_agp(dev)) { | 61 | if (drm_pci_device_is_agp(dev)) { |
62 | flags |= RADEON_IS_AGP; | 62 | flags |= RADEON_IS_AGP; |
63 | } else if (drm_pci_device_is_pcie(dev)) { | 63 | } else if (pci_is_pcie(dev->pdev)) { |
64 | flags |= RADEON_IS_PCIE; | 64 | flags |= RADEON_IS_PCIE; |
65 | } else { | 65 | } else { |
66 | flags |= RADEON_IS_PCI; | 66 | flags |= RADEON_IS_PCI; |
@@ -237,6 +237,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
237 | case RADEON_INFO_FUSION_GART_WORKING: | 237 | case RADEON_INFO_FUSION_GART_WORKING: |
238 | value = 1; | 238 | value = 1; |
239 | break; | 239 | break; |
240 | case RADEON_INFO_BACKEND_MAP: | ||
241 | if (rdev->family >= CHIP_CAYMAN) | ||
242 | value = rdev->config.cayman.backend_map; | ||
243 | else if (rdev->family >= CHIP_CEDAR) | ||
244 | value = rdev->config.evergreen.backend_map; | ||
245 | else if (rdev->family >= CHIP_RV770) | ||
246 | value = rdev->config.rv770.backend_map; | ||
247 | else if (rdev->family >= CHIP_R600) | ||
248 | value = rdev->config.r600.backend_map; | ||
249 | else { | ||
250 | return -EINVAL; | ||
251 | } | ||
252 | break; | ||
240 | default: | 253 | default: |
241 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 254 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
242 | return -EINVAL; | 255 | return -EINVAL; |
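The RADEON_INFO_BACKEND_MAP cascade relies on the CHIP_* family enum being ordered oldest to newest, so the comparisons must run newest-first: a Cayman part also satisfies family >= CHIP_R600, and testing oldest-first would select the wrong per-generation config struct. The dispatch shape, condensed (hypothetical enum values, plain C):

enum chip_family { CHIP_R600 = 1, CHIP_RV770, CHIP_CEDAR, CHIP_CAYMAN };

static int backend_map_for(enum chip_family family,
			   unsigned int cayman_map, unsigned int evergreen_map,
			   unsigned int rv770_map, unsigned int r600_map,
			   unsigned int *out)
{
	if (family >= CHIP_CAYMAN)	/* newest first */
		*out = cayman_map;
	else if (family >= CHIP_CEDAR)
		*out = evergreen_map;
	else if (family >= CHIP_RV770)
		*out = rv770_map;
	else if (family >= CHIP_R600)
		*out = r600_map;
	else
		return -1;	/* pre-R600 parts carry no backend map */
	return 0;
}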
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c index ed95155c4b1..988548efea9 100644 --- a/drivers/gpu/drm/radeon/radeon_mem.c +++ b/drivers/gpu/drm/radeon/radeon_mem.c | |||
@@ -139,7 +139,7 @@ static int init_heap(struct mem_block **heap, int start, int size) | |||
139 | if (!blocks) | 139 | if (!blocks) |
140 | return -ENOMEM; | 140 | return -ENOMEM; |
141 | 141 | ||
142 | *heap = kmalloc(sizeof(**heap), GFP_KERNEL); | 142 | *heap = kzalloc(sizeof(**heap), GFP_KERNEL); |
143 | if (!*heap) { | 143 | if (!*heap) { |
144 | kfree(blocks); | 144 | kfree(blocks); |
145 | return -ENOMEM; | 145 | return -ENOMEM; |
@@ -150,7 +150,6 @@ static int init_heap(struct mem_block **heap, int start, int size) | |||
150 | blocks->file_priv = NULL; | 150 | blocks->file_priv = NULL; |
151 | blocks->next = blocks->prev = *heap; | 151 | blocks->next = blocks->prev = *heap; |
152 | 152 | ||
153 | memset(*heap, 0, sizeof(**heap)); | ||
154 | (*heap)->file_priv = (struct drm_file *) - 1; | 153 | (*heap)->file_priv = (struct drm_file *) - 1; |
155 | (*heap)->next = (*heap)->prev = blocks; | 154 | (*heap)->next = (*heap)->prev = blocks; |
156 | return 0; | 155 | return 0; |
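This hunk changes no behavior: kzalloc() is kmalloc() plus a guaranteed zero fill, so the trailing memset() goes away and the heap head is never observable in an uninitialized state. The userspace analogue, for illustration:

#include <stdlib.h>

struct mem_block {
	struct mem_block *next, *prev;
	void *file_priv;
	int start, size;
};

static struct mem_block *heap_head_alloc(void)
{
	/* calloc(1, sz) is to malloc(sz) + memset() what
	 * kzalloc(sz, flags) is to kmalloc(sz, flags) + memset() */
	return calloc(1, sizeof(struct mem_block));
}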
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 6df4e3cec0c..cbf80de2d9c 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -438,12 +438,16 @@ struct radeon_connector { | |||
438 | struct radeon_i2c_chan *ddc_bus; | 438 | struct radeon_i2c_chan *ddc_bus; |
439 | /* some systems have an hdmi and vga port with a shared ddc line */ | 439 | /* some systems have an hdmi and vga port with a shared ddc line */ |
440 | bool shared_ddc; | 440 | bool shared_ddc; |
441 | /* for some Radeon chip families we apply an additional EDID header | ||
442 | check as part of the DDC probe */ | ||
443 | bool requires_extended_probe; | ||
441 | bool use_digital; | 444 | bool use_digital; |
442 | /* we need to mind the EDID between detect | 445 | /* we need to mind the EDID between detect |
443 | and get modes due to analog/digital/tvencoder */ | 446 | and get modes due to analog/digital/tvencoder */ |
444 | struct edid *edid; | 447 | struct edid *edid; |
445 | void *con_priv; | 448 | void *con_priv; |
446 | bool dac_load_detect; | 449 | bool dac_load_detect; |
450 | bool detected_by_load; /* if the connection status was determined by load */ | ||
447 | uint16_t connector_object_id; | 451 | uint16_t connector_object_id; |
448 | struct radeon_hpd hpd; | 452 | struct radeon_hpd hpd; |
449 | struct radeon_router router; | 453 | struct radeon_router router; |
@@ -464,8 +468,8 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); | |||
464 | extern struct drm_connector * | 468 | extern struct drm_connector * |
465 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); | 469 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); |
466 | 470 | ||
467 | extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder); | 471 | extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); |
468 | extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); | 472 | extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); |
469 | extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); | 473 | extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); |
470 | extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); | 474 | extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); |
471 | 475 | ||
@@ -476,6 +480,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector, | |||
476 | struct drm_display_mode *mode); | 480 | struct drm_display_mode *mode); |
477 | extern void radeon_dp_link_train(struct drm_encoder *encoder, | 481 | extern void radeon_dp_link_train(struct drm_encoder *encoder, |
478 | struct drm_connector *connector); | 482 | struct drm_connector *connector); |
483 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); | ||
479 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); | 484 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); |
480 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); | 485 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); |
481 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); | 486 | extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); |
@@ -514,7 +519,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
514 | u8 val); | 519 | u8 val); |
515 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); | 520 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
516 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | 521 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); |
517 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 522 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, |
523 | bool requires_extended_probe); | ||
518 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 524 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
519 | 525 | ||
520 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); | 526 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index aaa19dc418a..6fabe89fa6a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -594,6 +594,9 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
594 | if (rdev->pm.default_vddc) | 594 | if (rdev->pm.default_vddc) |
595 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 595 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
596 | SET_VOLTAGE_TYPE_ASIC_VDDC); | 596 | SET_VOLTAGE_TYPE_ASIC_VDDC); |
597 | if (rdev->pm.default_vddci) | ||
598 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
599 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
597 | if (rdev->pm.default_sclk) | 600 | if (rdev->pm.default_sclk) |
598 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 601 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
599 | if (rdev->pm.default_mclk) | 602 | if (rdev->pm.default_mclk) |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index bc44a3d35ec..b4ce8645570 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -3295,7 +3295,7 @@ | |||
3295 | # define RADEON_RB_BUFSZ_MASK (0x3f << 0) | 3295 | # define RADEON_RB_BUFSZ_MASK (0x3f << 0) |
3296 | # define RADEON_RB_BLKSZ_SHIFT 8 | 3296 | # define RADEON_RB_BLKSZ_SHIFT 8 |
3297 | # define RADEON_RB_BLKSZ_MASK (0x3f << 8) | 3297 | # define RADEON_RB_BLKSZ_MASK (0x3f << 8) |
3298 | # define RADEON_BUF_SWAP_32BIT (1 << 17) | 3298 | # define RADEON_BUF_SWAP_32BIT (2 << 16) |
3299 | # define RADEON_MAX_FETCH_SHIFT 18 | 3299 | # define RADEON_MAX_FETCH_SHIFT 18 |
3300 | # define RADEON_MAX_FETCH_MASK (0x3 << 18) | 3300 | # define RADEON_MAX_FETCH_MASK (0x3 << 18) |
3301 | # define RADEON_RB_NO_UPDATE (1 << 27) | 3301 | # define RADEON_RB_NO_UPDATE (1 << 27) |
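(1 << 17) and (2 << 16) are the same constant, 0x20000, so this hunk is documentation rather than a behavior change: bits 17:16 of the ring-buffer control register form a two-bit swap-control field whose value 2 selects 32-bit swapping, and the old spelling made it look like a lone flag at bit 17. The relationship, checked in a sketch with hypothetical field macros:

#include <assert.h>

#define BUF_SWAP_SHIFT	16
#define BUF_SWAP_MASK	(0x3 << BUF_SWAP_SHIFT)	/* bits 17:16 */
#define BUF_SWAP_NONE	(0 << BUF_SWAP_SHIFT)
#define BUF_SWAP_32BIT	(2 << BUF_SWAP_SHIFT)

int main(void)
{
	assert((1 << 17) == BUF_SWAP_32BIT);		/* numerically equal */
	assert((BUF_SWAP_32BIT & ~BUF_SWAP_MASK) == 0);	/* fits the field */
	return 0;
}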
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index dee4a0c1b4b..602fa3541c4 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
40 | size = 1024 * 1024; | 40 | size = 1024 * 1024; |
41 | 41 | ||
42 | /* Number of tests = | 42 | /* Number of tests = |
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | 43 | * (Total GTT - IB pool - writeback page - ring buffers) / test size |
44 | */ | 44 | */ |
45 | n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - | 45 | n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size; |
46 | rdev->cp.ring_size)) / size; | 46 | if (rdev->wb.wb_obj) |
47 | n -= RADEON_GPU_PAGE_SIZE; | ||
48 | if (rdev->ih.ring_obj) | ||
49 | n -= rdev->ih.ring_size; | ||
50 | n /= size; | ||
47 | 51 | ||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | 52 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); |
49 | if (!gtt_obj) { | 53 | if (!gtt_obj) { |
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
132 | gtt_start++, vram_start++) { | 136 | gtt_start++, vram_start++) { |
133 | if (*vram_start != gtt_start) { | 137 | if (*vram_start != gtt_start) { |
134 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | 138 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " |
135 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | 139 | "expected 0x%p (GTT/VRAM offset " |
136 | i, *vram_start, gtt_start, gtt_map, | 140 | "0x%16llx/0x%16llx)\n", |
137 | gtt_end); | 141 | i, *vram_start, gtt_start, |
142 | (unsigned long long) | ||
143 | (gtt_addr - rdev->mc.gtt_start + | ||
144 | (void*)gtt_start - gtt_map), | ||
145 | (unsigned long long) | ||
146 | (vram_addr - rdev->mc.vram_start + | ||
147 | (void*)gtt_start - gtt_map)); | ||
138 | radeon_bo_kunmap(vram_obj); | 148 | radeon_bo_kunmap(vram_obj); |
139 | goto out_cleanup; | 149 | goto out_cleanup; |
140 | } | 150 | } |
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
175 | gtt_start++, vram_start++) { | 185 | gtt_start++, vram_start++) { |
176 | if (*gtt_start != vram_start) { | 186 | if (*gtt_start != vram_start) { |
177 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | 187 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " |
178 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | 188 | "expected 0x%p (VRAM/GTT offset " |
179 | i, *gtt_start, vram_start, vram_map, | 189 | "0x%16llx/0x%16llx)\n", |
180 | vram_end); | 190 | i, *gtt_start, vram_start, |
191 | (unsigned long long) | ||
192 | (vram_addr - rdev->mc.vram_start + | ||
193 | (void*)vram_start - vram_map), | ||
194 | (unsigned long long) | ||
195 | (gtt_addr - rdev->mc.gtt_start + | ||
196 | (void*)vram_start - vram_map)); | ||
181 | radeon_bo_kunmap(gtt_obj[i]); | 197 | radeon_bo_kunmap(gtt_obj[i]); |
182 | goto out_cleanup; | 198 | goto out_cleanup; |
183 | } | 199 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 60125ddba1e..0b5468bfaf5 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
277 | DRM_ERROR("Trying to move memory with CP turned off.\n"); | 277 | DRM_ERROR("Trying to move memory with CP turned off.\n"); |
278 | return -EINVAL; | 278 | return -EINVAL; |
279 | } | 279 | } |
280 | r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); | 280 | |
281 | BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); | ||
282 | |||
283 | r = radeon_copy(rdev, old_start, new_start, | ||
284 | new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ | ||
285 | fence); | ||
281 | /* FIXME: handle copy error */ | 286 | /* FIXME: handle copy error */ |
282 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, | 287 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, |
283 | evict, no_wait_reserve, no_wait_gpu, new_mem); | 288 | evict, no_wait_reserve, no_wait_gpu, new_mem); |
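The multiplication matters because TTM counts CPU pages (PAGE_SIZE) while radeon_copy() works in 4 KiB GPU pages; on 4 KiB-page systems the factor is 1, but with e.g. 64 KiB CPU pages each one spans 16 GPU pages, and passing the raw num_pages would copy only a fraction of the buffer. The BUILD_BUG_ON pins down the assumption that the division is exact. As a sketch:

#define GPU_PAGE_SIZE	4096UL	/* RADEON_GPU_PAGE_SIZE */

static unsigned long cpu_pages_to_gpu_pages(unsigned long num_pages,
					    unsigned long page_size)
{
	/* valid only when page_size % GPU_PAGE_SIZE == 0, which the
	 * BUILD_BUG_ON above asserts at compile time */
	return num_pages * (page_size / GPU_PAGE_SIZE);
}

/* cpu_pages_to_gpu_pages(1, 65536) == 16 */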
@@ -450,6 +455,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ | |||
450 | return -EINVAL; | 455 | return -EINVAL; |
451 | mem->bus.base = rdev->mc.aper_base; | 456 | mem->bus.base = rdev->mc.aper_base; |
452 | mem->bus.is_iomem = true; | 457 | mem->bus.is_iomem = true; |
458 | #ifdef __alpha__ | ||
459 | /* | ||
460 | * Alpha: use bus.addr to hold the ioremap() return, | ||
461 | * so we can modify bus.base below. | ||
462 | */ | ||
463 | if (mem->placement & TTM_PL_FLAG_WC) | ||
464 | mem->bus.addr = | ||
465 | ioremap_wc(mem->bus.base + mem->bus.offset, | ||
466 | mem->bus.size); | ||
467 | else | ||
468 | mem->bus.addr = | ||
469 | ioremap_nocache(mem->bus.base + mem->bus.offset, | ||
470 | mem->bus.size); | ||
471 | |||
472 | /* | ||
473 | * Alpha: Use just the bus offset plus | ||
474 | * the hose/domain memory base for bus.base. | ||
475 | * It then can be used to build PTEs for VRAM | ||
476 | * access, as done in ttm_bo_vm_fault(). | ||
477 | */ | ||
478 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | ||
479 | rdev->ddev->hose->dense_mem_base; | ||
480 | #endif | ||
453 | break; | 481 | break; |
454 | default: | 482 | default: |
455 | return -EINVAL; | 483 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 0aa8e85a945..2316977eb92 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -208,6 +208,7 @@ cayman 0x9400 | |||
208 | 0x0002834C PA_SC_VPORT_ZMAX_15 | 208 | 0x0002834C PA_SC_VPORT_ZMAX_15 |
209 | 0x00028350 SX_MISC | 209 | 0x00028350 SX_MISC |
210 | 0x00028354 SX_SURFACE_SYNC | 210 | 0x00028354 SX_SURFACE_SYNC |
211 | 0x0002835C SX_SCATTER_EXPORT_SIZE | ||
211 | 0x00028380 SQ_VTX_SEMANTIC_0 | 212 | 0x00028380 SQ_VTX_SEMANTIC_0 |
212 | 0x00028384 SQ_VTX_SEMANTIC_1 | 213 | 0x00028384 SQ_VTX_SEMANTIC_1 |
213 | 0x00028388 SQ_VTX_SEMANTIC_2 | 214 | 0x00028388 SQ_VTX_SEMANTIC_2 |
@@ -432,6 +433,7 @@ cayman 0x9400 | |||
432 | 0x00028700 SPI_STACK_MGMT | 433 | 0x00028700 SPI_STACK_MGMT |
433 | 0x00028704 SPI_WAVE_MGMT_1 | 434 | 0x00028704 SPI_WAVE_MGMT_1 |
434 | 0x00028708 SPI_WAVE_MGMT_2 | 435 | 0x00028708 SPI_WAVE_MGMT_2 |
436 | 0x00028720 GDS_ADDR_BASE | ||
435 | 0x00028724 GDS_ADDR_SIZE | 437 | 0x00028724 GDS_ADDR_SIZE |
436 | 0x00028780 CB_BLEND0_CONTROL | 438 | 0x00028780 CB_BLEND0_CONTROL |
437 | 0x00028784 CB_BLEND1_CONTROL | 439 | 0x00028784 CB_BLEND1_CONTROL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 0e28cae7ea4..161737a28c2 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -44,6 +44,7 @@ evergreen 0x9400 | |||
44 | 0x00008E28 SQ_STATIC_THREAD_MGMT_3 | 44 | 0x00008E28 SQ_STATIC_THREAD_MGMT_3 |
45 | 0x00008E2C SQ_LDS_RESOURCE_MGMT | 45 | 0x00008E2C SQ_LDS_RESOURCE_MGMT |
46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | 46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS |
47 | 0x00009014 SX_MEMORY_EXPORT_SIZE | ||
47 | 0x00009100 SPI_CONFIG_CNTL | 48 | 0x00009100 SPI_CONFIG_CNTL |
48 | 0x0000913C SPI_CONFIG_CNTL_1 | 49 | 0x0000913C SPI_CONFIG_CNTL_1 |
49 | 0x00009508 TA_CNTL_AUX | 50 | 0x00009508 TA_CNTL_AUX |
@@ -442,7 +443,9 @@ evergreen 0x9400 | |||
442 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X | 443 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X |
443 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y | 444 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y |
444 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z | 445 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z |
446 | 0x00028720 GDS_ADDR_BASE | ||
445 | 0x00028724 GDS_ADDR_SIZE | 447 | 0x00028724 GDS_ADDR_SIZE |
448 | 0x00028728 GDS_ORDERED_WAVE_PER_SE | ||
446 | 0x00028780 CB_BLEND0_CONTROL | 449 | 0x00028780 CB_BLEND0_CONTROL |
447 | 0x00028784 CB_BLEND1_CONTROL | 450 | 0x00028784 CB_BLEND1_CONTROL |
448 | 0x00028788 CB_BLEND2_CONTROL | 451 | 0x00028788 CB_BLEND2_CONTROL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index ea49752ee99..0380c5c15f8 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -429,6 +429,7 @@ r600 0x9400 | |||
429 | 0x00028438 SX_ALPHA_REF | 429 | 0x00028438 SX_ALPHA_REF |
430 | 0x00028410 SX_ALPHA_TEST_CONTROL | 430 | 0x00028410 SX_ALPHA_TEST_CONTROL |
431 | 0x00028350 SX_MISC | 431 | 0x00028350 SX_MISC |
432 | 0x00009014 SX_MEMORY_EXPORT_SIZE | ||
432 | 0x00009604 TC_INVALIDATE | 433 | 0x00009604 TC_INVALIDATE |
433 | 0x00009400 TD_FILTER4 | 434 | 0x00009400 TD_FILTER4 |
434 | 0x00009404 TD_FILTER4_1 | 435 | 0x00009404 TD_FILTER4_1 |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 1f5850e473c..d9b0bc4547e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
62 | { | 62 | { |
63 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | 63 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
64 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | 64 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); |
65 | int i; | ||
65 | 66 | ||
66 | /* Lock the graphics update lock */ | 67 | /* Lock the graphics update lock */ |
67 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | 68 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; |
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
74 | (u32)crtc_base); | 75 | (u32)crtc_base); |
75 | 76 | ||
76 | /* Wait for update_pending to go high. */ | 77 | /* Wait for update_pending to go high. */ |
77 | while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | 78 | for (i = 0; i < rdev->usec_timeout; i++) { |
79 | if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) | ||
80 | break; | ||
81 | udelay(1); | ||
82 | } | ||
78 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | 83 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); |
79 | 84 | ||
80 | /* Unlock the lock, so double-buffering can take place inside vblank */ | 85 | /* Unlock the lock, so double-buffering can take place inside vblank */ |
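This hunk, and the matching one in rv770.c further down, replaces an unbounded busy-wait on the update-pending bit with a poll capped at rdev->usec_timeout iterations of udelay(1): a flip that never latches can no longer hang the CPU. The general shape, as a kernel-style sketch (hypothetical helper, not from this patch):

#include <linux/delay.h>
#include <linux/types.h>

static bool poll_bit_set(u32 (*read_status)(void *ctx), void *ctx,
			 u32 bit, unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (read_status(ctx) & bit)
			return true;	/* condition met */
		udelay(1);		/* 1 us per try bounds the wait */
	}
	return false;			/* timed out; caller decides */
}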
@@ -287,6 +292,7 @@ void rs600_hpd_init(struct radeon_device *rdev) | |||
287 | default: | 292 | default: |
288 | break; | 293 | break; |
289 | } | 294 | } |
295 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
290 | } | 296 | } |
291 | if (rdev->irq.installed) | 297 | if (rdev->irq.installed) |
292 | rs600_irq_set(rdev); | 298 | rs600_irq_set(rdev); |
@@ -318,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev) | |||
318 | 324 | ||
319 | void rs600_bm_disable(struct radeon_device *rdev) | 325 | void rs600_bm_disable(struct radeon_device *rdev) |
320 | { | 326 | { |
321 | u32 tmp; | 327 | u16 tmp; |
322 | 328 | ||
323 | /* disable bus mastering */ | 329 | /* disable bus mastering */ |
324 | pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); | 330 | pci_read_config_word(rdev->pdev, 0x4, &tmp); |
325 | pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); | 331 | pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); |
326 | mdelay(1); | 332 | mdelay(1); |
327 | } | 333 | } |
@@ -530,7 +536,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
530 | addr = addr & 0xFFFFFFFFFFFFF000ULL; | 536 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
531 | addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; | 537 | addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; |
532 | addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; | 538 | addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; |
533 | writeq(addr, ((void __iomem *)ptr) + (i * 8)); | 539 | writeq(addr, ptr + (i * 8)); |
534 | return 0; | 540 | return 0; |
535 | } | 541 | } |
536 | 542 | ||
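Two small correctness fixes here: tmp becomes a real u16 so pci_read_config_word() no longer writes through a cast into half of a u32, and the writeq() cast is dropped because ptr already carries the right type. The set_page path itself packs a 4 KiB-aligned bus address with low-bit attributes; a sketch mirroring the R600_PTE_* flags used above (bit positions illustrative):

#include <linux/types.h>

#define PTE_VALID	(1ULL << 0)
#define PTE_SYSTEM	(1ULL << 1)
#define PTE_SNOOPED	(1ULL << 2)
#define PTE_READABLE	(1ULL << 5)
#define PTE_WRITEABLE	(1ULL << 6)

static u64 make_gart_pte(u64 bus_addr)
{
	u64 pte = bus_addr & ~0xfffULL;	/* keep the page-aligned address */

	pte |= PTE_VALID | PTE_SYSTEM | PTE_SNOOPED;
	pte |= PTE_READABLE | PTE_WRITEABLE;
	return pte;
}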
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4de51891aa6..ddc206a1f61 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
47 | { | 47 | { |
48 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; | 48 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
49 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); | 49 | u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); |
50 | int i; | ||
50 | 51 | ||
51 | /* Lock the graphics update lock */ | 52 | /* Lock the graphics update lock */ |
52 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; | 53 | tmp |= AVIVO_D1GRPH_UPDATE_LOCK; |
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
66 | (u32)crtc_base); | 67 | (u32)crtc_base); |
67 | 68 | ||
68 | /* Wait for update_pending to go high. */ | 69 | /* Wait for update_pending to go high. */ |
69 | while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); | 70 | for (i = 0; i < rdev->usec_timeout; i++) { |
71 | if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) | ||
72 | break; | ||
73 | udelay(1); | ||
74 | } | ||
70 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | 75 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); |
71 | 76 | ||
72 | /* Unlock the lock, so double-buffering can take place inside vblank */ | 77 | /* Unlock the lock, so double-buffering can take place inside vblank */ |
@@ -536,55 +541,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
536 | return backend_map; | 541 | return backend_map; |
537 | } | 542 | } |
538 | 543 | ||
539 | static void rv770_program_channel_remap(struct radeon_device *rdev) | ||
540 | { | ||
541 | u32 tcp_chan_steer, mc_shared_chremap, tmp; | ||
542 | bool force_no_swizzle; | ||
543 | |||
544 | switch (rdev->family) { | ||
545 | case CHIP_RV770: | ||
546 | case CHIP_RV730: | ||
547 | force_no_swizzle = false; | ||
548 | break; | ||
549 | case CHIP_RV710: | ||
550 | case CHIP_RV740: | ||
551 | default: | ||
552 | force_no_swizzle = true; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | tmp = RREG32(MC_SHARED_CHMAP); | ||
557 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
558 | case 0: | ||
559 | case 1: | ||
560 | default: | ||
561 | /* default mapping */ | ||
562 | mc_shared_chremap = 0x00fac688; | ||
563 | break; | ||
564 | case 2: | ||
565 | case 3: | ||
566 | if (force_no_swizzle) | ||
567 | mc_shared_chremap = 0x00fac688; | ||
568 | else | ||
569 | mc_shared_chremap = 0x00bbc298; | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | if (rdev->family == CHIP_RV740) | ||
574 | tcp_chan_steer = 0x00ef2a60; | ||
575 | else | ||
576 | tcp_chan_steer = 0x00fac688; | ||
577 | |||
578 | /* RV770 CE has special chremap setup */ | ||
579 | if (rdev->pdev->device == 0x944e) { | ||
580 | tcp_chan_steer = 0x00b08b08; | ||
581 | mc_shared_chremap = 0x00b08b08; | ||
582 | } | ||
583 | |||
584 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); | ||
585 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
586 | } | ||
587 | |||
588 | static void rv770_gpu_init(struct radeon_device *rdev) | 544 | static void rv770_gpu_init(struct radeon_device *rdev) |
589 | { | 545 | { |
590 | int i, j, num_qd_pipes; | 546 | int i, j, num_qd_pipes; |
@@ -778,14 +734,13 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
778 | (cc_rb_backend_disable >> 16)); | 734 | (cc_rb_backend_disable >> 16)); |
779 | 735 | ||
780 | rdev->config.rv770.tile_config = gb_tiling_config; | 736 | rdev->config.rv770.tile_config = gb_tiling_config; |
737 | rdev->config.rv770.backend_map = backend_map; | ||
781 | gb_tiling_config |= BACKEND_MAP(backend_map); | 738 | gb_tiling_config |= BACKEND_MAP(backend_map); |
782 | 739 | ||
783 | WREG32(GB_TILING_CONFIG, gb_tiling_config); | 740 | WREG32(GB_TILING_CONFIG, gb_tiling_config); |
784 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 741 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
785 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 742 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
786 | 743 | ||
787 | rv770_program_channel_remap(rdev); | ||
788 | |||
789 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 744 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
790 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 745 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
791 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 746 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h index ef940bad63f..194303c177a 100644 --- a/drivers/gpu/drm/sis/sis_drv.h +++ b/drivers/gpu/drm/sis/sis_drv.h | |||
@@ -48,8 +48,8 @@ enum sis_family { | |||
48 | 48 | ||
49 | 49 | ||
50 | #define SIS_BASE (dev_priv->mmio) | 50 | #define SIS_BASE (dev_priv->mmio) |
51 | #define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); | 51 | #define SIS_READ(reg) DRM_READ32(SIS_BASE, reg) |
52 | #define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val); | 52 | #define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val) |
53 | 53 | ||
54 | typedef struct drm_sis_private { | 54 | typedef struct drm_sis_private { |
55 | drm_local_map_t *mmio; | 55 | drm_local_map_t *mmio; |
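Dropping the trailing semicolons from SIS_READ/SIS_WRITE is not cosmetic: a statement-like macro that carries its own semicolon expands to two statements once the caller adds the customary one, which breaks any if/else it sits in. A minimal demonstration:

static unsigned int regs[16];

#define REG_WRITE(reg, val)	(regs[(reg)] = (val))	/* no semicolon */
#define REG_WRITE_BAD(reg, val)	(regs[(reg)] = (val));	/* baked-in ';' */

static void set_ctrl(int enable)
{
	if (enable)
		REG_WRITE(0, 1);
	else
		REG_WRITE(0, 0);
	/* with REG_WRITE_BAD the first branch would expand to "...;;":
	 * the empty second statement closes the if, leaving the else
	 * unattached and the file failing to compile */
}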
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 2e618b5ac46..ef06194c5aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
38 | #include <linux/file.h> | 38 | #include <linux/file.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/atomic.h> | 40 | #include <linux/atomic.h> |
41 | 41 | ||
42 | #define TTM_ASSERT_LOCKED(param) | 42 | #define TTM_ASSERT_LOCKED(param) |
43 | #define TTM_DEBUG(fmt, arg...) | 43 | #define TTM_DEBUG(fmt, arg...) |
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
353 | 353 | ||
354 | ret = ttm_tt_set_user(bo->ttm, current, | 354 | ret = ttm_tt_set_user(bo->ttm, current, |
355 | bo->buffer_start, bo->num_pages); | 355 | bo->buffer_start, bo->num_pages); |
356 | if (unlikely(ret != 0)) | 356 | if (unlikely(ret != 0)) { |
357 | ttm_tt_destroy(bo->ttm); | 357 | ttm_tt_destroy(bo->ttm); |
358 | bo->ttm = NULL; | ||
359 | } | ||
358 | break; | 360 | break; |
359 | default: | 361 | default: |
360 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); | 362 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); |
@@ -390,10 +392,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
390 | * Create and bind a ttm if required. | 392 | * Create and bind a ttm if required. |
391 | */ | 393 | */ |
392 | 394 | ||
393 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { | 395 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
394 | ret = ttm_bo_add_ttm(bo, false); | 396 | if (bo->ttm == NULL) { |
395 | if (ret) | 397 | bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); |
396 | goto out_err; | 398 | ret = ttm_bo_add_ttm(bo, zero); |
399 | if (ret) | ||
400 | goto out_err; | ||
401 | } | ||
397 | 402 | ||
398 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); | 403 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); |
399 | if (ret) | 404 | if (ret) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 77dbf408c0d..082fcaea583 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
321 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; | 321 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; |
322 | struct ttm_tt *ttm = bo->ttm; | 322 | struct ttm_tt *ttm = bo->ttm; |
323 | struct ttm_mem_reg *old_mem = &bo->mem; | 323 | struct ttm_mem_reg *old_mem = &bo->mem; |
324 | struct ttm_mem_reg old_copy; | 324 | struct ttm_mem_reg old_copy = *old_mem; |
325 | void *old_iomap; | 325 | void *old_iomap; |
326 | void *new_iomap; | 326 | void *new_iomap; |
327 | int ret; | 327 | int ret; |
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
635 | if (ret) | 635 | if (ret) |
636 | return ret; | 636 | return ret; |
637 | 637 | ||
638 | ttm_bo_free_old_node(bo); | ||
639 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && | 638 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
640 | (bo->ttm != NULL)) { | 639 | (bo->ttm != NULL)) { |
641 | ttm_tt_unbind(bo->ttm); | 640 | ttm_tt_unbind(bo->ttm); |
642 | ttm_tt_destroy(bo->ttm); | 641 | ttm_tt_destroy(bo->ttm); |
643 | bo->ttm = NULL; | 642 | bo->ttm = NULL; |
644 | } | 643 | } |
644 | ttm_bo_free_old_node(bo); | ||
645 | } else { | 645 | } else { |
646 | /** | 646 | /** |
647 | * This should help pipeline ordinary buffer moves. | 647 | * This should help pipeline ordinary buffer moves. |
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index de41e55a944..075daf44bce 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #include "ttm/ttm_lock.h" | 31 | #include "ttm/ttm_lock.h" |
32 | #include "ttm/ttm_module.h" | 32 | #include "ttm/ttm_module.h" |
33 | #include <asm/atomic.h> | 33 | #include <linux/atomic.h> |
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/wait.h> | 35 | #include <linux/wait.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index ebddd443d91..93577f2e295 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -55,7 +55,7 @@ | |||
55 | #include <linux/spinlock.h> | 55 | #include <linux/spinlock.h> |
56 | #include <linux/slab.h> | 56 | #include <linux/slab.h> |
57 | #include <linux/module.h> | 57 | #include <linux/module.h> |
58 | #include <asm/atomic.h> | 58 | #include <linux/atomic.h> |
59 | 59 | ||
60 | struct ttm_object_file { | 60 | struct ttm_object_file { |
61 | struct ttm_object_device *tdev; | 61 | struct ttm_object_device *tdev; |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index d948575717b..727e93daac3 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/dma-mapping.h> | 41 | #include <linux/dma-mapping.h> |
42 | 42 | ||
43 | #include <asm/atomic.h> | 43 | #include <linux/atomic.h> |
44 | 44 | ||
45 | #include "ttm/ttm_bo_driver.h" | 45 | #include "ttm/ttm_bo_driver.h" |
46 | #include "ttm/ttm_page_alloc.h" | 46 | #include "ttm/ttm_page_alloc.h" |
@@ -355,7 +355,7 @@ restart: | |||
355 | if (nr_free) | 355 | if (nr_free) |
356 | goto restart; | 356 | goto restart; |
357 | 357 | ||
358 | /* Not allowed to fall tough or break because | 358 | /* Not allowed to fall through or break because |
359 | * following context is inside spinlock while we are | 359 | * following context is inside spinlock while we are |
360 | * outside here. | 360 | * outside here. |
361 | */ | 361 | */ |
@@ -556,7 +556,7 @@ out: | |||
556 | } | 556 | } |
557 | 557 | ||
558 | /** | 558 | /** |
559 | * Fill the given pool if there isn't enough pages and requested number of | 559 | * Fill the given pool if there aren't enough pages and the requested number of |
560 | * pages is small. | 560 | * pages is small. |
561 | */ | 561 | */ |
562 | static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | 562 | static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, |
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
576 | 576 | ||
577 | pool->fill_lock = true; | 577 | pool->fill_lock = true; |
578 | 578 | ||
579 | /* If allocation request is small and there is not enough | 579 | /* If allocation request is small and there are not enough |
580 | * pages in pool we fill the pool first */ | 580 | * pages in a pool we fill the pool up first. */ |
581 | if (count < _manager->options.small | 581 | if (count < _manager->options.small |
582 | && count > pool->npages) { | 582 | && count > pool->npages) { |
583 | struct list_head new_pages; | 583 | struct list_head new_pages; |
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
614 | } | 614 | } |
615 | 615 | ||
616 | /** | 616 | /** |
617 | * Cut count nubmer of pages from the pool and put them to return list | 617 | * Cut 'count' number of pages from the pool and put them on the return list. |
618 | * | 618 | * |
619 | * @return count of pages still to allocate to fill the request. | 619 | * @return count of pages still required to fulfill the request. |
620 | */ | 620 | */ |
621 | static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, | 621 | static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, |
622 | struct list_head *pages, int ttm_flags, | 622 | struct list_head *pages, int ttm_flags, |
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, | |||
637 | goto out; | 637 | goto out; |
638 | } | 638 | } |
639 | /* find the last pages to include for requested number of pages. Split | 639 | /* find the last pages to include for requested number of pages. Split |
640 | * pool to begin and halves to reduce search space. */ | 640 | * pool to begin and halve it to reduce search space. */ |
641 | if (count <= pool->npages/2) { | 641 | if (count <= pool->npages/2) { |
642 | i = 0; | 642 | i = 0; |
643 | list_for_each(p, &pool->list) { | 643 | list_for_each(p, &pool->list) { |
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, | |||
651 | break; | 651 | break; |
652 | } | 652 | } |
653 | } | 653 | } |
654 | /* Cut count number of pages from pool */ | 654 | /* Cut 'count' number of pages from the pool */ |
655 | list_cut_position(pages, &pool->list, p); | 655 | list_cut_position(pages, &pool->list, p); |
656 | pool->npages -= count; | 656 | pool->npages -= count; |
657 | count = 0; | 657 | count = 0; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index dfe32e62bd9..8a38c91f4c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -313,7 +313,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, | |||
313 | unsigned int *handle) | 313 | unsigned int *handle) |
314 | { | 314 | { |
315 | if (handle) | 315 | if (handle) |
316 | handle = 0; | 316 | *handle = 0; |
317 | 317 | ||
318 | return 0; | 318 | return 0; |
319 | } | 319 | } |
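The missing dereference meant vmw_framebuffer_create_handle() zeroed its local copy of the pointer and returned success with the caller's handle untouched. The bug class, side by side:

static void clear_handle_wrong(unsigned int *handle)
{
	if (handle)
		handle = 0;	/* nulls the parameter; caller sees nothing */
}

static void clear_handle_right(unsigned int *handle)
{
	if (handle)
		*handle = 0;	/* writes through it; caller's value is 0 */
}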
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index f1a52f9e729..07ce02da78a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -585,11 +585,10 @@ int vmw_overlay_init(struct vmw_private *dev_priv) | |||
585 | return -ENOSYS; | 585 | return -ENOSYS; |
586 | } | 586 | } |
587 | 587 | ||
588 | overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); | 588 | overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); |
589 | if (!overlay) | 589 | if (!overlay) |
590 | return -ENOMEM; | 590 | return -ENOMEM; |
591 | 591 | ||
592 | memset(overlay, 0, sizeof(*overlay)); | ||
593 | mutex_init(&overlay->mutex); | 592 | mutex_init(&overlay->mutex); |
594 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | 593 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { |
595 | overlay->stream[i].buf = NULL; | 594 | overlay->stream[i].buf = NULL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5408b1b7996..bfe1bcce7f8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -612,11 +612,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
612 | srf->sizes[0].height == 64 && | 612 | srf->sizes[0].height == 64 && |
613 | srf->format == SVGA3D_A8R8G8B8) { | 613 | srf->format == SVGA3D_A8R8G8B8) { |
614 | 614 | ||
615 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | 615 | /* allocate image area and clear it */ |
616 | /* clear the image */ | 616 | srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); |
617 | if (srf->snooper.image) { | 617 | if (!srf->snooper.image) { |
618 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
619 | } else { | ||
620 | DRM_ERROR("Failed to allocate cursor_image\n"); | 618 | DRM_ERROR("Failed to allocate cursor_image\n"); |
621 | ret = -ENOMEM; | 619 | ret = -ENOMEM; |
622 | goto out_err1; | 620 | goto out_err1; |
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig new file mode 100644 index 00000000000..9a8cbdd9836 --- /dev/null +++ b/drivers/gpu/ion/Kconfig | |||
@@ -0,0 +1,17 @@ | |||
1 | menuconfig ION | ||
2 | tristate "Ion Memory Manager" | ||
3 | select GENERIC_ALLOCATOR | ||
4 | help | ||
5 | Choose this option to enable the ION Memory Manager. | ||
6 | |||
7 | config ION_IOMMU | ||
8 | bool | ||
9 | |||
10 | config ION_TEGRA | ||
11 | tristate "Ion for Tegra" | ||
12 | depends on ARCH_TEGRA && ION | ||
13 | select TEGRA_IOMMU_SMMU if !ARCH_TEGRA_2x_SOC | ||
14 | select ION_IOMMU if TEGRA_IOMMU_SMMU | ||
15 | help | ||
16 | Choose this option if you wish to use ion on an NVIDIA Tegra. | ||
17 | |||
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile new file mode 100644 index 00000000000..4ddc78e9d41 --- /dev/null +++ b/drivers/gpu/ion/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o | ||
2 | obj-$(CONFIG_ION_IOMMU) += ion_iommu_heap.o | ||
3 | obj-$(CONFIG_ION_TEGRA) += tegra/ | ||
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c new file mode 100644 index 00000000000..512ebc5cc8e --- /dev/null +++ b/drivers/gpu/ion/ion.c | |||
@@ -0,0 +1,1152 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ | ||
18 | |||
19 | #include <linux/device.h> | ||
20 | #include <linux/file.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/anon_inodes.h> | ||
23 | #include <linux/ion.h> | ||
24 | #include <linux/list.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/mm_types.h> | ||
28 | #include <linux/rbtree.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | #include <linux/debugfs.h> | ||
34 | |||
35 | #include "ion_priv.h" | ||
36 | #define DEBUG | ||
37 | |||
38 | /* this function should only be called while dev->lock is held */ | ||
39 | static void ion_buffer_add(struct ion_device *dev, | ||
40 | struct ion_buffer *buffer) | ||
41 | { | ||
42 | struct rb_node **p = &dev->buffers.rb_node; | ||
43 | struct rb_node *parent = NULL; | ||
44 | struct ion_buffer *entry; | ||
45 | |||
46 | while (*p) { | ||
47 | parent = *p; | ||
48 | entry = rb_entry(parent, struct ion_buffer, node); | ||
49 | |||
50 | if (buffer < entry) { | ||
51 | p = &(*p)->rb_left; | ||
52 | } else if (buffer > entry) { | ||
53 | p = &(*p)->rb_right; | ||
54 | } else { | ||
55 | pr_err("buffer already found.\n"); | ||
56 | BUG(); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | rb_link_node(&buffer->node, parent, p); | ||
61 | rb_insert_color(&buffer->node, &dev->buffers); | ||
62 | } | ||
63 | |||
64 | /* this function should only be called while dev->lock is held */ | ||
65 | static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | ||
66 | struct ion_device *dev, | ||
67 | unsigned long len, | ||
68 | unsigned long align, | ||
69 | unsigned long flags) | ||
70 | { | ||
71 | struct ion_buffer *buffer; | ||
72 | int ret; | ||
73 | |||
74 | buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); | ||
75 | if (!buffer) | ||
76 | return ERR_PTR(-ENOMEM); | ||
77 | |||
78 | buffer->heap = heap; | ||
79 | kref_init(&buffer->ref); | ||
80 | |||
81 | ret = heap->ops->allocate(heap, buffer, len, align, flags); | ||
82 | if (ret) { | ||
83 | kfree(buffer); | ||
84 | return ERR_PTR(ret); | ||
85 | } | ||
86 | buffer->dev = dev; | ||
87 | buffer->size = len; | ||
88 | mutex_init(&buffer->lock); | ||
89 | ion_buffer_add(dev, buffer); | ||
90 | return buffer; | ||
91 | } | ||
92 | |||
93 | static void ion_buffer_destroy(struct kref *kref) | ||
94 | { | ||
95 | struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); | ||
96 | struct ion_device *dev = buffer->dev; | ||
97 | |||
98 | buffer->heap->ops->free(buffer); | ||
99 | mutex_lock(&dev->lock); | ||
100 | rb_erase(&buffer->node, &dev->buffers); | ||
101 | mutex_unlock(&dev->lock); | ||
102 | kfree(buffer); | ||
103 | } | ||
104 | |||
105 | void ion_buffer_get(struct ion_buffer *buffer) | ||
106 | { | ||
107 | kref_get(&buffer->ref); | ||
108 | } | ||
109 | |||
110 | static int ion_buffer_put(struct ion_buffer *buffer) | ||
111 | { | ||
112 | return kref_put(&buffer->ref, ion_buffer_destroy); | ||
113 | } | ||
114 | |||
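ion_buffer_destroy() is never called directly: it is the release callback handed to kref_put(), so it runs exactly once, when the last reference drops. The same idiom recurs for handles and clients below. Its skeleton, as a kernel-style sketch:

#include <linux/kref.h>
#include <linux/slab.h>

struct thing {
	struct kref ref;
	/* payload ... */
};

static void thing_release(struct kref *kref)
{
	kfree(container_of(kref, struct thing, ref));
}

/* kref_init() starts the count at 1; each kref_get() must be matched
 * by a kref_put(&t->ref, thing_release), and the final put frees t. */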
115 | struct ion_handle *ion_handle_create(struct ion_client *client, | ||
116 | struct ion_buffer *buffer) | ||
117 | { | ||
118 | struct ion_handle *handle; | ||
119 | |||
120 | handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); | ||
121 | if (!handle) | ||
122 | return ERR_PTR(-ENOMEM); | ||
123 | kref_init(&handle->ref); | ||
124 | rb_init_node(&handle->node); | ||
125 | handle->client = client; | ||
126 | ion_buffer_get(buffer); | ||
127 | handle->buffer = buffer; | ||
128 | |||
129 | return handle; | ||
130 | } | ||
131 | |||
132 | static void ion_handle_destroy(struct kref *kref) | ||
133 | { | ||
134 | struct ion_handle *handle = container_of(kref, struct ion_handle, ref); | ||
135 | /* XXX Can a handle be destroyed while its map count is non-zero?: | ||
136 | if (handle->map_cnt) unmap | ||
137 | */ | ||
138 | ion_buffer_put(handle->buffer); | ||
139 | mutex_lock(&handle->client->lock); | ||
140 | if (!RB_EMPTY_NODE(&handle->node)) | ||
141 | rb_erase(&handle->node, &handle->client->handles); | ||
142 | mutex_unlock(&handle->client->lock); | ||
143 | kfree(handle); | ||
144 | } | ||
145 | |||
146 | struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) | ||
147 | { | ||
148 | return handle->buffer; | ||
149 | } | ||
150 | |||
151 | void ion_handle_get(struct ion_handle *handle) | ||
152 | { | ||
153 | kref_get(&handle->ref); | ||
154 | } | ||
155 | |||
156 | int ion_handle_put(struct ion_handle *handle) | ||
157 | { | ||
158 | return kref_put(&handle->ref, ion_handle_destroy); | ||
159 | } | ||
160 | |||
161 | static struct ion_handle *ion_handle_lookup(struct ion_client *client, | ||
162 | struct ion_buffer *buffer) | ||
163 | { | ||
164 | struct rb_node *n; | ||
165 | |||
166 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
167 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
168 | node); | ||
169 | if (handle->buffer == buffer) | ||
170 | return handle; | ||
171 | } | ||
172 | return NULL; | ||
173 | } | ||
174 | |||
175 | bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) | ||
176 | { | ||
177 | struct rb_node *n = client->handles.rb_node; | ||
178 | |||
179 | while (n) { | ||
180 | struct ion_handle *handle_node = rb_entry(n, struct ion_handle, | ||
181 | node); | ||
182 | if (handle < handle_node) | ||
183 | n = n->rb_left; | ||
184 | else if (handle > handle_node) | ||
185 | n = n->rb_right; | ||
186 | else | ||
187 | return true; | ||
188 | } | ||
189 | WARN(1, "invalid handle passed h=%p, comm=%s\n", handle, | ||
190 | current->group_leader->comm); | ||
191 | return false; | ||
192 | } | ||
193 | |||
194 | void ion_handle_add(struct ion_client *client, struct ion_handle *handle) | ||
195 | { | ||
196 | struct rb_node **p = &client->handles.rb_node; | ||
197 | struct rb_node *parent = NULL; | ||
198 | struct ion_handle *entry; | ||
199 | |||
200 | while (*p) { | ||
201 | parent = *p; | ||
202 | entry = rb_entry(parent, struct ion_handle, node); | ||
203 | |||
204 | if (handle < entry) | ||
205 | p = &(*p)->rb_left; | ||
206 | else if (handle > entry) | ||
207 | p = &(*p)->rb_right; | ||
208 | else | ||
209 | WARN(1, "%s: buffer already found.", __func__); | ||
210 | } | ||
211 | |||
212 | rb_link_node(&handle->node, parent, p); | ||
213 | rb_insert_color(&handle->node, &client->handles); | ||
214 | } | ||
215 | |||
216 | struct ion_handle *ion_alloc(struct ion_client *client, size_t len, | ||
217 | size_t align, unsigned int flags) | ||
218 | { | ||
219 | struct rb_node *n; | ||
220 | struct ion_handle *handle; | ||
221 | struct ion_device *dev = client->dev; | ||
222 | struct ion_buffer *buffer = NULL; | ||
223 | |||
224 | /* | ||
225 | * traverse the list of heaps available in this system in priority | ||
226 | * order. If the heap type is supported by the client, and matches the | ||
227 | * request of the caller allocate from it. Repeat until allocate has | ||
228 | * succeeded or all heaps have been tried | ||
229 | */ | ||
230 | mutex_lock(&dev->lock); | ||
231 | for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { | ||
232 | struct ion_heap *heap = rb_entry(n, struct ion_heap, node); | ||
233 | /* if the client doesn't support this heap type */ | ||
234 | if (!((1 << heap->type) & client->heap_mask)) | ||
235 | continue; | ||
236 | /* if the caller didn't specify this heap type */ | ||
237 | if (!((1 << heap->id) & flags)) | ||
238 | continue; | ||
239 | buffer = ion_buffer_create(heap, dev, len, align, flags); | ||
240 | if (!IS_ERR_OR_NULL(buffer)) | ||
241 | break; | ||
242 | } | ||
243 | mutex_unlock(&dev->lock); | ||
244 | |||
245 | if (IS_ERR_OR_NULL(buffer)) | ||
246 | return ERR_PTR(PTR_ERR(buffer)); | ||
247 | |||
248 | handle = ion_handle_create(client, buffer); | ||
249 | |||
250 | if (IS_ERR_OR_NULL(handle)) | ||
251 | goto end; | ||
252 | |||
253 | /* | ||
254 | * ion_buffer_create will create a buffer with a ref_cnt of 1, | ||
255 | * and ion_handle_create will take a second reference, drop one here | ||
256 | */ | ||
257 | ion_buffer_put(buffer); | ||
258 | |||
259 | mutex_lock(&client->lock); | ||
260 | ion_handle_add(client, handle); | ||
261 | mutex_unlock(&client->lock); | ||
262 | return handle; | ||
263 | |||
264 | end: | ||
265 | ion_buffer_put(buffer); | ||
266 | return handle; | ||
267 | } | ||
268 | |||
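ion_alloc() walks the heap rbtree in priority order and skips any heap that fails either of two bitmask gates: the heap's type must be in the client's heap_mask, and the heap's id must be in the caller's flags. The predicate, pulled out for clarity (hypothetical helper, plain C):

#include <stdbool.h>

static bool heap_eligible(unsigned int heap_type, unsigned int heap_id,
			  unsigned int client_heap_mask,
			  unsigned int requested_flags)
{
	if (!((1u << heap_type) & client_heap_mask))
		return false;	/* client can't use this heap type */
	if (!((1u << heap_id) & requested_flags))
		return false;	/* caller didn't ask for this heap */
	return true;
}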
269 | void ion_free(struct ion_client *client, struct ion_handle *handle) | ||
270 | { | ||
271 | bool valid_handle; | ||
272 | |||
273 | BUG_ON(client != handle->client); | ||
274 | |||
275 | mutex_lock(&client->lock); | ||
276 | valid_handle = ion_handle_validate(client, handle); | ||
277 | mutex_unlock(&client->lock); | ||
278 | |||
279 | if (!valid_handle) { | ||
280 | WARN(1, "%s: invalid handle passed to free.\n", __func__); | ||
281 | return; | ||
282 | } | ||
283 | ion_handle_put(handle); | ||
284 | } | ||
285 | |||
286 | static bool _ion_map(int *buffer_cnt, int *handle_cnt) | ||
287 | { | ||
288 | bool map; | ||
289 | |||
290 | BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); | ||
291 | |||
292 | if (*buffer_cnt) | ||
293 | map = false; | ||
294 | else | ||
295 | map = true; | ||
296 | if (*handle_cnt == 0) | ||
297 | (*buffer_cnt)++; | ||
298 | (*handle_cnt)++; | ||
299 | return map; | ||
300 | } | ||
301 | |||
302 | static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) | ||
303 | { | ||
304 | BUG_ON(*handle_cnt == 0); | ||
305 | (*handle_cnt)--; | ||
306 | if (*handle_cnt != 0) | ||
307 | return false; | ||
308 | BUG_ON(*buffer_cnt == 0); | ||
309 | (*buffer_cnt)--; | ||
310 | if (*buffer_cnt == 0) | ||
311 | return true; | ||
312 | return false; | ||
313 | } | ||
314 | |||
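_ion_map()/_ion_unmap() implement a two-level refcount: a per-handle count plus a per-buffer count that moves only when a handle's count crosses zero. The boolean result tells the caller whether to invoke the real heap map/unmap op, so those fire once per buffer no matter how many handles map it. The contract, condensed and exercised (userspace sketch, BUG_ON()s dropped):

#include <assert.h>
#include <stdbool.h>

static bool do_map(int *buf_cnt, int *h_cnt)
{
	bool first = (*buf_cnt == 0);	/* true: caller must really map */

	if (*h_cnt == 0)
		(*buf_cnt)++;
	(*h_cnt)++;
	return first;
}

static bool do_unmap(int *buf_cnt, int *h_cnt)
{
	if (--(*h_cnt) != 0)
		return false;
	return --(*buf_cnt) == 0;	/* true: last user, really unmap */
}

int main(void)
{
	int buf = 0, h1 = 0, h2 = 0;

	assert(do_map(&buf, &h1) == true);	/* first map: real work */
	assert(do_map(&buf, &h2) == false);	/* second handle: no-op */
	assert(do_unmap(&buf, &h2) == false);	/* buffer still mapped */
	assert(do_unmap(&buf, &h1) == true);	/* last put: real unmap */
	return 0;
}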
315 | int ion_phys(struct ion_client *client, struct ion_handle *handle, | ||
316 | ion_phys_addr_t *addr, size_t *len) | ||
317 | { | ||
318 | struct ion_buffer *buffer; | ||
319 | int ret; | ||
320 | |||
321 | mutex_lock(&client->lock); | ||
322 | if (!ion_handle_validate(client, handle)) { | ||
323 | mutex_unlock(&client->lock); | ||
324 | return -EINVAL; | ||
325 | } | ||
326 | |||
327 | buffer = handle->buffer; | ||
328 | |||
329 | if (!buffer->heap->ops->phys) { | ||
330 | pr_err("ion_phys is not implemented by this heap.\n"); | ||
331 | mutex_unlock(&client->lock); | ||
332 | return -ENODEV; | ||
333 | } | ||
334 | mutex_unlock(&client->lock); | ||
335 | ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) | ||
340 | { | ||
341 | struct ion_buffer *buffer; | ||
342 | void *vaddr; | ||
343 | |||
344 | mutex_lock(&client->lock); | ||
345 | if (!ion_handle_validate(client, handle)) { | ||
346 | WARN(1, "invalid handle passed to map_kernel.\n"); | ||
347 | mutex_unlock(&client->lock); | ||
348 | return ERR_PTR(-EINVAL); | ||
349 | } | ||
350 | |||
351 | buffer = handle->buffer; | ||
352 | mutex_lock(&buffer->lock); | ||
353 | |||
354 | if (!handle->buffer->heap->ops->map_kernel) { | ||
355 | pr_err("map_kernel is not implemented by this heap.\n"); | ||
356 | mutex_unlock(&buffer->lock); | ||
357 | mutex_unlock(&client->lock); | ||
358 | return ERR_PTR(-ENODEV); | ||
359 | } | ||
360 | |||
361 | if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) { | ||
362 | vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); | ||
363 | if (IS_ERR_OR_NULL(vaddr)) | ||
364 | _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt); | ||
365 | buffer->vaddr = vaddr; | ||
366 | } else { | ||
367 | vaddr = buffer->vaddr; | ||
368 | } | ||
369 | mutex_unlock(&buffer->lock); | ||
370 | mutex_unlock(&client->lock); | ||
371 | return vaddr; | ||
372 | } | ||
373 | |||
374 | struct scatterlist *ion_map_dma(struct ion_client *client, | ||
375 | struct ion_handle *handle) | ||
376 | { | ||
377 | struct ion_buffer *buffer; | ||
378 | struct scatterlist *sglist; | ||
379 | |||
380 | mutex_lock(&client->lock); | ||
381 | if (!ion_handle_validate(client, handle)) { | ||
382 | WARN(1, "invalid handle passed to map_dma.\n"); | ||
383 | mutex_unlock(&client->lock); | ||
384 | return ERR_PTR(-EINVAL); | ||
385 | } | ||
386 | buffer = handle->buffer; | ||
387 | mutex_lock(&buffer->lock); | ||
388 | |||
389 | if (!handle->buffer->heap->ops->map_dma) { | ||
390 | pr_err("map_dma is not implemented by this heap.\n"); | ||
391 | mutex_unlock(&buffer->lock); | ||
392 | mutex_unlock(&client->lock); | ||
393 | return ERR_PTR(-ENODEV); | ||
394 | } | ||
395 | if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { | ||
396 | sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); | ||
397 | if (IS_ERR_OR_NULL(sglist)) | ||
398 | _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); | ||
399 | buffer->sglist = sglist; | ||
400 | } else { | ||
401 | sglist = buffer->sglist; | ||
402 | } | ||
403 | mutex_unlock(&buffer->lock); | ||
404 | mutex_unlock(&client->lock); | ||
405 | return sglist; | ||
406 | } | ||
407 | |||
408 | struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap, | ||
409 | struct ion_buffer *buf, | ||
410 | unsigned long addr); | ||
411 | int ion_remap_dma(struct ion_client *client, | ||
412 | struct ion_handle *handle, | ||
413 | unsigned long addr) | ||
414 | { | ||
415 | struct ion_buffer *buffer; | ||
416 | int ret; | ||
417 | |||
418 | mutex_lock(&client->lock); | ||
419 | if (!ion_handle_validate(client, handle)) { | ||
420 | pr_err("invalid handle passed to remap_dma.\n"); | ||
421 | mutex_unlock(&client->lock); | ||
422 | return -EINVAL; | ||
423 | } | ||
424 | buffer = handle->buffer; | ||
425 | mutex_lock(&buffer->lock); | ||
426 | |||
427 | ret = iommu_heap_remap_dma(buffer->heap, buffer, addr); | ||
428 | |||
429 | mutex_unlock(&buffer->lock); | ||
430 | mutex_unlock(&client->lock); | ||
431 | return ret; | ||
432 | } | ||
433 | |||
434 | void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) | ||
435 | { | ||
436 | struct ion_buffer *buffer; | ||
437 | |||
438 | mutex_lock(&client->lock); | ||
439 | buffer = handle->buffer; | ||
440 | mutex_lock(&buffer->lock); | ||
441 | if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) { | ||
442 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); | ||
443 | buffer->vaddr = NULL; | ||
444 | } | ||
445 | mutex_unlock(&buffer->lock); | ||
446 | mutex_unlock(&client->lock); | ||
447 | } | ||
448 | |||
449 | void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) | ||
450 | { | ||
451 | struct ion_buffer *buffer; | ||
452 | |||
453 | mutex_lock(&client->lock); | ||
454 | buffer = handle->buffer; | ||
455 | mutex_lock(&buffer->lock); | ||
456 | if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { | ||
457 | buffer->heap->ops->unmap_dma(buffer->heap, buffer); | ||
458 | buffer->sglist = NULL; | ||
459 | } | ||
460 | mutex_unlock(&buffer->lock); | ||
461 | mutex_unlock(&client->lock); | ||
462 | } | ||
463 | |||
464 | |||
465 | struct ion_buffer *ion_share(struct ion_client *client, | ||
466 | struct ion_handle *handle) | ||
467 | { | ||
468 | bool valid_handle; | ||
469 | |||
470 | mutex_lock(&client->lock); | ||
471 | valid_handle = ion_handle_validate(client, handle); | ||
472 | mutex_unlock(&client->lock); | ||
473 | if (!valid_handle) { | ||
474 | WARN(1, "%s: invalid handle passed to share.\n", __func__); | ||
475 | return ERR_PTR(-EINVAL); | ||
476 | } | ||
477 | |||
478 | /* do not take an extra reference here, the burden is on the caller | ||
479 | * to make sure the buffer doesn't go away while it's passing it | ||
480 | * to another client -- ion_free should not be called on this handle | ||
481 | * until the buffer has been imported into the other client | ||
482 | */ | ||
483 | return handle->buffer; | ||
484 | } | ||
485 | |||
486 | struct ion_handle *ion_import(struct ion_client *client, | ||
487 | struct ion_buffer *buffer) | ||
488 | { | ||
489 | struct ion_handle *handle = NULL; | ||
490 | |||
491 | mutex_lock(&client->lock); | ||
492 | /* if a handle exists for this buffer just take a reference to it */ | ||
493 | handle = ion_handle_lookup(client, buffer); | ||
494 | if (!IS_ERR_OR_NULL(handle)) { | ||
495 | ion_handle_get(handle); | ||
496 | goto end; | ||
497 | } | ||
498 | handle = ion_handle_create(client, buffer); | ||
499 | if (IS_ERR_OR_NULL(handle)) { | ||
500 | pr_err("error during handle create\n"); | ||
501 | goto end; | ||
502 | } | ||
503 | ion_handle_add(client, handle); | ||
504 | end: | ||
505 | mutex_unlock(&client->lock); | ||
506 | return handle; | ||
507 | } | ||
508 | |||
509 | static const struct file_operations ion_share_fops; | ||
510 | |||
511 | struct ion_handle *ion_import_fd(struct ion_client *client, int fd) | ||
512 | { | ||
513 | struct file *file = fget(fd); | ||
514 | struct ion_handle *handle; | ||
515 | |||
516 | if (!file) { | ||
517 | pr_err("imported fd not found in file table.\n"); | ||
518 | return ERR_PTR(-EINVAL); | ||
519 | } | ||
520 | if (file->f_op != &ion_share_fops) { | ||
521 | pr_err("imported file is not a shared ion file.\n"); | ||
522 | handle = ERR_PTR(-EINVAL); | ||
523 | goto end; | ||
524 | } | ||
525 | handle = ion_import(client, file->private_data); | ||
526 | end: | ||
527 | fput(file); | ||
528 | return handle; | ||
529 | } | ||
530 | |||
531 | static int ion_debug_client_show(struct seq_file *s, void *unused) | ||
532 | { | ||
533 | struct ion_client *client = s->private; | ||
534 | struct rb_node *n; | ||
535 | size_t sizes[ION_NUM_HEAPS] = {0}; | ||
536 | const char *names[ION_NUM_HEAPS] = {0}; | ||
537 | int i; | ||
538 | |||
539 | mutex_lock(&client->lock); | ||
540 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
541 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
542 | node); | ||
543 | enum ion_heap_type type = handle->buffer->heap->type; | ||
544 | |||
545 | if (!names[type]) | ||
546 | names[type] = handle->buffer->heap->name; | ||
547 | sizes[type] += handle->buffer->size; | ||
548 | } | ||
549 | mutex_unlock(&client->lock); | ||
550 | |||
551 | seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); | ||
552 | for (i = 0; i < ION_NUM_HEAPS; i++) { | ||
553 | if (!names[i]) | ||
554 | continue; | ||
555 | seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i], | ||
556 | atomic_read(&client->ref.refcount)); | ||
557 | } | ||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static int ion_debug_client_open(struct inode *inode, struct file *file) | ||
562 | { | ||
563 | return single_open(file, ion_debug_client_show, inode->i_private); | ||
564 | } | ||
565 | |||
566 | static const struct file_operations debug_client_fops = { | ||
567 | .open = ion_debug_client_open, | ||
568 | .read = seq_read, | ||
569 | .llseek = seq_lseek, | ||
570 | .release = single_release, | ||
571 | }; | ||
572 | |||
573 | static struct ion_client *ion_client_lookup(struct ion_device *dev, | ||
574 | struct task_struct *task) | ||
575 | { | ||
576 | struct rb_node *n = dev->user_clients.rb_node; | ||
577 | struct ion_client *client; | ||
578 | |||
579 | mutex_lock(&dev->lock); | ||
580 | while (n) { | ||
581 | client = rb_entry(n, struct ion_client, node); | ||
582 | if (task == client->task) { | ||
583 | ion_client_get(client); | ||
584 | mutex_unlock(&dev->lock); | ||
585 | return client; | ||
586 | } else if (task < client->task) { | ||
587 | n = n->rb_left; | ||
588 | } else if (task > client->task) { | ||
589 | n = n->rb_right; | ||
590 | } | ||
591 | } | ||
592 | mutex_unlock(&dev->lock); | ||
593 | return NULL; | ||
594 | } | ||
595 | |||
596 | struct ion_client *ion_client_create(struct ion_device *dev, | ||
597 | unsigned int heap_mask, | ||
598 | const char *name) | ||
599 | { | ||
600 | struct ion_client *client; | ||
601 | struct task_struct *task; | ||
602 | struct rb_node **p; | ||
603 | struct rb_node *parent = NULL; | ||
604 | struct ion_client *entry; | ||
605 | char debug_name[64]; | ||
606 | pid_t pid; | ||
607 | |||
608 | get_task_struct(current->group_leader); | ||
609 | task_lock(current->group_leader); | ||
610 | pid = task_pid_nr(current->group_leader); | ||
611 | /* don't bother to store task struct for kernel threads, | ||
612 | they can't be killed anyway */ | ||
613 | if (current->group_leader->flags & PF_KTHREAD) { | ||
614 | put_task_struct(current->group_leader); | ||
615 | task = NULL; | ||
616 | } else { | ||
617 | task = current->group_leader; | ||
618 | } | ||
619 | task_unlock(current->group_leader); | ||
620 | |||
621 | /* if this isn't a kernel thread, see if a client already | ||
622 | exists */ | ||
623 | if (task) { | ||
624 | client = ion_client_lookup(dev, task); | ||
625 | if (!IS_ERR_OR_NULL(client)) { | ||
626 | put_task_struct(current->group_leader); | ||
627 | return client; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); | ||
632 | if (!client) { | ||
633 | put_task_struct(current->group_leader); | ||
634 | return ERR_PTR(-ENOMEM); | ||
635 | } | ||
636 | |||
637 | client->dev = dev; | ||
638 | client->handles = RB_ROOT; | ||
639 | mutex_init(&client->lock); | ||
640 | client->name = name; | ||
641 | client->heap_mask = heap_mask; | ||
642 | client->task = task; | ||
643 | client->pid = pid; | ||
644 | kref_init(&client->ref); | ||
645 | |||
646 | mutex_lock(&dev->lock); | ||
647 | if (task) { | ||
648 | p = &dev->user_clients.rb_node; | ||
649 | while (*p) { | ||
650 | parent = *p; | ||
651 | entry = rb_entry(parent, struct ion_client, node); | ||
652 | |||
653 | if (task < entry->task) | ||
654 | p = &(*p)->rb_left; | ||
655 | else if (task > entry->task) | ||
656 | p = &(*p)->rb_right; | ||
657 | } | ||
658 | rb_link_node(&client->node, parent, p); | ||
659 | rb_insert_color(&client->node, &dev->user_clients); | ||
660 | } else { | ||
661 | p = &dev->kernel_clients.rb_node; | ||
662 | while (*p) { | ||
663 | parent = *p; | ||
664 | entry = rb_entry(parent, struct ion_client, node); | ||
665 | |||
666 | if (client < entry) | ||
667 | p = &(*p)->rb_left; | ||
668 | else if (client > entry) | ||
669 | p = &(*p)->rb_right; | ||
670 | } | ||
671 | rb_link_node(&client->node, parent, p); | ||
672 | rb_insert_color(&client->node, &dev->kernel_clients); | ||
673 | } | ||
674 | |||
675 | snprintf(debug_name, 64, "%d", client->pid); | ||
676 | client->debug_root = debugfs_create_file(debug_name, 0664, | ||
677 | dev->debug_root, client, | ||
678 | &debug_client_fops); | ||
679 | mutex_unlock(&dev->lock); | ||
680 | |||
681 | return client; | ||
682 | } | ||
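
For an in-kernel user the client lifecycle is just create/destroy; a minimal sketch, where my_ion_device stands in for whatever struct ion_device the platform registered:

/* Hypothetical driver-side usage; my_ion_device is assumed. */
struct ion_client *client;

client = ion_client_create(my_ion_device, -1 /* any heap */, "my-driver");
if (IS_ERR_OR_NULL(client))
	return -ENODEV;
/* ... allocate and map buffers through this client ... */
ion_client_destroy(client);
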
683 | |||
684 | static void _ion_client_destroy(struct kref *kref) | ||
685 | { | ||
686 | struct ion_client *client = container_of(kref, struct ion_client, ref); | ||
687 | struct ion_device *dev = client->dev; | ||
688 | struct rb_node *n; | ||
689 | |||
690 | pr_debug("\n"); | ||
691 | while ((n = rb_first(&client->handles))) { | ||
692 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | ||
693 | node); | ||
694 | ion_handle_destroy(&handle->ref); | ||
695 | } | ||
696 | mutex_lock(&dev->lock); | ||
697 | if (client->task) { | ||
698 | rb_erase(&client->node, &dev->user_clients); | ||
699 | put_task_struct(client->task); | ||
700 | } else { | ||
701 | rb_erase(&client->node, &dev->kernel_clients); | ||
702 | } | ||
703 | debugfs_remove_recursive(client->debug_root); | ||
704 | mutex_unlock(&dev->lock); | ||
705 | |||
706 | kfree(client); | ||
707 | } | ||
708 | |||
709 | void ion_client_get(struct ion_client *client) | ||
710 | { | ||
711 | kref_get(&client->ref); | ||
712 | } | ||
713 | |||
714 | int ion_client_put(struct ion_client *client) | ||
715 | { | ||
716 | return kref_put(&client->ref, _ion_client_destroy); | ||
717 | } | ||
718 | |||
719 | void ion_client_destroy(struct ion_client *client) | ||
720 | { | ||
721 | ion_client_put(client); | ||
722 | } | ||
723 | |||
724 | static int ion_share_release(struct inode *inode, struct file* file) | ||
725 | { | ||
726 | struct ion_buffer *buffer = file->private_data; | ||
727 | |||
728 | pr_debug("\n"); | ||
729 | /* drop the reference to the buffer -- this prevents the | ||
730 | buffer from going away because the client holding it exited | ||
731 | while it was being passed */ | ||
732 | ion_buffer_put(buffer); | ||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | static void ion_vma_open(struct vm_area_struct *vma) | ||
737 | { | ||
738 | |||
739 | struct ion_buffer *buffer = vma->vm_file->private_data; | ||
740 | struct ion_handle *handle = vma->vm_private_data; | ||
741 | struct ion_client *client; | ||
742 | |||
743 | pr_debug("\n"); | ||
744 | /* check that the client still exists and take a reference so | ||
745 | it can't go away until this vma is closed */ | ||
746 | client = ion_client_lookup(buffer->dev, current->group_leader); | ||
747 | if (IS_ERR_OR_NULL(client)) { | ||
748 | vma->vm_private_data = NULL; | ||
749 | return; | ||
750 | } | ||
751 | ion_buffer_get(buffer); | ||
752 | ion_handle_get(handle); | ||
753 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
754 | atomic_read(&client->ref.refcount), | ||
755 | atomic_read(&handle->ref.refcount), | ||
756 | atomic_read(&buffer->ref.refcount)); | ||
757 | } | ||
758 | |||
759 | static void ion_vma_close(struct vm_area_struct *vma) | ||
760 | { | ||
761 | struct ion_handle *handle = vma->vm_private_data; | ||
762 | struct ion_buffer *buffer = vma->vm_file->private_data; | ||
763 | struct ion_client *client; | ||
764 | |||
765 | pr_debug("\n"); | ||
766 | /* this indicates the client is gone, nothing to do here */ | ||
767 | if (!handle) | ||
768 | return; | ||
769 | client = handle->client; | ||
770 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
771 | atomic_read(&client->ref.refcount), | ||
772 | atomic_read(&handle->ref.refcount), | ||
773 | atomic_read(&buffer->ref.refcount)); | ||
774 | ion_handle_put(handle); | ||
775 | ion_client_put(client); | ||
776 | ion_buffer_put(buffer); | ||
777 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
778 | atomic_read(&client->ref.refcount), | ||
779 | atomic_read(&handle->ref.refcount), | ||
780 | atomic_read(&buffer->ref.refcount)); | ||
781 | } | ||
782 | |||
783 | static struct vm_operations_struct ion_vm_ops = { | ||
784 | .open = ion_vma_open, | ||
785 | .close = ion_vma_close, | ||
786 | }; | ||
787 | |||
788 | static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) | ||
789 | { | ||
790 | struct ion_buffer *buffer = file->private_data; | ||
791 | unsigned long size = vma->vm_end - vma->vm_start; | ||
792 | struct ion_client *client; | ||
793 | struct ion_handle *handle; | ||
794 | int ret; | ||
795 | |||
796 | pr_debug("\n"); | ||
797 | /* make sure the client still exists; it's possible for the client to | ||
798 | have gone away while the map/share fd stayed around. Take a | ||
799 | reference so the client can't go away while this mapping exists */ | ||
800 | client = ion_client_lookup(buffer->dev, current->group_leader); | ||
801 | if (IS_ERR_OR_NULL(client)) { | ||
802 | WARN(1, "trying to mmap an ion handle in a process with no " | ||
803 | "ion client\n"); | ||
804 | return -EINVAL; | ||
805 | } | ||
806 | |||
807 | if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > | ||
808 | buffer->size)) { | ||
809 | WARN(1, "trying to map a larger area than the handle " | ||
810 | "has available\n"); | ||
811 | ret = -EINVAL; | ||
812 | goto err; | ||
813 | } | ||
814 | |||
815 | /* find the handle and take a reference to it */ | ||
816 | handle = ion_import(client, buffer); | ||
817 | if (IS_ERR_OR_NULL(handle)) { | ||
818 | ret = -EINVAL; | ||
819 | goto err; | ||
820 | } | ||
821 | ion_buffer_get(buffer); | ||
822 | |||
823 | if (!handle->buffer->heap->ops->map_user) { | ||
824 | pr_err("this heap does not define a method for mapping " | ||
825 | "to userspace\n"); | ||
826 | ret = -EINVAL; | ||
827 | goto err1; | ||
828 | } | ||
829 | |||
830 | mutex_lock(&buffer->lock); | ||
831 | /* now map it to userspace */ | ||
832 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); | ||
833 | mutex_unlock(&buffer->lock); | ||
834 | if (ret) { | ||
835 | pr_err("failure mapping buffer to userspace\n"); | ||
836 | goto err1; | ||
837 | } | ||
838 | |||
839 | vma->vm_ops = &ion_vm_ops; | ||
840 | /* move the handle into the vm_private_data so we can access it from | ||
841 | vma_open/close */ | ||
842 | vma->vm_private_data = handle; | ||
843 | pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", | ||
844 | atomic_read(&client->ref.refcount), | ||
845 | atomic_read(&handle->ref.refcount), | ||
846 | atomic_read(&buffer->ref.refcount)); | ||
847 | return 0; | ||
848 | |||
849 | err1: | ||
850 | /* drop the reference to the handle */ | ||
851 | ion_handle_put(handle); | ||
852 | err: | ||
853 | /* drop the reference to the client */ | ||
854 | ion_client_put(client); | ||
855 | return ret; | ||
856 | } | ||
857 | |||
858 | static const struct file_operations ion_share_fops = { | ||
859 | .owner = THIS_MODULE, | ||
860 | .release = ion_share_release, | ||
861 | .mmap = ion_share_mmap, | ||
862 | }; | ||
863 | |||
864 | static int ion_ioctl_share(struct file *parent, struct ion_client *client, | ||
865 | struct ion_handle *handle) | ||
866 | { | ||
867 | int fd = get_unused_fd(); | ||
868 | struct file *file; | ||
869 | |||
870 | if (fd < 0) | ||
871 | return -ENFILE; | ||
872 | |||
873 | file = anon_inode_getfile("ion_share_fd", &ion_share_fops, | ||
874 | handle->buffer, O_RDWR); | ||
875 | if (IS_ERR_OR_NULL(file)) | ||
876 | goto err; | ||
877 | ion_buffer_get(handle->buffer); | ||
878 | fd_install(fd, file); | ||
879 | |||
880 | return fd; | ||
881 | |||
882 | err: | ||
883 | put_unused_fd(fd); | ||
884 | return -ENFILE; | ||
885 | } | ||
886 | |||
887 | static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
888 | { | ||
889 | struct ion_client *client = filp->private_data; | ||
890 | |||
891 | switch (cmd) { | ||
892 | case ION_IOC_ALLOC: | ||
893 | { | ||
894 | struct ion_allocation_data data; | ||
895 | |||
896 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
897 | return -EFAULT; | ||
898 | data.handle = ion_alloc(client, data.len, data.align, | ||
899 | data.flags); | ||
900 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) | ||
901 | return -EFAULT; | ||
902 | break; | ||
903 | } | ||
904 | case ION_IOC_FREE: | ||
905 | { | ||
906 | struct ion_handle_data data; | ||
907 | bool valid; | ||
908 | |||
909 | if (copy_from_user(&data, (void __user *)arg, | ||
910 | sizeof(struct ion_handle_data))) | ||
911 | return -EFAULT; | ||
912 | mutex_lock(&client->lock); | ||
913 | valid = ion_handle_validate(client, data.handle); | ||
914 | mutex_unlock(&client->lock); | ||
915 | if (!valid) | ||
916 | return -EINVAL; | ||
917 | ion_free(client, data.handle); | ||
918 | break; | ||
919 | } | ||
920 | case ION_IOC_MAP: | ||
921 | case ION_IOC_SHARE: | ||
922 | { | ||
923 | struct ion_fd_data data; | ||
924 | |||
925 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
926 | return -EFAULT; | ||
927 | mutex_lock(&client->lock); | ||
928 | if (!ion_handle_validate(client, data.handle)) { | ||
929 | WARN(1, "invalid handle passed to share ioctl.\n"); | ||
930 | mutex_unlock(&client->lock); | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | data.fd = ion_ioctl_share(filp, client, data.handle); | ||
934 | mutex_unlock(&client->lock); | ||
935 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) | ||
936 | return -EFAULT; | ||
937 | break; | ||
938 | } | ||
939 | case ION_IOC_IMPORT: | ||
940 | { | ||
941 | struct ion_fd_data data; | ||
942 | if (copy_from_user(&data, (void __user *)arg, | ||
943 | sizeof(struct ion_fd_data))) | ||
944 | return -EFAULT; | ||
945 | |||
946 | data.handle = ion_import_fd(client, data.fd); | ||
947 | if (IS_ERR(data.handle)) | ||
948 | data.handle = NULL; | ||
949 | if (copy_to_user((void __user *)arg, &data, | ||
950 | sizeof(struct ion_fd_data))) | ||
951 | return -EFAULT; | ||
952 | break; | ||
953 | } | ||
954 | case ION_IOC_CUSTOM: | ||
955 | { | ||
956 | struct ion_device *dev = client->dev; | ||
957 | struct ion_custom_data data; | ||
958 | |||
959 | if (!dev->custom_ioctl) | ||
960 | return -ENOTTY; | ||
961 | if (copy_from_user(&data, (void __user *)arg, | ||
962 | sizeof(struct ion_custom_data))) | ||
963 | return -EFAULT; | ||
964 | return dev->custom_ioctl(client, data.cmd, data.arg); | ||
965 | } | ||
966 | default: | ||
967 | return -ENOTTY; | ||
968 | } | ||
969 | return 0; | ||
970 | } | ||
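
From userspace the same flow is driven through /dev/ion; a hedged sketch of the ALLOC/SHARE path handled above, with field names taken from the ioctl code, error checks omitted, and the struct layouts assumed to live in the linux/ion.h header this patch adds:

/* Hypothetical userspace sketch of the ioctl flow above. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ion.h>

int fd = open("/dev/ion", O_RDONLY);
struct ion_allocation_data alloc = {
	.len = 4096,
	.align = 4096,
	.flags = 0xf,			/* heap mask, assumed */
};
struct ion_fd_data share;

ioctl(fd, ION_IOC_ALLOC, &alloc);	/* fills alloc.handle */
share.handle = alloc.handle;
ioctl(fd, ION_IOC_SHARE, &share);	/* fills share.fd */
void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
	       share.fd, 0);
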
971 | |||
972 | static int ion_release(struct inode *inode, struct file *file) | ||
973 | { | ||
974 | struct ion_client *client = file->private_data; | ||
975 | |||
976 | pr_debug("\n"); | ||
977 | ion_client_put(client); | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | static int ion_open(struct inode *inode, struct file *file) | ||
982 | { | ||
983 | struct miscdevice *miscdev = file->private_data; | ||
984 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); | ||
985 | struct ion_client *client; | ||
986 | |||
987 | pr_debug("\n"); | ||
988 | client = ion_client_create(dev, -1, "user"); | ||
989 | if (IS_ERR_OR_NULL(client)) | ||
990 | return PTR_ERR(client); | ||
991 | file->private_data = client; | ||
992 | |||
993 | return 0; | ||
994 | } | ||
995 | |||
996 | static const struct file_operations ion_fops = { | ||
997 | .owner = THIS_MODULE, | ||
998 | .open = ion_open, | ||
999 | .release = ion_release, | ||
1000 | .unlocked_ioctl = ion_ioctl, | ||
1001 | }; | ||
1002 | |||
1003 | static size_t ion_debug_heap_total(struct ion_client *client, | ||
1004 | enum ion_heap_type type) | ||
1005 | { | ||
1006 | size_t size = 0; | ||
1007 | struct rb_node *n; | ||
1008 | |||
1009 | mutex_lock(&client->lock); | ||
1010 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | ||
1011 | struct ion_handle *handle = rb_entry(n, | ||
1012 | struct ion_handle, | ||
1013 | node); | ||
1014 | if (handle->buffer->heap->type == type) | ||
1015 | size += handle->buffer->size; | ||
1016 | } | ||
1017 | mutex_unlock(&client->lock); | ||
1018 | return size; | ||
1019 | } | ||
1020 | |||
1021 | static int ion_debug_heap_show(struct seq_file *s, void *unused) | ||
1022 | { | ||
1023 | struct ion_heap *heap = s->private; | ||
1024 | struct ion_device *dev = heap->dev; | ||
1025 | struct rb_node *n; | ||
1026 | |||
1027 | seq_printf(s, "%16.16s %16.16s %16.16s\n", "client", "pid", "size"); | ||
1028 | for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { | ||
1029 | struct ion_client *client = rb_entry(n, struct ion_client, | ||
1030 | node); | ||
1031 | char task_comm[TASK_COMM_LEN]; | ||
1032 | size_t size = ion_debug_heap_total(client, heap->type); | ||
1033 | if (!size) | ||
1034 | continue; | ||
1035 | |||
1036 | get_task_comm(task_comm, client->task); | ||
1037 | seq_printf(s, "%16.16s %16d %16zu\n", task_comm, client->pid, | ||
1038 | size); | ||
1039 | } | ||
1040 | |||
1041 | for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { | ||
1042 | struct ion_client *client = rb_entry(n, struct ion_client, | ||
1043 | node); | ||
1044 | size_t size = ion_debug_heap_total(client, heap->type); | ||
1045 | if (!size) | ||
1046 | continue; | ||
1047 | seq_printf(s, "%16.16s %16d %16zu\n", client->name, client->pid, | ||
1048 | size); | ||
1049 | } | ||
1050 | return 0; | ||
1051 | } | ||
1052 | |||
1053 | static int ion_debug_heap_open(struct inode *inode, struct file *file) | ||
1054 | { | ||
1055 | return single_open(file, ion_debug_heap_show, inode->i_private); | ||
1056 | } | ||
1057 | |||
1058 | static const struct file_operations debug_heap_fops = { | ||
1059 | .open = ion_debug_heap_open, | ||
1060 | .read = seq_read, | ||
1061 | .llseek = seq_lseek, | ||
1062 | .release = single_release, | ||
1063 | }; | ||
1064 | |||
1065 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) | ||
1066 | { | ||
1067 | struct rb_node **p = &dev->heaps.rb_node; | ||
1068 | struct rb_node *parent = NULL; | ||
1069 | struct ion_heap *entry; | ||
1070 | |||
1071 | heap->dev = dev; | ||
1072 | mutex_lock(&dev->lock); | ||
1073 | while (*p) { | ||
1074 | parent = *p; | ||
1075 | entry = rb_entry(parent, struct ion_heap, node); | ||
1076 | |||
1077 | if (heap->id < entry->id) { | ||
1078 | p = &(*p)->rb_left; | ||
1079 | } else if (heap->id > entry->id ) { | ||
1080 | p = &(*p)->rb_right; | ||
1081 | } else { | ||
1082 | pr_err("can not insert multiple heaps with " | ||
1083 | "id %d\n", heap->id); | ||
1084 | goto end; | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | rb_link_node(&heap->node, parent, p); | ||
1089 | rb_insert_color(&heap->node, &dev->heaps); | ||
1090 | debugfs_create_file(heap->name, 0664, dev->debug_root, heap, | ||
1091 | &debug_heap_fops); | ||
1092 | end: | ||
1093 | mutex_unlock(&dev->lock); | ||
1094 | } | ||
1095 | |||
1096 | struct ion_device *ion_device_create(long (*custom_ioctl) | ||
1097 | (struct ion_client *client, | ||
1098 | unsigned int cmd, | ||
1099 | unsigned long arg)) | ||
1100 | { | ||
1101 | struct ion_device *idev; | ||
1102 | int ret; | ||
1103 | |||
1104 | idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); | ||
1105 | if (!idev) | ||
1106 | return ERR_PTR(-ENOMEM); | ||
1107 | |||
1108 | idev->dev.minor = MISC_DYNAMIC_MINOR; | ||
1109 | idev->dev.name = "ion"; | ||
1110 | idev->dev.fops = &ion_fops; | ||
1111 | idev->dev.parent = NULL; | ||
1112 | ret = misc_register(&idev->dev); | ||
1113 | if (ret) { | ||
1114 | pr_err("ion: failed to register misc device.\n"); | ||
1115 | return ERR_PTR(ret); | ||
1116 | } | ||
1117 | |||
1118 | idev->debug_root = debugfs_create_dir("ion", NULL); | ||
1119 | if (IS_ERR_OR_NULL(idev->debug_root)) | ||
1120 | pr_err("ion: failed to create debug files.\n"); | ||
1121 | |||
1122 | idev->custom_ioctl = custom_ioctl; | ||
1123 | idev->buffers = RB_ROOT; | ||
1124 | mutex_init(&idev->lock); | ||
1125 | idev->heaps = RB_ROOT; | ||
1126 | idev->user_clients = RB_ROOT; | ||
1127 | idev->kernel_clients = RB_ROOT; | ||
1128 | return idev; | ||
1129 | } | ||
1130 | |||
1131 | void ion_device_destroy(struct ion_device *dev) | ||
1132 | { | ||
1133 | misc_deregister(&dev->dev); | ||
1134 | /* XXX need to free the heaps and clients ? */ | ||
1135 | kfree(dev); | ||
1136 | } | ||
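
Bringing the pieces together, platform code would pair ion_device_create() with ion_heap_create()/ion_device_add_heap(); a sketch, assuming the conventional struct ion_platform_data with nr and heaps[] fields:

/* Hypothetical board-file sketch; the pdata layout is assumed. */
struct ion_device *idev = ion_device_create(NULL /* no custom ioctl */);
int i;

for (i = 0; i < pdata->nr; i++) {
	struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

	if (!IS_ERR_OR_NULL(heap))
		ion_device_add_heap(idev, heap);
}
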
1137 | |||
1138 | struct ion_client *ion_client_get_file(int fd) | ||
1139 | { | ||
1140 | struct ion_client *client = ERR_PTR(-EFAULT); | ||
1141 | struct file *f = fget(fd); | ||
1142 | if (!f) | ||
1143 | return ERR_PTR(-EINVAL); | ||
1144 | |||
1145 | if (f->f_op == &ion_fops) { | ||
1146 | client = f->private_data; | ||
1147 | ion_client_get(client); | ||
1148 | } | ||
1149 | |||
1150 | fput(f); | ||
1151 | return client; | ||
1152 | } | ||
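
ion_client_get_file() lets another driver resolve the ion client behind an fd that userspace handed it; the put below pairs with the get taken inside:

/* Hypothetical sketch: resolve a client from a /dev/ion fd. */
struct ion_client *client = ion_client_get_file(fd);

if (IS_ERR(client))
	return PTR_ERR(client);
/* ... import or look up handles on this client's behalf ... */
ion_client_put(client);
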
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c new file mode 100644 index 00000000000..606adae13f4 --- /dev/null +++ b/drivers/gpu/ion/ion_carveout_heap.c | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_carveout_heap.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/spinlock.h> | ||
17 | |||
18 | #include <linux/err.h> | ||
19 | #include <linux/genalloc.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/ion.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/scatterlist.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | #include "ion_priv.h" | ||
27 | |||
28 | #include <asm/mach/map.h> | ||
29 | |||
30 | struct ion_carveout_heap { | ||
31 | struct ion_heap heap; | ||
32 | struct gen_pool *pool; | ||
33 | ion_phys_addr_t base; | ||
34 | }; | ||
35 | |||
36 | ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, | ||
37 | unsigned long size, | ||
38 | unsigned long align) | ||
39 | { | ||
40 | struct ion_carveout_heap *carveout_heap = | ||
41 | container_of(heap, struct ion_carveout_heap, heap); | ||
42 | unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); | ||
43 | |||
44 | if (!offset) | ||
45 | return ION_CARVEOUT_ALLOCATE_FAIL; | ||
46 | |||
47 | return offset; | ||
48 | } | ||
49 | |||
50 | void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | ||
51 | unsigned long size) | ||
52 | { | ||
53 | struct ion_carveout_heap *carveout_heap = | ||
54 | container_of(heap, struct ion_carveout_heap, heap); | ||
55 | |||
56 | if (addr == ION_CARVEOUT_ALLOCATE_FAIL) | ||
57 | return; | ||
58 | gen_pool_free(carveout_heap->pool, addr, size); | ||
59 | } | ||
60 | |||
61 | static int ion_carveout_heap_phys(struct ion_heap *heap, | ||
62 | struct ion_buffer *buffer, | ||
63 | ion_phys_addr_t *addr, size_t *len) | ||
64 | { | ||
65 | *addr = buffer->priv_phys; | ||
66 | *len = buffer->size; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static int ion_carveout_heap_allocate(struct ion_heap *heap, | ||
71 | struct ion_buffer *buffer, | ||
72 | unsigned long size, unsigned long align, | ||
73 | unsigned long flags) | ||
74 | { | ||
75 | buffer->priv_phys = ion_carveout_allocate(heap, size, align); | ||
76 | return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0; | ||
77 | } | ||
78 | |||
79 | static void ion_carveout_heap_free(struct ion_buffer *buffer) | ||
80 | { | ||
81 | struct ion_heap *heap = buffer->heap; | ||
82 | |||
83 | ion_carveout_free(heap, buffer->priv_phys, buffer->size); | ||
84 | buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; | ||
85 | } | ||
86 | |||
87 | struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap, | ||
88 | struct ion_buffer *buffer) | ||
89 | { | ||
90 | return ERR_PTR(-EINVAL); | ||
91 | } | ||
92 | |||
93 | void ion_carveout_heap_unmap_dma(struct ion_heap *heap, | ||
94 | struct ion_buffer *buffer) | ||
95 | { | ||
96 | return; | ||
97 | } | ||
98 | |||
99 | void *ion_carveout_heap_map_kernel(struct ion_heap *heap, | ||
100 | struct ion_buffer *buffer) | ||
101 | { | ||
102 | return __arch_ioremap(buffer->priv_phys, buffer->size, | ||
103 | MT_MEMORY_NONCACHED); | ||
104 | } | ||
105 | |||
106 | void ion_carveout_heap_unmap_kernel(struct ion_heap *heap, | ||
107 | struct ion_buffer *buffer) | ||
108 | { | ||
109 | __arch_iounmap(buffer->vaddr); | ||
110 | buffer->vaddr = NULL; | ||
111 | return; | ||
112 | } | ||
113 | |||
114 | int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, | ||
115 | struct vm_area_struct *vma) | ||
116 | { | ||
117 | return remap_pfn_range(vma, vma->vm_start, | ||
118 | __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, | ||
119 | buffer->size, | ||
120 | pgprot_noncached(vma->vm_page_prot)); | ||
121 | } | ||
122 | |||
123 | static struct ion_heap_ops carveout_heap_ops = { | ||
124 | .allocate = ion_carveout_heap_allocate, | ||
125 | .free = ion_carveout_heap_free, | ||
126 | .phys = ion_carveout_heap_phys, | ||
127 | .map_user = ion_carveout_heap_map_user, | ||
128 | .map_kernel = ion_carveout_heap_map_kernel, | ||
129 | .unmap_kernel = ion_carveout_heap_unmap_kernel, | ||
130 | }; | ||
131 | |||
132 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) | ||
133 | { | ||
134 | struct ion_carveout_heap *carveout_heap; | ||
135 | |||
136 | carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); | ||
137 | if (!carveout_heap) | ||
138 | return ERR_PTR(-ENOMEM); | ||
139 | |||
140 | carveout_heap->pool = gen_pool_create(12, -1); | ||
141 | if (!carveout_heap->pool) { | ||
142 | kfree(carveout_heap); | ||
143 | return ERR_PTR(-ENOMEM); | ||
144 | } | ||
145 | carveout_heap->base = heap_data->base; | ||
146 | gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, | ||
147 | -1); | ||
148 | carveout_heap->heap.ops = &carveout_heap_ops; | ||
149 | carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; | ||
150 | |||
151 | return &carveout_heap->heap; | ||
152 | } | ||
153 | |||
154 | void ion_carveout_heap_destroy(struct ion_heap *heap) | ||
155 | { | ||
156 | struct ion_carveout_heap *carveout_heap = | ||
157 | container_of(heap, struct ion_carveout_heap, heap); | ||
158 | |||
159 | gen_pool_destroy(carveout_heap->pool); | ||
160 | kfree(carveout_heap); | ||
161 | carveout_heap = NULL; | ||
162 | } | ||
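
A carveout heap is described entirely by platform data; a hedged sketch with placeholder base/size values follows (note that gen_pool_create(12, -1) above means allocations come back in 4 KB granules):

/* Hypothetical platform data; base and size are placeholders for a
 * region reserved at boot. */
static struct ion_platform_heap example_carveout_heap = {
	.type = ION_HEAP_TYPE_CARVEOUT,
	.id   = 1,
	.name = "carveout",
	.base = 0x80000000,
	.size = 8 * 1024 * 1024,
};
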
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c new file mode 100644 index 00000000000..6d09778745c --- /dev/null +++ b/drivers/gpu/ion/ion_heap.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_heap.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/ion.h> | ||
19 | #include "ion_priv.h" | ||
20 | |||
21 | struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) | ||
22 | { | ||
23 | struct ion_heap *heap = NULL; | ||
24 | |||
25 | switch (heap_data->type) { | ||
26 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | ||
27 | heap = ion_system_contig_heap_create(heap_data); | ||
28 | break; | ||
29 | case ION_HEAP_TYPE_SYSTEM: | ||
30 | heap = ion_system_heap_create(heap_data); | ||
31 | break; | ||
32 | case ION_HEAP_TYPE_CARVEOUT: | ||
33 | heap = ion_carveout_heap_create(heap_data); | ||
34 | break; | ||
35 | case ION_HEAP_TYPE_IOMMU: | ||
36 | heap = ion_iommu_heap_create(heap_data); | ||
37 | break; | ||
38 | default: | ||
39 | pr_err("%s: Invalid heap type %d\n", __func__, | ||
40 | heap_data->type); | ||
41 | return ERR_PTR(-EINVAL); | ||
42 | } | ||
43 | |||
44 | if (IS_ERR_OR_NULL(heap)) { | ||
45 | pr_err("%s: error creating heap %s type %d base %lu size %u\n", | ||
46 | __func__, heap_data->name, heap_data->type, | ||
47 | heap_data->base, heap_data->size); | ||
48 | return ERR_PTR(-EINVAL); | ||
49 | } | ||
50 | |||
51 | heap->name = heap_data->name; | ||
52 | heap->id = heap_data->id; | ||
53 | return heap; | ||
54 | } | ||
55 | |||
56 | void ion_heap_destroy(struct ion_heap *heap) | ||
57 | { | ||
58 | if (!heap) | ||
59 | return; | ||
60 | |||
61 | switch (heap->type) { | ||
62 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | ||
63 | ion_system_contig_heap_destroy(heap); | ||
64 | break; | ||
65 | case ION_HEAP_TYPE_SYSTEM: | ||
66 | ion_system_heap_destroy(heap); | ||
67 | break; | ||
68 | case ION_HEAP_TYPE_CARVEOUT: | ||
69 | ion_carveout_heap_destroy(heap); | ||
70 | break; | ||
71 | case ION_HEAP_TYPE_IOMMU: | ||
72 | ion_iommu_heap_destroy(heap); | ||
73 | break; | ||
74 | default: | ||
75 | pr_err("%s: Invalid heap type %d\n", __func__, | ||
76 | heap->type); | ||
77 | } | ||
78 | } | ||
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c new file mode 100644 index 00000000000..a3d2d726bda --- /dev/null +++ b/drivers/gpu/ion/ion_iommu_heap.c | |||
@@ -0,0 +1,382 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_iommu_heap.c | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | */ | ||
19 | |||
20 | #define pr_fmt(fmt) "%s(): " fmt, __func__ | ||
21 | |||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/genalloc.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/ion.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/scatterlist.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/vmalloc.h> | ||
31 | #include <linux/iommu.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | |||
35 | #include <asm/cacheflush.h> | ||
36 | |||
37 | #include "ion_priv.h" | ||
38 | |||
39 | #define NUM_PAGES(buf) (PAGE_ALIGN((buf)->size) >> PAGE_SHIFT) | ||
40 | |||
41 | #define GFP_ION (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN) | ||
42 | |||
43 | struct ion_iommu_heap { | ||
44 | struct ion_heap heap; | ||
45 | struct gen_pool *pool; | ||
46 | struct iommu_domain *domain; | ||
47 | struct device *dev; | ||
48 | }; | ||
49 | |||
50 | static struct scatterlist *iommu_heap_map_dma(struct ion_heap *heap, | ||
51 | struct ion_buffer *buf) | ||
52 | { | ||
53 | struct ion_iommu_heap *h = | ||
54 | container_of(heap, struct ion_iommu_heap, heap); | ||
55 | int err, npages = NUM_PAGES(buf); | ||
56 | unsigned int i; | ||
57 | struct scatterlist *sg; | ||
58 | unsigned long da = (unsigned long)buf->priv_virt; | ||
59 | |||
60 | for_each_sg(buf->sglist, sg, npages, i) { | ||
61 | phys_addr_t pa; | ||
62 | |||
63 | pa = sg_phys(sg); | ||
64 | BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); | ||
65 | err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0); | ||
66 | if (err) | ||
67 | goto err_out; | ||
68 | |||
69 | sg->dma_address = da; | ||
70 | da += PAGE_SIZE; | ||
71 | } | ||
72 | |||
73 | pr_debug("da:%p pa:%08x va:%p\n", | ||
74 | buf->priv_virt, sg_phys(buf->sglist), buf->vaddr); | ||
75 | |||
76 | return buf->sglist; | ||
77 | |||
78 | err_out: | ||
79 | if (i > 0) { | ||
80 | unsigned int j; | ||
81 | for_each_sg(buf->sglist, sg, i, j) | ||
82 | iommu_unmap(h->domain, sg_dma_address(sg), PAGE_SIZE); | ||
83 | } | ||
84 | return ERR_PTR(err); | ||
85 | } | ||
86 | |||
87 | static void iommu_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buf) | ||
88 | { | ||
89 | struct ion_iommu_heap *h = | ||
90 | container_of(heap, struct ion_iommu_heap, heap); | ||
91 | unsigned int i; | ||
92 | struct scatterlist *sg; | ||
93 | int npages = NUM_PAGES(buf); | ||
94 | |||
95 | for_each_sg(buf->sglist, sg, npages, i) | ||
96 | iommu_unmap(h->domain, sg_dma_address(sg), PAGE_SIZE); | ||
97 | |||
98 | pr_debug("da:%p\n", buf->priv_virt); | ||
99 | } | ||
100 | |||
101 | struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap, | ||
102 | struct ion_buffer *buf, | ||
103 | unsigned long addr) | ||
104 | { | ||
105 | struct ion_iommu_heap *h = | ||
106 | container_of(heap, struct ion_iommu_heap, heap); | ||
107 | int err; | ||
108 | unsigned int i; | ||
109 | unsigned long da, da_to_free = (unsigned long)buf->priv_virt; | ||
110 | int npages = NUM_PAGES(buf); | ||
111 | |||
112 | BUG_ON(!buf->priv_virt); | ||
113 | |||
114 | da = gen_pool_alloc_addr(h->pool, buf->size, addr); | ||
115 | if (da == 0) { | ||
116 | pr_err("dma address alloc failed, addr=0x%lx", addr); | ||
117 | return ERR_PTR(-ENOMEM); | ||
118 | } else { | ||
119 | pr_err("iommu_heap_remap_dma passed, addr=0x%lx", | ||
120 | addr); | ||
121 | iommu_heap_unmap_dma(heap, buf); | ||
122 | gen_pool_free(h->pool, da_to_free, buf->size); | ||
123 | buf->priv_virt = (void *)da; | ||
124 | } | ||
125 | for (i = 0; i < npages; i++) { | ||
126 | phys_addr_t pa; | ||
127 | |||
128 | pa = page_to_phys(buf->pages[i]); | ||
129 | err = iommu_map(h->domain, da, pa, PAGE_SIZE, 0); | ||
130 | if (err) | ||
131 | goto err_out; | ||
132 | da += PAGE_SIZE; | ||
133 | } | ||
134 | |||
135 | pr_debug("da:%p pa:%08x va:%p\n", | ||
136 | buf->priv_virt, page_to_phys(buf->pages[0]), buf->vaddr); | ||
137 | |||
138 | return buf->sglist; | ||
139 | |||
140 | err_out: | ||
141 | da = (unsigned long)buf->priv_virt; | ||
142 | while (i-- > 0) | ||
143 | iommu_unmap(h->domain, da + (i << PAGE_SHIFT), | ||
144 | PAGE_SIZE); | ||
145 | return ERR_PTR(err); | ||
146 | } | ||
147 | |||
148 | static int ion_buffer_allocate(struct ion_buffer *buf) | ||
149 | { | ||
150 | int i, npages = NUM_PAGES(buf); | ||
151 | |||
152 | buf->pages = kmalloc(npages * sizeof(*buf->pages), GFP_KERNEL); | ||
153 | if (!buf->pages) | ||
154 | goto err_pages; | ||
155 | |||
156 | buf->sglist = vzalloc(npages * sizeof(*buf->sglist)); | ||
157 | if (!buf->sglist) | ||
158 | goto err_sgl; | ||
159 | |||
160 | sg_init_table(buf->sglist, npages); | ||
161 | |||
162 | for (i = 0; i < npages; i++) { | ||
163 | struct page *page; | ||
164 | phys_addr_t pa; | ||
165 | |||
166 | page = alloc_page(GFP_ION); | ||
167 | if (!page) | ||
168 | goto err_pgalloc; | ||
169 | pa = page_to_phys(page); | ||
170 | |||
171 | sg_set_page(&buf->sglist[i], page, PAGE_SIZE, 0); | ||
172 | |||
173 | flush_dcache_page(page); | ||
174 | outer_flush_range(pa, pa + PAGE_SIZE); | ||
175 | |||
176 | buf->pages[i] = page; | ||
177 | |||
178 | pr_debug_once("pa:%08x\n", pa); | ||
179 | } | ||
180 | return 0; | ||
181 | |||
182 | err_pgalloc: | ||
183 | while (i-- > 0) | ||
184 | __free_page(buf->pages[i]); | ||
185 | vfree(buf->sglist); | ||
186 | err_sgl: | ||
187 | kfree(buf->pages); | ||
188 | err_pages: | ||
189 | return -ENOMEM; | ||
190 | } | ||
191 | |||
192 | static void ion_buffer_free(struct ion_buffer *buf) | ||
193 | { | ||
194 | int i, npages = NUM_PAGES(buf); | ||
195 | |||
196 | for (i = 0; i < npages; i++) | ||
197 | __free_page(buf->pages[i]); | ||
198 | vfree(buf->sglist); | ||
199 | kfree(buf->pages); | ||
200 | } | ||
201 | |||
202 | static int iommu_heap_allocate(struct ion_heap *heap, struct ion_buffer *buf, | ||
203 | unsigned long len, unsigned long align, | ||
204 | unsigned long flags) | ||
205 | { | ||
206 | int err; | ||
207 | struct ion_iommu_heap *h = | ||
208 | container_of(heap, struct ion_iommu_heap, heap); | ||
209 | unsigned long da; | ||
210 | struct scatterlist *sgl; | ||
211 | |||
212 | len = round_up(len, PAGE_SIZE); | ||
213 | |||
214 | da = gen_pool_alloc(h->pool, len); | ||
215 | if (!da) | ||
216 | return -ENOMEM; | ||
217 | |||
218 | buf->priv_virt = (void *)da; | ||
219 | buf->size = len; | ||
220 | |||
221 | WARN_ON(!IS_ALIGNED(da, PAGE_SIZE)); | ||
222 | |||
223 | err = ion_buffer_allocate(buf); | ||
224 | if (err) | ||
225 | goto err_alloc_buf; | ||
226 | |||
227 | sgl = iommu_heap_map_dma(heap, buf); | ||
228 | if (IS_ERR_OR_NULL(sgl)) | ||
229 | goto err_heap_map_dma; | ||
230 | buf->vaddr = 0; | ||
231 | return 0; | ||
232 | |||
233 | err_heap_map_dma: | ||
234 | ion_buffer_free(buf); | ||
235 | err_alloc_buf: | ||
236 | gen_pool_free(h->pool, da, len); | ||
237 | buf->size = 0; | ||
238 | buf->pages = NULL; | ||
239 | buf->priv_virt = NULL; | ||
240 | return err; | ||
241 | } | ||
242 | |||
243 | static void iommu_heap_free(struct ion_buffer *buf) | ||
244 | { | ||
245 | struct ion_heap *heap = buf->heap; | ||
246 | struct ion_iommu_heap *h = | ||
247 | container_of(heap, struct ion_iommu_heap, heap); | ||
248 | void *da = buf->priv_virt; | ||
249 | |||
250 | iommu_heap_unmap_dma(heap, buf); | ||
251 | ion_buffer_free(buf); | ||
252 | gen_pool_free(h->pool, (unsigned long)da, buf->size); | ||
253 | |||
254 | buf->pages = NULL; | ||
255 | buf->priv_virt = NULL; | ||
256 | pr_debug("da:%p\n", da); | ||
257 | } | ||
258 | |||
259 | static int iommu_heap_phys(struct ion_heap *heap, struct ion_buffer *buf, | ||
260 | ion_phys_addr_t *addr, size_t *len) | ||
261 | { | ||
262 | *addr = (unsigned long)buf->priv_virt; | ||
263 | *len = buf->size; | ||
264 | pr_debug("da:%08lx(%x)\n", *addr, *len); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static void *iommu_heap_map_kernel(struct ion_heap *heap, | ||
269 | struct ion_buffer *buf) | ||
270 | { | ||
271 | int npages = NUM_PAGES(buf); | ||
272 | |||
273 | BUG_ON(!buf->pages); | ||
274 | buf->vaddr = vm_map_ram(buf->pages, npages, -1, | ||
275 | pgprot_noncached(pgprot_kernel)); | ||
276 | pr_debug("va:%p\n", buf->vaddr); | ||
277 | WARN_ON(!buf->vaddr); | ||
278 | return buf->vaddr; | ||
279 | } | ||
280 | |||
281 | static void iommu_heap_unmap_kernel(struct ion_heap *heap, | ||
282 | struct ion_buffer *buf) | ||
283 | { | ||
284 | int npages = NUM_PAGES(buf); | ||
285 | |||
286 | BUG_ON(!buf->pages); | ||
287 | WARN_ON(!buf->vaddr); | ||
288 | vm_unmap_ram(buf->vaddr, npages); | ||
289 | buf->vaddr = NULL; | ||
290 | pr_debug("va:%p\n", buf->vaddr); | ||
291 | } | ||
292 | |||
293 | static int iommu_heap_map_user(struct ion_heap *mapper, | ||
294 | struct ion_buffer *buf, | ||
295 | struct vm_area_struct *vma) | ||
296 | { | ||
297 | unsigned long i = vma->vm_pgoff;	/* vm_pgoff is already in pages */ | ||
298 | unsigned long uaddr = vma->vm_start; | ||
299 | unsigned long usize = vma->vm_end - vma->vm_start; | ||
300 | |||
301 | pr_debug("vma:%08lx-%08lx\n", vma->vm_start, vma->vm_end); | ||
302 | BUG_ON(!buf->pages); | ||
303 | |||
304 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
305 | do { | ||
306 | int ret; | ||
307 | struct page *page = buf->pages[i++]; | ||
308 | |||
309 | ret = vm_insert_page(vma, uaddr, page); | ||
310 | if (ret) | ||
311 | return ret; | ||
312 | |||
313 | uaddr += PAGE_SIZE; | ||
314 | usize -= PAGE_SIZE; | ||
315 | } while (usize > 0); | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | static struct ion_heap_ops iommu_heap_ops = { | ||
321 | .allocate = iommu_heap_allocate, | ||
322 | .free = iommu_heap_free, | ||
323 | .phys = iommu_heap_phys, | ||
324 | .map_dma = iommu_heap_map_dma, | ||
325 | .unmap_dma = iommu_heap_unmap_dma, | ||
326 | .map_kernel = iommu_heap_map_kernel, | ||
327 | .unmap_kernel = iommu_heap_unmap_kernel, | ||
328 | .map_user = iommu_heap_map_user, | ||
329 | }; | ||
330 | |||
331 | struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *data) | ||
332 | { | ||
333 | struct ion_iommu_heap *h; | ||
334 | int err; | ||
335 | |||
336 | h = kzalloc(sizeof(*h), GFP_KERNEL); | ||
337 | if (!h) { | ||
338 | err = -ENOMEM; | ||
339 | goto err_heap; | ||
340 | } | ||
341 | |||
342 | h->pool = gen_pool_create(12, -1); | ||
343 | if (!h->pool) { | ||
344 | err = -ENOMEM; | ||
345 | goto err_genpool; | ||
346 | } | ||
347 | gen_pool_add(h->pool, data->base, data->size, -1); | ||
348 | |||
349 | h->heap.ops = &iommu_heap_ops; | ||
350 | h->domain = iommu_domain_alloc(&platform_bus_type); | ||
351 | h->dev = data->priv; | ||
352 | if (!h->domain) { | ||
353 | err = -ENOMEM; | ||
354 | goto err_iommu_alloc; | ||
355 | } | ||
356 | |||
357 | err = iommu_attach_device(h->domain, h->dev); | ||
358 | if (err) | ||
359 | goto err_iommu_attach; | ||
360 | |||
361 | return &h->heap; | ||
362 | |||
363 | err_iommu_attach: | ||
364 | iommu_domain_free(h->domain); | ||
365 | err_iommu_alloc: | ||
366 | gen_pool_destroy(h->pool); | ||
367 | err_genpool: | ||
368 | kfree(h); | ||
369 | err_heap: | ||
370 | return ERR_PTR(err); | ||
371 | } | ||
372 | |||
373 | void ion_iommu_heap_destroy(struct ion_heap *heap) | ||
374 | { | ||
375 | struct ion_iommu_heap *h = | ||
376 | container_of(heap, struct ion_iommu_heap, heap); | ||
377 | |||
378 | iommu_detach_device(h->domain, h->dev); | ||
379 | gen_pool_destroy(h->pool); | ||
380 | iommu_domain_free(h->domain); | ||
381 | kfree(h); | ||
382 | } | ||
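
The IOMMU heap additionally needs the client device that owns the IOMMU, passed through heap_data->priv (see the h->dev = data->priv assignment above); base/size describe the IOVA window seeded into the gen_pool. A sketch with placeholder values:

/* Hypothetical platform data; example_pdev is an assumed
 * platform_device sitting behind the IOMMU. */
static struct ion_platform_heap example_iommu_heap = {
	.type = ION_HEAP_TYPE_IOMMU,
	.id   = 2,
	.name = "iommu",
	.base = 0x10000000,		/* placeholder IOVA base */
	.size = 128 * 1024 * 1024,	/* placeholder IOVA window */
	.priv = &example_pdev.dev,
};
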
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h new file mode 100644 index 00000000000..bfe26da9c04 --- /dev/null +++ b/drivers/gpu/ion/ion_priv.h | |||
@@ -0,0 +1,293 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_priv.h | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef _ION_PRIV_H | ||
18 | #define _ION_PRIV_H | ||
19 | |||
20 | #include <linux/kref.h> | ||
21 | #include <linux/mm_types.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/rbtree.h> | ||
24 | #include <linux/ion.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | |||
27 | struct ion_mapping; | ||
28 | |||
29 | struct ion_dma_mapping { | ||
30 | struct kref ref; | ||
31 | struct scatterlist *sglist; | ||
32 | }; | ||
33 | |||
34 | struct ion_kernel_mapping { | ||
35 | struct kref ref; | ||
36 | void *vaddr; | ||
37 | }; | ||
38 | |||
39 | /** | ||
40 | * struct ion_device - the metadata of the ion device node | ||
41 | * @dev: the actual misc device | ||
42 | * @buffers: an rb tree of all the existing buffers | ||
43 | * @lock: lock protecting the buffers & heaps trees | ||
44 | * @heaps: list of all the heaps in the system | ||
45 | * @user_clients: list of all the clients created from userspace | ||
46 | */ | ||
47 | struct ion_device { | ||
48 | struct miscdevice dev; | ||
49 | struct rb_root buffers; | ||
50 | struct mutex lock; | ||
51 | struct rb_root heaps; | ||
52 | long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, | ||
53 | unsigned long arg); | ||
54 | struct rb_root user_clients; | ||
55 | struct rb_root kernel_clients; | ||
56 | struct dentry *debug_root; | ||
57 | }; | ||
58 | |||
59 | /** | ||
60 | * struct ion_client - a process/hw block local address space | ||
61 | * @ref: for reference counting the client | ||
62 | * @node: node in the tree of all clients | ||
63 | * @dev: backpointer to ion device | ||
64 | * @handles: an rb tree of all the handles in this client | ||
65 | * @lock: lock protecting the tree of handles | ||
66 | * @heap_mask: mask of all supported heaps | ||
67 | * @name: used for debugging | ||
68 | * @task: used for debugging | ||
69 | * | ||
70 | * A client represents a list of buffers this client may access. | ||
71 | * The mutex stored here is used to protect both the tree of handles | ||
72 | * and the handles themselves; it should be held while modifying either. | ||
73 | */ | ||
74 | struct ion_client { | ||
75 | struct kref ref; | ||
76 | struct rb_node node; | ||
77 | struct ion_device *dev; | ||
78 | struct rb_root handles; | ||
79 | struct mutex lock; | ||
80 | unsigned int heap_mask; | ||
81 | const char *name; | ||
82 | struct task_struct *task; | ||
83 | pid_t pid; | ||
84 | struct dentry *debug_root; | ||
85 | }; | ||
86 | |||
87 | /** | ||
88 | * ion_handle - a client local reference to a buffer | ||
89 | * @ref: reference count | ||
90 | * @client: back pointer to the client the buffer resides in | ||
91 | * @buffer: pointer to the buffer | ||
92 | * @node: node in the client's handle rbtree | ||
93 | * @kmap_cnt: count of times this client has mapped to kernel | ||
94 | * @dmap_cnt: count of times this client has mapped for dma | ||
95 | * @usermap_cnt: count of times this client has mapped for userspace | ||
96 | * | ||
97 | * Modifications to node, map_cnt or mapping should be protected by the | ||
98 | * lock in the client. Other fields are never changed after initialization. | ||
99 | */ | ||
100 | struct ion_handle { | ||
101 | struct kref ref; | ||
102 | struct ion_client *client; | ||
103 | struct ion_buffer *buffer; | ||
104 | struct rb_node node; | ||
105 | unsigned int kmap_cnt; | ||
106 | unsigned int dmap_cnt; | ||
107 | unsigned int usermap_cnt; | ||
108 | }; | ||
109 | |||
110 | bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle); | ||
111 | |||
112 | void ion_buffer_get(struct ion_buffer *buffer); | ||
113 | |||
114 | struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); | ||
115 | |||
116 | struct ion_client *ion_client_get_file(int fd); | ||
117 | |||
118 | void ion_client_get(struct ion_client *client); | ||
119 | |||
120 | int ion_client_put(struct ion_client *client); | ||
121 | |||
122 | void ion_handle_get(struct ion_handle *handle); | ||
123 | |||
124 | int ion_handle_put(struct ion_handle *handle); | ||
125 | |||
126 | struct ion_handle *ion_handle_create(struct ion_client *client, | ||
127 | struct ion_buffer *buffer); | ||
128 | |||
129 | void ion_handle_add(struct ion_client *client, struct ion_handle *handle); | ||
130 | |||
131 | int ion_remap_dma(struct ion_client *client, | ||
132 | struct ion_handle *handle, | ||
133 | unsigned long addr); | ||
134 | /** | ||
135 | * struct ion_buffer - metadata for a particular buffer | ||
136 | * @ref: reference count | ||
137 | * @node: node in the ion_device buffers tree | ||
138 | * @dev: back pointer to the ion_device | ||
139 | * @heap: back pointer to the heap the buffer came from | ||
140 | * @flags: buffer specific flags | ||
141 | * @size: size of the buffer | ||
142 | * @priv_virt: private data to the buffer representable as | ||
143 | * a void * | ||
144 | * @priv_phys: private data to the buffer representable as | ||
145 | * an ion_phys_addr_t (and someday a phys_addr_t) | ||
146 | * @lock: protects the buffers cnt fields | ||
147 | * @kmap_cnt: number of times the buffer is mapped to the kernel | ||
148 | * @vaddr: the kernel mapping if kmap_cnt is not zero | ||
149 | * @dmap_cnt: number of times the buffer is mapped for dma | ||
150 | * @sglist: the scatterlist for the buffer if dmap_cnt is not zero | ||
151 | * @pages: list for allocated pages for the buffer | ||
152 | */ | ||
153 | struct ion_buffer { | ||
154 | struct kref ref; | ||
155 | struct rb_node node; | ||
156 | struct ion_device *dev; | ||
157 | struct ion_heap *heap; | ||
158 | unsigned long flags; | ||
159 | size_t size; | ||
160 | union { | ||
161 | void *priv_virt; | ||
162 | ion_phys_addr_t priv_phys; | ||
163 | }; | ||
164 | struct mutex lock; | ||
165 | int kmap_cnt; | ||
166 | void *vaddr; | ||
167 | int dmap_cnt; | ||
168 | struct scatterlist *sglist; | ||
169 | struct page **pages; | ||
170 | }; | ||
171 | |||
172 | /** | ||
173 | * struct ion_heap_ops - ops to operate on a given heap | ||
174 | * @allocate: allocate memory | ||
175 | * @free: free memory | ||
176 | * @phys: get physical address of a buffer (only defined on | ||
177 | * physically contiguous heaps) | ||
178 | * @map_dma: map the memory for dma to a scatterlist | ||
179 | * @unmap_dma: unmap the memory for dma | ||
180 | * @map_kernel: map memory into the kernel | ||
181 | * @unmap_kernel: unmap memory from the kernel | ||
182 | * @map_user: map memory to userspace | ||
183 | */ | ||
184 | struct ion_heap_ops { | ||
185 | int (*allocate) (struct ion_heap *heap, | ||
186 | struct ion_buffer *buffer, unsigned long len, | ||
187 | unsigned long align, unsigned long flags); | ||
188 | void (*free) (struct ion_buffer *buffer); | ||
189 | int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, | ||
190 | ion_phys_addr_t *addr, size_t *len); | ||
191 | struct scatterlist *(*map_dma) (struct ion_heap *heap, | ||
192 | struct ion_buffer *buffer); | ||
193 | void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); | ||
194 | void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); | ||
195 | void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); | ||
196 | int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, | ||
197 | struct vm_area_struct *vma); | ||
198 | }; | ||
199 | |||
200 | /** | ||
201 | * struct ion_heap - represents a heap in the system | ||
202 | * @node: rb node to put the heap on the device's tree of heaps | ||
203 | * @dev: back pointer to the ion_device | ||
204 | * @type: type of heap | ||
205 | * @ops: ops struct as above | ||
206 | * @id: id of heap, also indicates priority of this heap when | ||
207 | * allocating. These are specified by platform data and | ||
208 | * MUST be unique | ||
209 | * @name: used for debugging | ||
210 | * | ||
211 | * Represents a pool of memory from which buffers can be made. In some | ||
212 | * systems the only heap is regular system memory allocated via vmalloc. | ||
213 | * On others, some blocks might require large physically contiguous buffers | ||
214 | * that are allocated from a specially reserved heap. | ||
215 | */ | ||
216 | struct ion_heap { | ||
217 | struct rb_node node; | ||
218 | struct ion_device *dev; | ||
219 | enum ion_heap_type type; | ||
220 | struct ion_heap_ops *ops; | ||
221 | int id; | ||
222 | const char *name; | ||
223 | }; | ||
224 | |||
225 | /** | ||
226 | * ion_device_create - allocates and returns an ion device | ||
227 | * @custom_ioctl: arch specific ioctl function if applicable | ||
228 | * | ||
229 | * returns a valid device or an ERR_PTR on failure | ||
230 | */ | ||
231 | struct ion_device *ion_device_create(long (*custom_ioctl) | ||
232 | (struct ion_client *client, | ||
233 | unsigned int cmd, | ||
234 | unsigned long arg)); | ||
235 | |||
236 | /** | ||
237 | * ion_device_destroy - free a device and its resources | ||
238 | * @dev: the device | ||
239 | */ | ||
240 | void ion_device_destroy(struct ion_device *dev); | ||
241 | |||
242 | /** | ||
243 | * ion_device_add_heap - adds a heap to the ion device | ||
244 | * @dev: the device | ||
245 | * @heap: the heap to add | ||
246 | */ | ||
247 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); | ||
248 | |||
249 | /** | ||
250 | * functions for creating and destroying the built-in ion heaps. | ||
251 | * architectures can add their own custom architecture-specific | ||
252 | * heaps as appropriate. | ||
253 | */ | ||
254 | |||
255 | struct ion_heap *ion_heap_create(struct ion_platform_heap *); | ||
256 | void ion_heap_destroy(struct ion_heap *); | ||
257 | |||
258 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); | ||
259 | void ion_system_heap_destroy(struct ion_heap *); | ||
260 | |||
261 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); | ||
262 | void ion_system_contig_heap_destroy(struct ion_heap *); | ||
263 | |||
264 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); | ||
265 | void ion_carveout_heap_destroy(struct ion_heap *); | ||
266 | /** | ||
267 | * kernel api to allocate/free from carveout -- used when carveout is | ||
268 | * used to back an architecture specific custom heap | ||
269 | */ | ||
270 | ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, | ||
271 | unsigned long align); | ||
272 | void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, | ||
273 | unsigned long size); | ||
274 | |||
275 | #ifdef CONFIG_ION_IOMMU | ||
276 | struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *); | ||
277 | void ion_iommu_heap_destroy(struct ion_heap *); | ||
278 | #else | ||
279 | static inline struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data) | ||
280 | { | ||
281 | return NULL; | ||
282 | } | ||
283 | static inline void ion_iommu_heap_destroy(struct ion_heap *heap) | ||
284 | { | ||
285 | } | ||
286 | #endif | ||
287 | /** | ||
288 | * The carveout heap returns physical addresses; since 0 may be a valid | ||
289 | * physical address, this value is used to indicate that allocation failed | ||
290 | */ | ||
291 | #define ION_CARVEOUT_ALLOCATE_FAIL -1 | ||
292 | |||
293 | #endif /* _ION_PRIV_H */ | ||
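
As the comment above notes, the carveout allocator is also meant to back architecture-specific custom heaps; a minimal sketch of a custom allocate hook using it together with the failure sentinel defined above:

/* Hypothetical custom-heap allocate built on ion_carveout_allocate(). */
static int my_heap_allocate(struct ion_heap *heap,
			    struct ion_buffer *buffer,
			    unsigned long size, unsigned long align,
			    unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	if (buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;
	return 0;
}
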
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c new file mode 100644 index 00000000000..c046cf1a321 --- /dev/null +++ b/drivers/gpu/ion/ion_system_heap.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_system_heap.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/ion.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/scatterlist.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include "ion_priv.h" | ||
24 | |||
25 | static int ion_system_heap_allocate(struct ion_heap *heap, | ||
26 | struct ion_buffer *buffer, | ||
27 | unsigned long size, unsigned long align, | ||
28 | unsigned long flags) | ||
29 | { | ||
30 | buffer->priv_virt = vmalloc_user(size); | ||
31 | if (!buffer->priv_virt) | ||
32 | return -ENOMEM; | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | void ion_system_heap_free(struct ion_buffer *buffer) | ||
37 | { | ||
38 | vfree(buffer->priv_virt); | ||
39 | } | ||
40 | |||
41 | struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, | ||
42 | struct ion_buffer *buffer) | ||
43 | { | ||
44 | struct scatterlist *sglist; | ||
45 | struct page *page; | ||
46 | int i; | ||
47 | int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; | ||
48 | void *vaddr = buffer->priv_virt; | ||
49 | |||
50 | sglist = vmalloc(npages * sizeof(struct scatterlist)); | ||
51 | if (!sglist) | ||
52 | return ERR_PTR(-ENOMEM); | ||
53 | memset(sglist, 0, npages * sizeof(struct scatterlist)); | ||
54 | sg_init_table(sglist, npages); | ||
55 | for (i = 0; i < npages; i++) { | ||
56 | page = vmalloc_to_page(vaddr); | ||
57 | if (!page) | ||
58 | goto end; | ||
59 | sg_set_page(&sglist[i], page, PAGE_SIZE, 0); | ||
60 | vaddr += PAGE_SIZE; | ||
61 | } | ||
62 | /* XXX do cache maintenance for dma? */ | ||
63 | return sglist; | ||
64 | end: | ||
65 | vfree(sglist); | ||
66 | return ERR_PTR(-ENOMEM); | ||
67 | } | ||
68 | |||
69 | static void ion_system_heap_unmap_dma(struct ion_heap *heap, | ||
70 | struct ion_buffer *buffer) | ||
71 | { | ||
72 | /* XXX undo cache maintenance for dma? */ | ||
73 | /* vfree() handles NULL, so no check is needed */ | ||
74 | vfree(buffer->sglist); | ||
75 | } | ||
76 | |||
77 | static void *ion_system_heap_map_kernel(struct ion_heap *heap, | ||
78 | struct ion_buffer *buffer) | ||
79 | { | ||
80 | return buffer->priv_virt; | ||
81 | } | ||
82 | |||
83 | static void ion_system_heap_unmap_kernel(struct ion_heap *heap, | ||
84 | struct ion_buffer *buffer) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | static int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, | ||
89 | struct vm_area_struct *vma) | ||
90 | { | ||
91 | return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff); | ||
92 | } | ||
93 | |||
94 | static struct ion_heap_ops vmalloc_ops = { | ||
95 | .allocate = ion_system_heap_allocate, | ||
96 | .free = ion_system_heap_free, | ||
97 | .map_dma = ion_system_heap_map_dma, | ||
98 | .unmap_dma = ion_system_heap_unmap_dma, | ||
99 | .map_kernel = ion_system_heap_map_kernel, | ||
100 | .unmap_kernel = ion_system_heap_unmap_kernel, | ||
101 | .map_user = ion_system_heap_map_user, | ||
102 | }; | ||
103 | |||
104 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) | ||
105 | { | ||
106 | struct ion_heap *heap; | ||
107 | |||
108 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); | ||
109 | if (!heap) | ||
110 | return ERR_PTR(-ENOMEM); | ||
111 | heap->ops = &vmalloc_ops; | ||
112 | heap->type = ION_HEAP_TYPE_SYSTEM; | ||
113 | return heap; | ||
114 | } | ||
115 | |||
116 | void ion_system_heap_destroy(struct ion_heap *heap) | ||
117 | { | ||
118 | kfree(heap); | ||
119 | } | ||
120 | |||
121 | static int ion_system_contig_heap_allocate(struct ion_heap *heap, | ||
122 | struct ion_buffer *buffer, | ||
123 | unsigned long len, | ||
124 | unsigned long align, | ||
125 | unsigned long flags) | ||
126 | { | ||
127 | buffer->priv_virt = kzalloc(len, GFP_KERNEL); | ||
128 | if (!buffer->priv_virt) | ||
129 | return -ENOMEM; | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static void ion_system_contig_heap_free(struct ion_buffer *buffer) | ||
134 | { | ||
135 | kfree(buffer->priv_virt); | ||
136 | } | ||
137 | |||
138 | static int ion_system_contig_heap_phys(struct ion_heap *heap, | ||
139 | struct ion_buffer *buffer, | ||
140 | ion_phys_addr_t *addr, size_t *len) | ||
141 | { | ||
142 | *addr = virt_to_phys(buffer->priv_virt); | ||
143 | *len = buffer->size; | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap, | ||
148 | struct ion_buffer *buffer) | ||
149 | { | ||
150 | struct scatterlist *sglist; | ||
151 | |||
152 | sglist = vmalloc(sizeof(struct scatterlist)); | ||
153 | if (!sglist) | ||
154 | return ERR_PTR(-ENOMEM); | ||
155 | sg_init_table(sglist, 1); | ||
156 | sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0); | ||
157 | return sglist; | ||
158 | } | ||
159 | |||
160 | static int ion_system_contig_heap_map_user(struct ion_heap *heap, | ||
161 | struct ion_buffer *buffer, | ||
162 | struct vm_area_struct *vma) | ||
163 | { | ||
164 | unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt)); | ||
165 | return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, | ||
166 | vma->vm_end - vma->vm_start, | ||
167 | vma->vm_page_prot); | ||
168 | |||
169 | } | ||
170 | |||
171 | static struct ion_heap_ops kmalloc_ops = { | ||
172 | .allocate = ion_system_contig_heap_allocate, | ||
173 | .free = ion_system_contig_heap_free, | ||
174 | .phys = ion_system_contig_heap_phys, | ||
175 | .map_dma = ion_system_contig_heap_map_dma, | ||
176 | .unmap_dma = ion_system_heap_unmap_dma, | ||
177 | .map_kernel = ion_system_heap_map_kernel, | ||
178 | .unmap_kernel = ion_system_heap_unmap_kernel, | ||
179 | .map_user = ion_system_contig_heap_map_user, | ||
180 | }; | ||
181 | |||
182 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) | ||
183 | { | ||
184 | struct ion_heap *heap; | ||
185 | |||
186 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); | ||
187 | if (!heap) | ||
188 | return ERR_PTR(-ENOMEM); | ||
189 | heap->ops = &kmalloc_ops; | ||
190 | heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; | ||
191 | return heap; | ||
192 | } | ||
193 | |||
194 | void ion_system_contig_heap_destroy(struct ion_heap *heap) | ||
195 | { | ||
196 | kfree(heap); | ||
197 | } | ||
198 | |||
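As a usage sketch (not part of this patch): a platform file would bring these heaps up through the create functions and honor their ERR_PTR-style returns. ion_device_add_heap() is assumed from the ion core added elsewhere in this series.

    /* Sketch under the assumptions noted above. */
    static struct ion_heap *example_register_system_heap(struct ion_device *dev)
    {
            struct ion_heap *heap = ion_system_heap_create(NULL);

            if (IS_ERR(heap))
                    return heap;    /* propagate the ERR_PTR */
            ion_device_add_heap(dev, heap);
            return heap;
    }

Teardown mirrors creation: once the heap is no longer registered, ion_system_heap_destroy(heap) frees the struct ion_heap.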
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c new file mode 100644 index 00000000000..692458e07b5 --- /dev/null +++ b/drivers/gpu/ion/ion_system_mapper.c | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/ion_system_mapper.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/ion.h> | ||
19 | #include <linux/memory.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include "ion_priv.h" | ||
24 | /* | ||
25 | * This mapper is valid for any heap that allocates memory that already has | ||
26 |  * a kernel mapping; this includes vmalloc'd memory, kmalloc'd memory, | ||
27 |  * pages obtained via ioremap, etc. | ||
28 | */ | ||
29 | static void *ion_kernel_mapper_map(struct ion_mapper *mapper, | ||
30 | struct ion_buffer *buffer, | ||
31 | struct ion_mapping **mapping) | ||
32 | { | ||
33 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) { | ||
34 | pr_err("%s: attempting to map an unsupported heap\n", __func__); | ||
35 | return ERR_PTR(-EINVAL); | ||
36 | } | ||
37 | /* XXX REVISIT ME!!! */ | ||
38 | *((unsigned long *)mapping) = (unsigned long)buffer->priv; | ||
39 | return buffer->priv; | ||
40 | } | ||
41 | |||
42 | static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, | ||
43 | struct ion_buffer *buffer, | ||
44 | struct ion_mapping *mapping) | ||
45 | { | ||
46 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) | ||
47 | pr_err("%s: attempting to unmap an unsupported heap\n", | ||
48 | __func__); | ||
49 | } | ||
50 | |||
51 | static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, | ||
52 | struct ion_buffer *buffer, | ||
53 | struct ion_mapping *mapping) | ||
54 | { | ||
55 | if (!((1 << buffer->heap->type) & mapper->heap_mask)) { | ||
56 | pr_err("%s: attempting to map an unsupported heap\n", | ||
57 | __func__); | ||
58 | return ERR_PTR(-EINVAL); | ||
59 | } | ||
60 | return buffer->priv; | ||
61 | } | ||
62 | |||
63 | static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, | ||
64 | struct ion_buffer *buffer, | ||
65 | struct vm_area_struct *vma, | ||
66 | struct ion_mapping *mapping) | ||
67 | { | ||
68 | int ret; | ||
69 | |||
70 | switch (buffer->heap->type) { | ||
71 | case ION_HEAP_KMALLOC: | ||
72 | { | ||
73 | unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); | ||
74 | ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, | ||
75 | vma->vm_end - vma->vm_start, | ||
76 | vma->vm_page_prot); | ||
77 | break; | ||
78 | } | ||
79 | case ION_HEAP_VMALLOC: | ||
80 | ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); | ||
81 | break; | ||
82 | default: | ||
83 | pr_err("%s: attempting to map unsupported heap to userspace\n", | ||
84 | __func__); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | return ret; | ||
89 | } | ||
90 | |||
91 | static struct ion_mapper_ops ops = { | ||
92 | .map = ion_kernel_mapper_map, | ||
93 | .map_kernel = ion_kernel_mapper_map_kernel, | ||
94 | .map_user = ion_kernel_mapper_map_user, | ||
95 | .unmap = ion_kernel_mapper_unmap, | ||
96 | }; | ||
97 | |||
98 | struct ion_mapper *ion_system_mapper_create(void) | ||
99 | { | ||
100 | struct ion_mapper *mapper; | ||
101 | mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); | ||
102 | if (!mapper) | ||
103 | return ERR_PTR(-ENOMEM); | ||
104 | mapper->type = ION_SYSTEM_MAPPER; | ||
105 | mapper->ops = &ops; | ||
106 | mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); | ||
107 | return mapper; | ||
108 | } | ||
109 | |||
110 | void ion_system_mapper_destroy(struct ion_mapper *mapper) | ||
111 | { | ||
112 | kfree(mapper); | ||
113 | } | ||
114 | |||
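A short usage sketch, not part of the patch: the mapper is created once and only accepts buffers whose heap type falls inside its heap_mask, so anything outside ION_HEAP_VMALLOC and ION_HEAP_KMALLOC is rejected with -EINVAL.

    /* Sketch: creating and destroying the system mapper. */
    static int example_mapper_setup(void)
    {
            struct ion_mapper *mapper = ion_system_mapper_create();

            if (IS_ERR(mapper))
                    return PTR_ERR(mapper);
            /* ... hand the mapper to the ion core ... */
            ion_system_mapper_destroy(mapper);
            return 0;
    }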
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile new file mode 100644 index 00000000000..11cd003fb08 --- /dev/null +++ b/drivers/gpu/ion/tegra/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-y += tegra_ion.o | |||
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000..2252079279e --- /dev/null +++ b/drivers/gpu/ion/tegra/tegra_ion.c | |||
@@ -0,0 +1,599 @@ | |||
1 | /* | ||
2 | * drivers/gpu/ion/tegra/tegra_ion.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Google, Inc. | ||
5 | * Copyright (C) 2011, NVIDIA Corporation. | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ | ||
19 | |||
20 | #include <linux/err.h> | ||
21 | #include <linux/ion.h> | ||
22 | #include <linux/tegra_ion.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/io.h> | ||
28 | #include "../ion_priv.h" | ||
29 | |||
30 | #define CLIENT_HEAP_MASK 0xFFFFFFFF | ||
31 | #define HEAP_FLAGS 0xFF | ||
32 | |||
33 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
34 | #include "mach/nvmap.h" | ||
35 | struct nvmap_device *nvmap_dev; | ||
36 | #endif | ||
37 | |||
38 | static struct ion_device *idev; | ||
39 | static int num_heaps; | ||
40 | static struct ion_heap **heaps; | ||
41 | |||
42 | static int tegra_ion_pin(struct ion_client *client, | ||
43 | unsigned int cmd, | ||
44 | unsigned long arg) | ||
45 | { | ||
46 | struct tegra_ion_pin_data data; | ||
47 | int ret = 0; | ||
48 | struct ion_handle *on_stack[16]; | ||
49 | struct ion_handle **refs = on_stack; | ||
50 | int i; | ||
51 | bool valid_handle; | ||
52 | |||
53 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
54 | return -EFAULT; | ||
55 | if (data.count) { | ||
56 | size_t bytes = data.count * sizeof(struct ion_handle *); | ||
57 | |||
58 | if (data.count > ARRAY_SIZE(on_stack)) | ||
59 | refs = kmalloc(bytes, GFP_KERNEL); | ||
60 | else | ||
61 | refs = on_stack; | ||
62 | if (!refs) | ||
63 | return -ENOMEM; | ||
64 | if (copy_from_user(refs, (void *)data.handles, bytes)) { | ||
65 | ret = -EFAULT; | ||
66 | goto err; | ||
67 | } | ||
68 | } else | ||
69 | return -EINVAL; | ||
70 | |||
71 | mutex_lock(&client->lock); | ||
72 | for (i = 0; i < data.count; i++) { | ||
73 | /* Ignore NULL pointers during unpin operation. */ | ||
74 | if (!refs[i] && cmd == TEGRA_ION_UNPIN) | ||
75 | continue; | ||
76 | valid_handle = ion_handle_validate(client, refs[i]); | ||
77 | if (!valid_handle) { | ||
78 | WARN(1, "invalid handle passed h=0x%x\n", (u32)refs[i]); | ||
79 | mutex_unlock(&client->lock); | ||
80 | ret = -EINVAL; | ||
81 | goto err; | ||
82 | } | ||
83 | } | ||
84 | mutex_unlock(&client->lock); | ||
85 | |||
86 | if (cmd == TEGRA_ION_PIN) { | ||
87 | ion_phys_addr_t addr; | ||
88 | size_t len; | ||
89 | |||
90 | for (i = 0; i < data.count; i++) { | ||
91 | ret = ion_phys(client, refs[i], &addr, &len); | ||
92 | if (ret) | ||
93 | goto err; | ||
94 | ion_handle_get(refs[i]); | ||
95 | ret = put_user(addr, &data.addr[i]); | ||
96 | if (ret) | ||
97 | goto err; | ||
98 | } | ||
99 | } else if (cmd == TEGRA_ION_UNPIN) { | ||
100 | for (i = 0; i < data.count; i++) { | ||
101 | if (refs[i]) | ||
102 | ion_handle_put(refs[i]); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | err: | ||
107 | if (ret) { | ||
108 | pr_err("error, ret=%d\n", ret); | ||
109 | /* FIXME: undo pinning. */ | ||
110 | } | ||
111 | if (refs != on_stack) | ||
112 | kfree(refs); | ||
113 | return ret; | ||
114 | } | ||
115 | |||
116 | static int tegra_ion_alloc_from_id(struct ion_client *client, | ||
117 | unsigned int cmd, | ||
118 | unsigned long arg) | ||
119 | { | ||
120 | struct tegra_ion_id_data data; | ||
121 | struct ion_buffer *buffer; | ||
122 | struct tegra_ion_id_data __user *user_data = (struct tegra_ion_id_data __user *)arg; | ||
123 | |||
124 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
125 | return -EFAULT; | ||
126 | buffer = (struct ion_buffer *)data.id; | ||
127 | data.handle = ion_import(client, buffer); | ||
128 | data.size = buffer->size; | ||
129 | if (put_user(data.handle, &user_data->handle)) | ||
130 | return -EFAULT; | ||
131 | if (put_user(data.size, &user_data->size)) | ||
132 | return -EFAULT; | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static int tegra_ion_get_id(struct ion_client *client, | ||
137 | unsigned int cmd, | ||
138 | unsigned long arg) | ||
139 | { | ||
140 | bool valid_handle; | ||
141 | struct tegra_ion_id_data data; | ||
142 | struct tegra_ion_id_data __user *user_data = (struct tegra_ion_id_data __user *)arg; | ||
143 | |||
144 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
145 | return -EFAULT; | ||
146 | |||
147 | mutex_lock(&client->lock); | ||
148 | valid_handle = ion_handle_validate(client, data.handle); | ||
149 | mutex_unlock(&client->lock); | ||
150 | |||
151 | if (!valid_handle) { | ||
152 | WARN(1, "invalid handle passed\n"); | ||
153 | return -EINVAL; | ||
154 | } | ||
155 | |||
156 | pr_debug("h=0x%x, b=0x%x, bref=%d", | ||
157 | (u32)data.handle, (u32)data.handle->buffer, | ||
158 | atomic_read(&data.handle->buffer->ref.refcount)); | ||
159 | if (put_user((unsigned long)ion_handle_buffer(data.handle), | ||
160 | &user_data->id)) | ||
161 | return -EFAULT; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static int tegra_ion_cache_maint(struct ion_client *client, | ||
166 | unsigned int cmd, | ||
167 | unsigned long arg) | ||
168 | { | ||
169 | wmb(); | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int tegra_ion_rw(struct ion_client *client, | ||
174 | unsigned int cmd, | ||
175 | unsigned long arg) | ||
176 | { | ||
177 | bool valid_handle; | ||
178 | struct tegra_ion_rw_data data; | ||
179 | char *kern_addr, *src; | ||
180 | int ret = 0; | ||
181 | size_t copied = 0; | ||
182 | |||
183 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
184 | return -EFAULT; | ||
185 | |||
186 | if (!data.handle || !data.addr || !data.count || !data.elem_size) | ||
187 | return -EINVAL; | ||
188 | |||
189 | mutex_lock(&client->lock); | ||
190 | valid_handle = ion_handle_validate(client, data.handle); | ||
191 | mutex_unlock(&client->lock); | ||
192 | |||
193 | if (!valid_handle) { | ||
194 | WARN(1, "%s: invalid handle passed to get id.\n", __func__); | ||
195 | return -EINVAL; | ||
196 | } | ||
197 | |||
198 | if (data.elem_size == data.mem_stride && | ||
199 | data.elem_size == data.user_stride) { | ||
200 | data.elem_size *= data.count; | ||
201 | data.mem_stride = data.elem_size; | ||
202 | data.user_stride = data.elem_size; | ||
203 | data.count = 1; | ||
204 | } | ||
205 | |||
206 | kern_addr = ion_map_kernel(client, data.handle); | ||
207 | |||
208 | while (data.count--) { | ||
209 | if (data.offset + data.elem_size > data.handle->buffer->size) { | ||
210 | WARN(1, "read/write outside of handle\n"); | ||
211 | ret = -EFAULT; | ||
212 | break; | ||
213 | } | ||
214 | |||
215 | src = kern_addr + data.offset; | ||
216 | if (cmd == TEGRA_ION_READ) | ||
217 | ret = copy_to_user((void __user *)data.addr, | ||
218 | src, data.elem_size) ? -EFAULT : 0; | ||
219 | else | ||
220 | ret = copy_from_user(src, | ||
221 | (void __user *)data.addr, data.elem_size) ? -EFAULT : 0; | ||
222 | |||
223 | if (ret) | ||
224 | break; | ||
225 | |||
226 | copied += data.elem_size; | ||
227 | data.addr += data.user_stride; | ||
228 | data.offset += data.mem_stride; | ||
229 | } | ||
230 | |||
231 | ion_unmap_kernel(client, data.handle); | ||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | static int tegra_ion_get_param(struct ion_client *client, | ||
236 | unsigned int cmd, | ||
237 | unsigned long arg) | ||
238 | { | ||
239 | bool valid_handle; | ||
240 | struct tegra_ion_get_params_data data; | ||
241 | struct tegra_ion_get_params_data __user *user_data = | ||
242 | (struct tegra_ion_get_params_data __user *)arg; | ||
243 | struct ion_buffer *buffer; | ||
244 | |||
245 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) | ||
246 | return -EFAULT; | ||
247 | |||
248 | mutex_lock(&client->lock); | ||
249 | valid_handle = ion_handle_validate(client, data.handle); | ||
250 | mutex_unlock(&client->lock); | ||
251 | |||
252 | if (!valid_handle) { | ||
253 | WARN(1, "%s: invalid handle passed to get id.\n", __func__); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | |||
257 | buffer = ion_handle_buffer(data.handle); | ||
258 | data.align = 4096; | ||
259 | data.heap = 1; | ||
260 | ion_phys(client, data.handle, &data.addr, &data.size); | ||
261 | |||
262 | if (copy_to_user(user_data, &data, sizeof(data))) | ||
263 | return -EFAULT; | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static long tegra_ion_ioctl(struct ion_client *client, | ||
269 | unsigned int cmd, | ||
270 | unsigned long arg) | ||
271 | { | ||
272 | int ret = -ENOTTY; | ||
273 | |||
274 | switch (cmd) { | ||
275 | case TEGRA_ION_ALLOC_FROM_ID: | ||
276 | ret = tegra_ion_alloc_from_id(client, cmd, arg); | ||
277 | break; | ||
278 | case TEGRA_ION_GET_ID: | ||
279 | ret = tegra_ion_get_id(client, cmd, arg); | ||
280 | break; | ||
281 | case TEGRA_ION_PIN: | ||
282 | case TEGRA_ION_UNPIN: | ||
283 | ret = tegra_ion_pin(client, cmd, arg); | ||
284 | break; | ||
285 | case TEGRA_ION_CACHE_MAINT: | ||
286 | ret = tegra_ion_cache_maint(client, cmd, arg); | ||
287 | break; | ||
288 | case TEGRA_ION_READ: | ||
289 | case TEGRA_ION_WRITE: | ||
290 | ret = tegra_ion_rw(client, cmd, arg); | ||
291 | break; | ||
292 | case TEGRA_ION_GET_PARAM: | ||
293 | ret = tegra_ion_get_param(client, cmd, arg); | ||
294 | break; | ||
295 | default: | ||
296 | WARN(1, "Unknown custom ioctl\n"); | ||
297 | return -ENOTTY; | ||
298 | } | ||
299 | return ret; | ||
300 | } | ||
301 | |||
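For illustration only: a new custom command would follow the same shape as the handlers dispatched above, plus a matching case in the switch. The command code and data layout below are hypothetical; they are not defined in linux/tegra_ion.h by this patch.

    /* Hypothetical sketch -- TEGRA_ION_EXAMPLE and its data struct are
     * assumed names, not part of this patch. */
    struct tegra_ion_example_data {
            struct ion_handle *handle;
            unsigned long value;
    };

    static int tegra_ion_example(struct ion_client *client,
                                 unsigned int cmd, unsigned long arg)
    {
            struct tegra_ion_example_data data;

            if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                    return -EFAULT;
            /* validate data.handle with ion_handle_validate() under
             * client->lock, as the handlers above do, then do the work */
            return 0;
    }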
302 | static int tegra_ion_probe(struct platform_device *pdev) | ||
303 | { | ||
304 | struct ion_platform_data *pdata = pdev->dev.platform_data; | ||
305 | int i; | ||
306 | |||
307 | num_heaps = pdata->nr; | ||
308 | |||
309 | heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); | ||
310 | if (!heaps) return -ENOMEM; | ||
311 | idev = ion_device_create(tegra_ion_ioctl); | ||
312 | if (IS_ERR_OR_NULL(idev)) { | ||
313 | kfree(heaps); | ||
314 | return idev ? PTR_ERR(idev) : -ENOMEM; | ||
315 | } | ||
316 | |||
317 | /* create the heaps as specified in the board file */ | ||
318 | for (i = 0; i < num_heaps; i++) { | ||
319 | struct ion_platform_heap *heap_data = &pdata->heaps[i]; | ||
320 | |||
321 | heaps[i] = ion_heap_create(heap_data); | ||
322 | if (IS_ERR_OR_NULL(heaps[i])) { | ||
323 | pr_warn("%s(type:%d id:%d) isn't supported\n", | ||
324 | heap_data->name, | ||
325 | heap_data->type, heap_data->id); | ||
326 | continue; | ||
327 | } | ||
328 | ion_device_add_heap(idev, heaps[i]); | ||
329 | } | ||
330 | platform_set_drvdata(pdev, idev); | ||
331 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
332 | nvmap_dev = (struct nvmap_device *)idev; | ||
333 | #endif | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static int tegra_ion_remove(struct platform_device *pdev) | ||
338 | { | ||
339 | struct ion_device *idev = platform_get_drvdata(pdev); | ||
340 | int i; | ||
341 | |||
342 | ion_device_destroy(idev); | ||
343 | for (i = 0; i < num_heaps; i++) | ||
344 | ion_heap_destroy(heaps[i]); | ||
345 | kfree(heaps); | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static struct platform_driver ion_driver = { | ||
350 | .probe = tegra_ion_probe, | ||
351 | .remove = tegra_ion_remove, | ||
352 | .driver = { .name = "ion-tegra" } | ||
353 | }; | ||
354 | |||
355 | static int __init ion_init(void) | ||
356 | { | ||
357 | return platform_driver_register(&ion_driver); | ||
358 | } | ||
359 | |||
360 | static void __exit ion_exit(void) | ||
361 | { | ||
362 | platform_driver_unregister(&ion_driver); | ||
363 | } | ||
364 | |||
365 | fs_initcall(ion_init); | ||
366 | module_exit(ion_exit); | ||
367 | |||
368 | #if !defined(CONFIG_TEGRA_NVMAP) | ||
369 | struct nvmap_client *nvmap_create_client(struct nvmap_device *dev, | ||
370 | const char *name) | ||
371 | { | ||
372 | return ion_client_create(dev, CLIENT_HEAP_MASK, name); | ||
373 | } | ||
374 | |||
375 | struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size, | ||
376 | size_t align, unsigned int flags, | ||
377 | unsigned int heap_mask) | ||
378 | { | ||
379 | return ion_alloc(client, size, align, HEAP_FLAGS); | ||
380 | } | ||
381 | |||
382 | void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
383 | { | ||
384 | ion_free(client, r); | ||
385 | } | ||
386 | |||
387 | void *nvmap_mmap(struct nvmap_handle_ref *r) | ||
388 | { | ||
389 | return ion_map_kernel(r->client, r); | ||
390 | } | ||
391 | |||
392 | void nvmap_munmap(struct nvmap_handle_ref *r, void *addr) | ||
393 | { | ||
394 | ion_unmap_kernel(r->client, r); | ||
395 | } | ||
396 | |||
397 | struct nvmap_client *nvmap_client_get_file(int fd) | ||
398 | { | ||
399 | return ion_client_get_file(fd); | ||
400 | } | ||
401 | |||
402 | struct nvmap_client *nvmap_client_get(struct nvmap_client *client) | ||
403 | { | ||
404 | ion_client_get(client); | ||
405 | return client; | ||
406 | } | ||
407 | |||
408 | void nvmap_client_put(struct nvmap_client *c) | ||
409 | { | ||
410 | ion_client_put(c); | ||
411 | } | ||
412 | |||
413 | phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r) | ||
414 | { | ||
415 | ion_phys_addr_t addr; | ||
416 | size_t len; | ||
417 | |||
418 | ion_handle_get(r); | ||
419 | ion_phys(c, r, &addr, &len); | ||
420 | wmb(); | ||
421 | return addr; | ||
422 | } | ||
423 | |||
424 | phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id) | ||
425 | { | ||
426 | struct ion_handle *handle; | ||
427 | ion_phys_addr_t addr; | ||
428 | size_t len; | ||
429 | |||
430 | handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); | ||
431 | ion_phys(c, handle, &addr, &len); | ||
432 | return addr; | ||
433 | } | ||
434 | |||
435 | void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
436 | { | ||
437 | if (r) | ||
438 | ion_handle_put(r); | ||
439 | } | ||
440 | |||
441 | static int nvmap_reloc_pin_array(struct ion_client *client, | ||
442 | const struct nvmap_pinarray_elem *arr, | ||
443 | int nr, struct ion_handle *gather) | ||
444 | { | ||
445 | struct ion_handle *last_patch = NULL; | ||
446 | void *patch_addr; | ||
447 | ion_phys_addr_t pin_addr; | ||
448 | size_t len; | ||
449 | int i; | ||
450 | |||
451 | for (i = 0; i < nr; i++) { | ||
452 | struct ion_handle *patch; | ||
453 | struct ion_handle *pin; | ||
454 | ion_phys_addr_t reloc_addr; | ||
455 | |||
456 | /* all of the handles are validated and referenced prior to | ||
457 |  * calling this function, so the casts are safe here */ | ||
458 | pin = (struct ion_handle *)arr[i].pin_mem; | ||
459 | |||
460 | if (arr[i].patch_mem == (unsigned long)last_patch) { | ||
461 | patch = last_patch; | ||
462 | } else if (arr[i].patch_mem == (unsigned long)gather) { | ||
463 | patch = gather; | ||
464 | } else { | ||
465 | if (last_patch) | ||
466 | ion_handle_put(last_patch); | ||
467 | |||
468 | ion_handle_get((struct ion_handle *)arr[i].patch_mem); | ||
469 | patch = (struct ion_handle *)arr[i].patch_mem; | ||
470 | if (!patch) | ||
471 | return -EPERM; | ||
472 | last_patch = patch; | ||
473 | } | ||
474 | |||
475 | patch_addr = ion_map_kernel(client, patch); | ||
476 | patch_addr = patch_addr + arr[i].patch_offset; | ||
477 | |||
478 | ion_phys(client, pin, &pin_addr, &len); | ||
479 | reloc_addr = pin_addr + arr[i].pin_offset; | ||
480 | __raw_writel(reloc_addr, patch_addr); | ||
481 | ion_unmap_kernel(client, patch); | ||
482 | } | ||
483 | |||
484 | if (last_patch) | ||
485 | ion_handle_put(last_patch); | ||
486 | |||
487 | wmb(); | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather, | ||
492 | const struct nvmap_pinarray_elem *arr, int nr, | ||
493 | struct nvmap_handle **unique) | ||
494 | { | ||
495 | int i; | ||
496 | int count = 0; | ||
497 | |||
498 | /* FIXME: take care of duplicate ones & validation. */ | ||
499 | for (i = 0; i < nr; i++) { | ||
500 | unique[i] = (struct nvmap_handle *)arr[i].pin_mem; | ||
501 | nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]); | ||
502 | count++; | ||
503 | } | ||
504 | nvmap_reloc_pin_array((struct ion_client *)client, | ||
505 | arr, nr, (struct ion_handle *)gather); | ||
506 | return count; | ||
507 | } | ||
508 | |||
509 | void nvmap_unpin_handles(struct nvmap_client *client, | ||
510 | struct nvmap_handle **h, int nr) | ||
511 | { | ||
512 | int i; | ||
513 | |||
514 | for (i = 0; i < nr; i++) | ||
515 | nvmap_unpin(client, h[i]); | ||
516 | } | ||
517 | |||
518 | int nvmap_patch_word(struct nvmap_client *client, | ||
519 | struct nvmap_handle *patch, | ||
520 | u32 patch_offset, u32 patch_value) | ||
521 | { | ||
522 | void *vaddr; | ||
523 | u32 *patch_addr; | ||
524 | |||
525 | vaddr = ion_map_kernel(client, patch); | ||
526 | patch_addr = vaddr + patch_offset; | ||
527 | __raw_writel(patch_value, patch_addr); | ||
528 | wmb(); | ||
529 | ion_unmap_kernel(client, patch); | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h); | ||
534 | struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client, | ||
535 | unsigned long id) | ||
536 | { | ||
537 | struct ion_handle *handle; | ||
538 | |||
539 | handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); | ||
540 | pr_debug("id=0x%x, h=0x%x,c=0x%x", | ||
541 | (u32)id, (u32)handle, (u32)client); | ||
542 | nvmap_handle_get(handle); | ||
543 | return handle; | ||
544 | } | ||
545 | |||
546 | struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client, | ||
547 | unsigned long id) | ||
548 | { | ||
549 | struct ion_buffer *buffer; | ||
550 | struct ion_handle *handle; | ||
551 | struct ion_client *ion_client = (struct ion_client *)client; | ||
552 | |||
553 | handle = (struct ion_handle *)nvmap_convert_handle_u2k(id); | ||
554 | pr_debug("id=0x%x, h=0x%x,c=0x%x", | ||
555 | (u32)id, (u32)handle, (u32)client); | ||
556 | buffer = handle->buffer; | ||
557 | |||
558 | handle = ion_handle_create(ion_client, buffer); | ||
559 | |||
560 | mutex_lock(&ion_client->lock); | ||
561 | ion_handle_add(ion_client, handle); | ||
562 | mutex_unlock(&ion_client->lock); | ||
563 | |||
564 | pr_debug("dup id=0x%x, h=0x%x", (u32)id, (u32)handle); | ||
565 | return handle; | ||
566 | } | ||
567 | |||
568 | void _nvmap_handle_free(struct nvmap_handle *h) | ||
569 | { | ||
570 | ion_handle_put(h); | ||
571 | } | ||
572 | |||
573 | struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client, | ||
574 | size_t size, size_t align, unsigned int flags, unsigned int iova_start) | ||
575 | { | ||
576 | struct ion_handle *h; | ||
577 | |||
578 | h = ion_alloc(client, size, align, HEAP_FLAGS); | ||
579 | ion_remap_dma(client, h, iova_start); | ||
580 | return h; | ||
581 | } | ||
582 | |||
583 | void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
584 | { | ||
585 | ion_free(client, r); | ||
586 | } | ||
587 | |||
588 | struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h) | ||
589 | { | ||
590 | ion_handle_get(h); | ||
591 | return h; | ||
592 | } | ||
593 | |||
594 | void nvmap_handle_put(struct nvmap_handle *h) | ||
595 | { | ||
596 | ion_handle_put(h); | ||
597 | } | ||
598 | |||
599 | #endif | ||
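To show how the compatibility layer above is meant to be consumed, a hedged sketch of a legacy nvmap caller running on top of ion. The client name and sizes are illustrative; IS_ERR_OR_NULL is used because nvmap_create_client() forwards ion_client_create()'s return value.

    /* Illustrative sketch -- not part of this patch. */
    static int example_nvmap_compat(void)
    {
            struct nvmap_client *client;
            struct nvmap_handle_ref *r;
            phys_addr_t pa;

            client = nvmap_create_client(nvmap_dev, "example");
            if (IS_ERR_OR_NULL(client))
                    return -ENOMEM;
            r = nvmap_alloc(client, 4096, 4096, 0, 0);
            if (IS_ERR_OR_NULL(r)) {
                    nvmap_client_put(client);
                    return -ENOMEM;
            }
            pa = nvmap_pin(client, r);      /* takes a ref, returns phys addr */
            /* ... program hardware with pa ... */
            nvmap_unpin(client, r);
            nvmap_free(client, r);
            nvmap_client_put(client);
            return 0;
    }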
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 8a1021f2e31..c72f1c0b5e6 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -1171,10 +1171,9 @@ static int vga_arb_open(struct inode *inode, struct file *file) | |||
1171 | 1171 | ||
1172 | pr_debug("%s\n", __func__); | 1172 | pr_debug("%s\n", __func__); |
1173 | 1173 | ||
1174 | priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); | 1174 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
1175 | if (priv == NULL) | 1175 | if (priv == NULL) |
1176 | return -ENOMEM; | 1176 | return -ENOMEM; |
1177 | memset(priv, 0, sizeof(*priv)); | ||
1178 | spin_lock_init(&priv->lock); | 1177 | spin_lock_init(&priv->lock); |
1179 | file->private_data = priv; | 1178 | file->private_data = priv; |
1180 | 1179 | ||