Diffstat (limited to 'drivers/gpu')
68 files changed, 19672 insertions, 1060 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a8b33c2ec8d2..4be3acbaaf9a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,8 @@ | |||
7 | menuconfig DRM | 7 | menuconfig DRM |
8 | tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" | 8 | tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" |
9 | depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU | 9 | depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU |
10 | select I2C | ||
11 | select I2C_ALGOBIT | ||
10 | help | 12 | help |
11 | Kernel-level support for the Direct Rendering Infrastructure (DRI) | 13 | Kernel-level support for the Direct Rendering Infrastructure (DRI) |
12 | introduced in XFree86 4.0. If you say Y here, you need to select | 14 | introduced in XFree86 4.0. If you say Y here, you need to select |
@@ -65,6 +67,10 @@ config DRM_I830 | |||
65 | will load the correct one. | 67 | will load the correct one. |
66 | 68 | ||
67 | config DRM_I915 | 69 | config DRM_I915 |
70 | select FB_CFB_FILLRECT | ||
71 | select FB_CFB_COPYAREA | ||
72 | select FB_CFB_IMAGEBLIT | ||
73 | select FB | ||
68 | tristate "i915 driver" | 74 | tristate "i915 driver" |
69 | help | 75 | help |
70 | Choose this option if you have a system that has Intel 830M, 845G, | 76 | Choose this option if you have a system that has Intel 830M, 845G, |
@@ -76,6 +82,17 @@ config DRM_I915 | |||
76 | 82 | ||
77 | endchoice | 83 | endchoice |
78 | 84 | ||
85 | config DRM_I915_KMS | ||
86 | bool "Enable modesetting on intel by default" | ||
87 | depends on DRM_I915 | ||
88 | help | ||
89 | Choose this option if you want kernel modesetting enabled by default, | ||
90 | and you have a new enough userspace to support this. Running old | ||
91 | userspaces with this enabled will cause pain. Note that this causes | ||
92 | the driver to bind to PCI devices, which precludes loading things | ||
93 | like intelfb. | ||
94 | |||
95 | |||
79 | config DRM_MGA | 96 | config DRM_MGA |
80 | tristate "Matrox g200/g400" | 97 | tristate "Matrox g200/g400" |
81 | depends on DRM | 98 | depends on DRM |
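Note that DRM_I915_KMS only sets the default: with the i915 "modeset" module parameter added alongside this series (parameter name assumed, it is not part of this hunk), modesetting can still be forced on or off at load time, e.g. by booting with i915.modeset=1 or loading the driver with "modprobe i915 modeset=0".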
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 74da99495e21..30022c4a5c12 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | |||
9 | drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ | 9 | drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ |
10 | drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ | 10 | drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ |
11 | drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ | 11 | drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ |
12 | drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o | 12 | drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ |
13 | drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o | ||
13 | 14 | ||
14 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 15 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
15 | 16 | ||
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d33b8252b58..14796594e5d9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,10 +33,11 @@ | |||
33 | 33 | ||
34 | #include "drmP.h" | 34 | #include "drmP.h" |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <asm/agp.h> | ||
37 | 36 | ||
38 | #if __OS_HAS_AGP | 37 | #if __OS_HAS_AGP |
39 | 38 | ||
39 | #include <asm/agp.h> | ||
40 | |||
40 | /** | 41 | /** |
41 | * Get AGP information. | 42 | * Get AGP information. |
42 | * | 43 | * |
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index a73462723d2d..ca7a9ef5007b 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -45,14 +45,15 @@ | |||
45 | * the one with matching magic number, while holding the drm_device::struct_mutex | 45 | * the one with matching magic number, while holding the drm_device::struct_mutex |
46 | * lock. | 46 | * lock. |
47 | */ | 47 | */ |
48 | static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) | 48 | static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic) |
49 | { | 49 | { |
50 | struct drm_file *retval = NULL; | 50 | struct drm_file *retval = NULL; |
51 | struct drm_magic_entry *pt; | 51 | struct drm_magic_entry *pt; |
52 | struct drm_hash_item *hash; | 52 | struct drm_hash_item *hash; |
53 | struct drm_device *dev = master->minor->dev; | ||
53 | 54 | ||
54 | mutex_lock(&dev->struct_mutex); | 55 | mutex_lock(&dev->struct_mutex); |
55 | if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { | 56 | if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { |
56 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); | 57 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); |
57 | retval = pt->priv; | 58 | retval = pt->priv; |
58 | } | 59 | } |
@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic | |||
71 | * associated the magic number hash key in drm_device::magiclist, while holding | 72 | * associated the magic number hash key in drm_device::magiclist, while holding |
72 | * the drm_device::struct_mutex lock. | 73 | * the drm_device::struct_mutex lock. |
73 | */ | 74 | */ |
74 | static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, | 75 | static int drm_add_magic(struct drm_master *master, struct drm_file *priv, |
75 | drm_magic_t magic) | 76 | drm_magic_t magic) |
76 | { | 77 | { |
77 | struct drm_magic_entry *entry; | 78 | struct drm_magic_entry *entry; |
78 | 79 | struct drm_device *dev = master->minor->dev; | |
79 | DRM_DEBUG("%d\n", magic); | 80 | DRM_DEBUG("%d\n", magic); |
80 | 81 | ||
81 | entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); | 82 | entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); |
@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, | |||
83 | return -ENOMEM; | 84 | return -ENOMEM; |
84 | memset(entry, 0, sizeof(*entry)); | 85 | memset(entry, 0, sizeof(*entry)); |
85 | entry->priv = priv; | 86 | entry->priv = priv; |
86 | |||
87 | entry->hash_item.key = (unsigned long)magic; | 87 | entry->hash_item.key = (unsigned long)magic; |
88 | mutex_lock(&dev->struct_mutex); | 88 | mutex_lock(&dev->struct_mutex); |
89 | drm_ht_insert_item(&dev->magiclist, &entry->hash_item); | 89 | drm_ht_insert_item(&master->magiclist, &entry->hash_item); |
90 | list_add_tail(&entry->head, &dev->magicfree); | 90 | list_add_tail(&entry->head, &master->magicfree); |
91 | mutex_unlock(&dev->struct_mutex); | 91 | mutex_unlock(&dev->struct_mutex); |
92 | 92 | ||
93 | return 0; | 93 | return 0; |
@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, | |||
102 | * Searches and unlinks the entry in drm_device::magiclist with the magic | 102 | * Searches and unlinks the entry in drm_device::magiclist with the magic |
103 | * number hash key, while holding the drm_device::struct_mutex lock. | 103 | * number hash key, while holding the drm_device::struct_mutex lock. |
104 | */ | 104 | */ |
105 | static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) | 105 | static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) |
106 | { | 106 | { |
107 | struct drm_magic_entry *pt; | 107 | struct drm_magic_entry *pt; |
108 | struct drm_hash_item *hash; | 108 | struct drm_hash_item *hash; |
109 | struct drm_device *dev = master->minor->dev; | ||
109 | 110 | ||
110 | DRM_DEBUG("%d\n", magic); | 111 | DRM_DEBUG("%d\n", magic); |
111 | 112 | ||
112 | mutex_lock(&dev->struct_mutex); | 113 | mutex_lock(&dev->struct_mutex); |
113 | if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { | 114 | if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { |
114 | mutex_unlock(&dev->struct_mutex); | 115 | mutex_unlock(&dev->struct_mutex); |
115 | return -EINVAL; | 116 | return -EINVAL; |
116 | } | 117 | } |
117 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); | 118 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); |
118 | drm_ht_remove_item(&dev->magiclist, hash); | 119 | drm_ht_remove_item(&master->magiclist, hash); |
119 | list_del(&pt->head); | 120 | list_del(&pt->head); |
120 | mutex_unlock(&dev->struct_mutex); | 121 | mutex_unlock(&dev->struct_mutex); |
121 | 122 | ||
@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
153 | ++sequence; /* reserve 0 */ | 154 | ++sequence; /* reserve 0 */ |
154 | auth->magic = sequence++; | 155 | auth->magic = sequence++; |
155 | spin_unlock(&lock); | 156 | spin_unlock(&lock); |
156 | } while (drm_find_file(dev, auth->magic)); | 157 | } while (drm_find_file(file_priv->master, auth->magic)); |
157 | file_priv->magic = auth->magic; | 158 | file_priv->magic = auth->magic; |
158 | drm_add_magic(dev, file_priv, auth->magic); | 159 | drm_add_magic(file_priv->master, file_priv, auth->magic); |
159 | } | 160 | } |
160 | 161 | ||
161 | DRM_DEBUG("%u\n", auth->magic); | 162 | DRM_DEBUG("%u\n", auth->magic); |
@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data, | |||
181 | struct drm_file *file; | 182 | struct drm_file *file; |
182 | 183 | ||
183 | DRM_DEBUG("%u\n", auth->magic); | 184 | DRM_DEBUG("%u\n", auth->magic); |
184 | if ((file = drm_find_file(dev, auth->magic))) { | 185 | if ((file = drm_find_file(file_priv->master, auth->magic))) { |
185 | file->authenticated = 1; | 186 | file->authenticated = 1; |
186 | drm_remove_magic(dev, auth->magic); | 187 | drm_remove_magic(file_priv->master, auth->magic); |
187 | return 0; | 188 | return 0; |
188 | } | 189 | } |
189 | return -EINVAL; | 190 | return -EINVAL; |
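The drm_auth.c changes above move the magic hash from the per-device magiclist to the per-master one; the user-space handshake that exercises drm_getmagic()/drm_authmagic() is unchanged. A minimal sketch of that handshake using the libdrm wrappers (libdrm API, not part of this patch):

	#include <xf86drm.h>

	/* Client side: ask the kernel for a magic token
	 * (DRM_IOCTL_GET_MAGIC, handled by drm_getmagic() above). */
	int client_get_token(int fd, drm_magic_t *magic)
	{
		return drmGetMagic(fd, magic);
	}

	/* Master side (e.g. the X server): authenticate the client's token
	 * against its own magic list (DRM_IOCTL_AUTH_MAGIC, drm_authmagic()). */
	int master_auth_token(int master_fd, drm_magic_t magic)
	{
		return drmAuthMagic(master_fd, magic);
	}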
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b84166e..72c667f9bee1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, | |||
54 | { | 54 | { |
55 | struct drm_map_list *entry; | 55 | struct drm_map_list *entry; |
56 | list_for_each_entry(entry, &dev->maplist, head) { | 56 | list_for_each_entry(entry, &dev->maplist, head) { |
57 | if (entry->map && map->type == entry->map->type && | 57 | if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) && |
58 | ((entry->map->offset == map->offset) || | 58 | ((entry->map->offset == map->offset) || |
59 | (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) { | 59 | ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) { |
60 | return entry; | 60 | return entry; |
61 | } | 61 | } |
62 | } | 62 | } |
@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, | |||
210 | map->offset = (unsigned long)map->handle; | 210 | map->offset = (unsigned long)map->handle; |
211 | if (map->flags & _DRM_CONTAINS_LOCK) { | 211 | if (map->flags & _DRM_CONTAINS_LOCK) { |
212 | /* Prevent a 2nd X Server from creating a 2nd lock */ | 212 | /* Prevent a 2nd X Server from creating a 2nd lock */ |
213 | if (dev->lock.hw_lock != NULL) { | 213 | if (dev->primary->master->lock.hw_lock != NULL) { |
214 | vfree(map->handle); | 214 | vfree(map->handle); |
215 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | 215 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
216 | return -EBUSY; | 216 | return -EBUSY; |
217 | } | 217 | } |
218 | dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ | 218 | dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ |
219 | } | 219 | } |
220 | break; | 220 | break; |
221 | case _DRM_AGP: { | 221 | case _DRM_AGP: { |
@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, | |||
262 | DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size); | 262 | DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size); |
263 | 263 | ||
264 | break; | 264 | break; |
265 | case _DRM_GEM: | ||
266 | DRM_ERROR("tried to addmap GEM object\n"); | ||
267 | break; | ||
265 | } | 268 | } |
266 | case _DRM_SCATTER_GATHER: | 269 | case _DRM_SCATTER_GATHER: |
267 | if (!dev->sg) { | 270 | if (!dev->sg) { |
@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, | |||
319 | list->user_token = list->hash.key << PAGE_SHIFT; | 322 | list->user_token = list->hash.key << PAGE_SHIFT; |
320 | mutex_unlock(&dev->struct_mutex); | 323 | mutex_unlock(&dev->struct_mutex); |
321 | 324 | ||
325 | list->master = dev->primary->master; | ||
322 | *maplist = list; | 326 | *maplist = list; |
323 | return 0; | 327 | return 0; |
324 | } | 328 | } |
@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data, | |||
345 | struct drm_map_list *maplist; | 349 | struct drm_map_list *maplist; |
346 | int err; | 350 | int err; |
347 | 351 | ||
348 | if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) | 352 | if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) |
349 | return -EPERM; | 353 | return -EPERM; |
350 | 354 | ||
351 | err = drm_addmap_core(dev, map->offset, map->size, map->type, | 355 | err = drm_addmap_core(dev, map->offset, map->size, map->type, |
@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) | |||
380 | struct drm_map_list *r_list = NULL, *list_t; | 384 | struct drm_map_list *r_list = NULL, *list_t; |
381 | drm_dma_handle_t dmah; | 385 | drm_dma_handle_t dmah; |
382 | int found = 0; | 386 | int found = 0; |
387 | struct drm_master *master; | ||
383 | 388 | ||
384 | /* Find the list entry for the map and remove it */ | 389 | /* Find the list entry for the map and remove it */ |
385 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { | 390 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { |
386 | if (r_list->map == map) { | 391 | if (r_list->map == map) { |
392 | master = r_list->master; | ||
387 | list_del(&r_list->head); | 393 | list_del(&r_list->head); |
388 | drm_ht_remove_key(&dev->map_hash, | 394 | drm_ht_remove_key(&dev->map_hash, |
389 | r_list->user_token >> PAGE_SHIFT); | 395 | r_list->user_token >> PAGE_SHIFT); |
@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) | |||
409 | break; | 415 | break; |
410 | case _DRM_SHM: | 416 | case _DRM_SHM: |
411 | vfree(map->handle); | 417 | vfree(map->handle); |
418 | if (master) { | ||
419 | if (dev->sigdata.lock == master->lock.hw_lock) | ||
420 | dev->sigdata.lock = NULL; | ||
421 | master->lock.hw_lock = NULL; /* SHM removed */ | ||
422 | master->lock.file_priv = NULL; | ||
423 | wake_up_interruptible(&master->lock.lock_queue); | ||
424 | } | ||
412 | break; | 425 | break; |
413 | case _DRM_AGP: | 426 | case _DRM_AGP: |
414 | case _DRM_SCATTER_GATHER: | 427 | case _DRM_SCATTER_GATHER: |
@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) | |||
419 | dmah.size = map->size; | 432 | dmah.size = map->size; |
420 | __drm_pci_free(dev, &dmah); | 433 | __drm_pci_free(dev, &dmah); |
421 | break; | 434 | break; |
435 | case _DRM_GEM: | ||
436 | DRM_ERROR("tried to rmmap GEM object\n"); | ||
437 | break; | ||
422 | } | 438 | } |
423 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | 439 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
424 | 440 | ||
425 | return 0; | 441 | return 0; |
426 | } | 442 | } |
443 | EXPORT_SYMBOL(drm_rmmap_locked); | ||
427 | 444 | ||
428 | int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) | 445 | int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) |
429 | { | 446 | { |
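With the relaxed permission check in drm_addmap_ioctl() above, a DRM master no longer needs CAP_SYS_ADMIN to create the lock-bearing _DRM_SHM map, which is now tracked per master and torn down in drm_rmmap_locked(). A minimal sketch of the user-space call that reaches this path (libdrm API, not part of this patch):

	#include <xf86drm.h>

	/* Create the SAREA-style SHM map holding the hardware lock;
	 * drm_addmap_core() above returns -EBUSY if a lock map
	 * already exists. */
	int create_lock_map(int fd, drmSize size, drm_handle_t *handle)
	{
		return drmAddMap(fd, 0, size, DRM_SHM, DRM_CONTAINS_LOCK, handle);
	}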
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index d505f695421f..809ec0f03452 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new) | |||
256 | * hardware lock is held, clears the drm_device::context_flag and wakes up | 256 | * hardware lock is held, clears the drm_device::context_flag and wakes up |
257 | * drm_device::context_wait. | 257 | * drm_device::context_wait. |
258 | */ | 258 | */ |
259 | static int drm_context_switch_complete(struct drm_device * dev, int new) | 259 | static int drm_context_switch_complete(struct drm_device *dev, |
260 | struct drm_file *file_priv, int new) | ||
260 | { | 261 | { |
261 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ | 262 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ |
262 | dev->last_switch = jiffies; | 263 | dev->last_switch = jiffies; |
263 | 264 | ||
264 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { | 265 | if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { |
265 | DRM_ERROR("Lock isn't held after context switch\n"); | 266 | DRM_ERROR("Lock isn't held after context switch\n"); |
266 | } | 267 | } |
267 | 268 | ||
@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data, | |||
420 | struct drm_ctx *ctx = data; | 421 | struct drm_ctx *ctx = data; |
421 | 422 | ||
422 | DRM_DEBUG("%d\n", ctx->handle); | 423 | DRM_DEBUG("%d\n", ctx->handle); |
423 | drm_context_switch_complete(dev, ctx->handle); | 424 | drm_context_switch_complete(dev, file_priv, ctx->handle); |
424 | 425 | ||
425 | return 0; | 426 | return 0; |
426 | } | 427 | } |
@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data, | |||
442 | struct drm_ctx *ctx = data; | 443 | struct drm_ctx *ctx = data; |
443 | 444 | ||
444 | DRM_DEBUG("%d\n", ctx->handle); | 445 | DRM_DEBUG("%d\n", ctx->handle); |
445 | if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { | ||
446 | file_priv->remove_auth_on_close = 1; | ||
447 | } | ||
448 | if (ctx->handle != DRM_KERNEL_CONTEXT) { | 446 | if (ctx->handle != DRM_KERNEL_CONTEXT) { |
449 | if (dev->driver->context_dtor) | 447 | if (dev->driver->context_dtor) |
450 | dev->driver->context_dtor(dev, ctx->handle); | 448 | dev->driver->context_dtor(dev, ctx->handle); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
new file mode 100644
index 000000000000..bfce0992fefb
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -0,0 +1,2450 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006-2008 Intel Corporation | ||
3 | * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> | ||
4 | * Copyright (c) 2008 Red Hat Inc. | ||
5 | * | ||
6 | * DRM core CRTC related functions | ||
7 | * | ||
8 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
9 | * documentation for any purpose is hereby granted without fee, provided that | ||
10 | * the above copyright notice appear in all copies and that both that copyright | ||
11 | * notice and this permission notice appear in supporting documentation, and | ||
12 | * that the name of the copyright holders not be used in advertising or | ||
13 | * publicity pertaining to distribution of the software without specific, | ||
14 | * written prior permission. The copyright holders make no representations | ||
15 | * about the suitability of this software for any purpose. It is provided "as | ||
16 | * is" without express or implied warranty. | ||
17 | * | ||
18 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
19 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
20 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
21 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
22 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
23 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
24 | * OF THIS SOFTWARE. | ||
25 | * | ||
26 | * Authors: | ||
27 | * Keith Packard | ||
28 | * Eric Anholt <eric@anholt.net> | ||
29 | * Dave Airlie <airlied@linux.ie> | ||
30 | * Jesse Barnes <jesse.barnes@intel.com> | ||
31 | */ | ||
32 | #include <linux/list.h> | ||
33 | #include "drm.h" | ||
34 | #include "drmP.h" | ||
35 | #include "drm_crtc.h" | ||
36 | |||
37 | struct drm_prop_enum_list { | ||
38 | int type; | ||
39 | char *name; | ||
40 | }; | ||
41 | |||
42 | /* Avoid boilerplate. I'm tired of typing. */ | ||
43 | #define DRM_ENUM_NAME_FN(fnname, list) \ | ||
44 | char *fnname(int val) \ | ||
45 | { \ | ||
46 | int i; \ | ||
47 | for (i = 0; i < ARRAY_SIZE(list); i++) { \ | ||
48 | if (list[i].type == val) \ | ||
49 | return list[i].name; \ | ||
50 | } \ | ||
51 | return "(unknown)"; \ | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Global properties | ||
56 | */ | ||
57 | static struct drm_prop_enum_list drm_dpms_enum_list[] = | ||
58 | { { DRM_MODE_DPMS_ON, "On" }, | ||
59 | { DRM_MODE_DPMS_STANDBY, "Standby" }, | ||
60 | { DRM_MODE_DPMS_SUSPEND, "Suspend" }, | ||
61 | { DRM_MODE_DPMS_OFF, "Off" } | ||
62 | }; | ||
63 | |||
64 | DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) | ||
65 | |||
66 | /* | ||
67 | * Optional properties | ||
68 | */ | ||
69 | static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = | ||
70 | { | ||
71 | { DRM_MODE_SCALE_NON_GPU, "Non-GPU" }, | ||
72 | { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" }, | ||
73 | { DRM_MODE_SCALE_NO_SCALE, "No scale" }, | ||
74 | { DRM_MODE_SCALE_ASPECT, "Aspect" }, | ||
75 | }; | ||
76 | |||
77 | static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = | ||
78 | { | ||
79 | { DRM_MODE_DITHERING_OFF, "Off" }, | ||
80 | { DRM_MODE_DITHERING_ON, "On" }, | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * Non-global properties, but "required" for certain connectors. | ||
85 | */ | ||
86 | static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = | ||
87 | { | ||
88 | { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ | ||
89 | { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ | ||
90 | { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ | ||
91 | }; | ||
92 | |||
93 | DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) | ||
94 | |||
95 | static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = | ||
96 | { | ||
97 | { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ | ||
98 | { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ | ||
99 | { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ | ||
100 | }; | ||
101 | |||
102 | DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, | ||
103 | drm_dvi_i_subconnector_enum_list) | ||
104 | |||
105 | static struct drm_prop_enum_list drm_tv_select_enum_list[] = | ||
106 | { | ||
107 | { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ | ||
108 | { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ | ||
109 | { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ | ||
110 | { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ | ||
111 | }; | ||
112 | |||
113 | DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) | ||
114 | |||
115 | static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = | ||
116 | { | ||
117 | { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ | ||
118 | { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ | ||
119 | { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ | ||
120 | { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ | ||
121 | }; | ||
122 | |||
123 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, | ||
124 | drm_tv_subconnector_enum_list) | ||
125 | |||
126 | struct drm_conn_prop_enum_list { | ||
127 | int type; | ||
128 | char *name; | ||
129 | int count; | ||
130 | }; | ||
131 | |||
132 | /* | ||
133 | * Connector and encoder types. | ||
134 | */ | ||
135 | static struct drm_conn_prop_enum_list drm_connector_enum_list[] = | ||
136 | { { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, | ||
137 | { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, | ||
138 | { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, | ||
139 | { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, | ||
140 | { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, | ||
141 | { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, | ||
142 | { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, | ||
143 | { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, | ||
144 | { DRM_MODE_CONNECTOR_Component, "Component", 0 }, | ||
145 | { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 }, | ||
146 | { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, | ||
147 | { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, | ||
148 | { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, | ||
149 | }; | ||
150 | |||
151 | static struct drm_prop_enum_list drm_encoder_enum_list[] = | ||
152 | { { DRM_MODE_ENCODER_NONE, "None" }, | ||
153 | { DRM_MODE_ENCODER_DAC, "DAC" }, | ||
154 | { DRM_MODE_ENCODER_TMDS, "TMDS" }, | ||
155 | { DRM_MODE_ENCODER_LVDS, "LVDS" }, | ||
156 | { DRM_MODE_ENCODER_TVDAC, "TV" }, | ||
157 | }; | ||
158 | |||
159 | char *drm_get_encoder_name(struct drm_encoder *encoder) | ||
160 | { | ||
161 | static char buf[32]; | ||
162 | |||
163 | snprintf(buf, 32, "%s-%d", | ||
164 | drm_encoder_enum_list[encoder->encoder_type].name, | ||
165 | encoder->base.id); | ||
166 | return buf; | ||
167 | } | ||
168 | |||
169 | char *drm_get_connector_name(struct drm_connector *connector) | ||
170 | { | ||
171 | static char buf[32]; | ||
172 | |||
173 | snprintf(buf, 32, "%s-%d", | ||
174 | drm_connector_enum_list[connector->connector_type].name, | ||
175 | connector->connector_type_id); | ||
176 | return buf; | ||
177 | } | ||
178 | EXPORT_SYMBOL(drm_get_connector_name); | ||
179 | |||
180 | char *drm_get_connector_status_name(enum drm_connector_status status) | ||
181 | { | ||
182 | if (status == connector_status_connected) | ||
183 | return "connected"; | ||
184 | else if (status == connector_status_disconnected) | ||
185 | return "disconnected"; | ||
186 | else | ||
187 | return "unknown"; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * drm_mode_object_get - allocate a new identifier | ||
192 | * @dev: DRM device | ||
193 | * @ptr: object pointer, used to generate unique ID | ||
194 | * @type: object type | ||
195 | * | ||
196 | * LOCKING: | ||
197 | * | ||
198 | * Create a unique identifier based on @ptr in @dev's identifier space. Used | ||
199 | * for tracking modes, CRTCs and connectors. | ||
200 | * | ||
201 | * RETURNS: | ||
202 | * New unique (relative to other objects in @dev) integer identifier for the | ||
203 | * object. | ||
204 | */ | ||
205 | static int drm_mode_object_get(struct drm_device *dev, | ||
206 | struct drm_mode_object *obj, uint32_t obj_type) | ||
207 | { | ||
208 | int new_id = 0; | ||
209 | int ret; | ||
210 | |||
211 | again: | ||
212 | if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { | ||
213 | DRM_ERROR("Ran out of memory getting a mode number\n"); | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | |||
217 | mutex_lock(&dev->mode_config.idr_mutex); | ||
218 | ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); | ||
219 | mutex_unlock(&dev->mode_config.idr_mutex); | ||
220 | if (ret == -EAGAIN) | ||
221 | goto again; | ||
222 | |||
223 | obj->id = new_id; | ||
224 | obj->type = obj_type; | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | /** | ||
229 | * drm_mode_object_put - free an identifier | ||
230 | * @dev: DRM device | ||
231 | * @id: ID to free | ||
232 | * | ||
233 | * LOCKING: | ||
234 | * Caller must hold DRM mode_config lock. | ||
235 | * | ||
236 | * Free @id from @dev's unique identifier pool. | ||
237 | */ | ||
238 | static void drm_mode_object_put(struct drm_device *dev, | ||
239 | struct drm_mode_object *object) | ||
240 | { | ||
241 | mutex_lock(&dev->mode_config.idr_mutex); | ||
242 | idr_remove(&dev->mode_config.crtc_idr, object->id); | ||
243 | mutex_unlock(&dev->mode_config.idr_mutex); | ||
244 | } | ||
245 | |||
246 | void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) | ||
247 | { | ||
248 | struct drm_mode_object *obj = NULL; | ||
249 | |||
250 | mutex_lock(&dev->mode_config.idr_mutex); | ||
251 | obj = idr_find(&dev->mode_config.crtc_idr, id); | ||
252 | if (!obj || (obj->type != type) || (obj->id != id)) | ||
253 | obj = NULL; | ||
254 | mutex_unlock(&dev->mode_config.idr_mutex); | ||
255 | |||
256 | return obj; | ||
257 | } | ||
258 | EXPORT_SYMBOL(drm_mode_object_find); | ||
259 | |||
260 | /** | ||
261 | * drm_crtc_from_fb - find the CRTC structure associated with an fb | ||
262 | * @dev: DRM device | ||
263 | * @fb: framebuffer in question | ||
264 | * | ||
265 | * LOCKING: | ||
266 | * Caller must hold mode_config lock. | ||
267 | * | ||
268 | * Find CRTC in the mode_config structure that matches @fb. | ||
269 | * | ||
270 | * RETURNS: | ||
271 | * Pointer to the CRTC or NULL if it wasn't found. | ||
272 | */ | ||
273 | struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, | ||
274 | struct drm_framebuffer *fb) | ||
275 | { | ||
276 | struct drm_crtc *crtc; | ||
277 | |||
278 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
279 | if (crtc->fb == fb) | ||
280 | return crtc; | ||
281 | } | ||
282 | return NULL; | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | * drm_framebuffer_init - initialize a framebuffer | ||
287 | * @dev: DRM device | ||
288 | * | ||
289 | * LOCKING: | ||
290 | * Caller must hold mode config lock. | ||
291 | * | ||
292 | * Allocates an ID for the framebuffer's parent mode object, sets its mode | ||
293 | * functions & device file and adds it to the master fd list. | ||
294 | * | ||
295 | * RETURNS: | ||
296 | * Zero on success, error code on failure. | ||
297 | */ | ||
298 | int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, | ||
299 | const struct drm_framebuffer_funcs *funcs) | ||
300 | { | ||
301 | int ret; | ||
302 | |||
303 | ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); | ||
304 | if (ret) { | ||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | fb->dev = dev; | ||
309 | fb->funcs = funcs; | ||
310 | dev->mode_config.num_fb++; | ||
311 | list_add(&fb->head, &dev->mode_config.fb_list); | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | EXPORT_SYMBOL(drm_framebuffer_init); | ||
316 | |||
317 | /** | ||
318 | * drm_framebuffer_cleanup - remove a framebuffer object | ||
319 | * @fb: framebuffer to remove | ||
320 | * | ||
321 | * LOCKING: | ||
322 | * Caller must hold mode config lock. | ||
323 | * | ||
324 | * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes | ||
325 | * it, setting it to NULL. | ||
326 | */ | ||
327 | void drm_framebuffer_cleanup(struct drm_framebuffer *fb) | ||
328 | { | ||
329 | struct drm_device *dev = fb->dev; | ||
330 | struct drm_crtc *crtc; | ||
331 | |||
332 | /* remove from any CRTC */ | ||
333 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
334 | if (crtc->fb == fb) | ||
335 | crtc->fb = NULL; | ||
336 | } | ||
337 | |||
338 | drm_mode_object_put(dev, &fb->base); | ||
339 | list_del(&fb->head); | ||
340 | dev->mode_config.num_fb--; | ||
341 | } | ||
342 | EXPORT_SYMBOL(drm_framebuffer_cleanup); | ||
343 | |||
344 | /** | ||
345 | * drm_crtc_init - Initialise a new CRTC object | ||
346 | * @dev: DRM device | ||
347 | * @crtc: CRTC object to init | ||
348 | * @funcs: callbacks for the new CRTC | ||
349 | * | ||
350 | * LOCKING: | ||
351 | * Caller must hold mode config lock. | ||
352 | * | ||
353 | * Inits a new object created as base part of a driver crtc object. | ||
354 | */ | ||
355 | void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, | ||
356 | const struct drm_crtc_funcs *funcs) | ||
357 | { | ||
358 | crtc->dev = dev; | ||
359 | crtc->funcs = funcs; | ||
360 | |||
361 | mutex_lock(&dev->mode_config.mutex); | ||
362 | drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); | ||
363 | |||
364 | list_add_tail(&crtc->head, &dev->mode_config.crtc_list); | ||
365 | dev->mode_config.num_crtc++; | ||
366 | mutex_unlock(&dev->mode_config.mutex); | ||
367 | } | ||
368 | EXPORT_SYMBOL(drm_crtc_init); | ||
369 | |||
370 | /** | ||
371 | * drm_crtc_cleanup - Cleans up the core crtc usage. | ||
372 | * @crtc: CRTC to cleanup | ||
373 | * | ||
374 | * LOCKING: | ||
375 | * Caller must hold mode config lock. | ||
376 | * | ||
377 | * Cleanup @crtc. Removes from drm modesetting space | ||
378 | * does NOT free object, caller does that. | ||
379 | */ | ||
380 | void drm_crtc_cleanup(struct drm_crtc *crtc) | ||
381 | { | ||
382 | struct drm_device *dev = crtc->dev; | ||
383 | |||
384 | if (crtc->gamma_store) { | ||
385 | kfree(crtc->gamma_store); | ||
386 | crtc->gamma_store = NULL; | ||
387 | } | ||
388 | |||
389 | drm_mode_object_put(dev, &crtc->base); | ||
390 | list_del(&crtc->head); | ||
391 | dev->mode_config.num_crtc--; | ||
392 | } | ||
393 | EXPORT_SYMBOL(drm_crtc_cleanup); | ||
394 | |||
395 | /** | ||
396 | * drm_mode_probed_add - add a mode to a connector's probed mode list | ||
397 | * @connector: connector the new mode is attached to | ||
398 | * @mode: mode data | ||
399 | * | ||
400 | * LOCKING: | ||
401 | * Caller must hold mode config lock. | ||
402 | * | ||
403 | * Add @mode to @connector's mode list for later use. | ||
404 | */ | ||
405 | void drm_mode_probed_add(struct drm_connector *connector, | ||
406 | struct drm_display_mode *mode) | ||
407 | { | ||
408 | list_add(&mode->head, &connector->probed_modes); | ||
409 | } | ||
410 | EXPORT_SYMBOL(drm_mode_probed_add); | ||
411 | |||
412 | /** | ||
413 | * drm_mode_remove - remove and free a mode | ||
414 | * @connector: connector list to modify | ||
415 | * @mode: mode to remove | ||
416 | * | ||
417 | * LOCKING: | ||
418 | * Caller must hold mode config lock. | ||
419 | * | ||
420 | * Remove @mode from @connector's mode list, then free it. | ||
421 | */ | ||
422 | void drm_mode_remove(struct drm_connector *connector, | ||
423 | struct drm_display_mode *mode) | ||
424 | { | ||
425 | list_del(&mode->head); | ||
426 | kfree(mode); | ||
427 | } | ||
428 | EXPORT_SYMBOL(drm_mode_remove); | ||
429 | |||
430 | /** | ||
431 | * drm_connector_init - Init a preallocated connector | ||
432 | * @dev: DRM device | ||
433 | * @connector: the connector to init | ||
434 | * @funcs: callbacks for this connector | ||
435 | * @name: user visible name of the connector | ||
436 | * | ||
437 | * LOCKING: | ||
438 | * Caller must hold @dev's mode_config lock. | ||
439 | * | ||
440 | * Initialises a preallocated connector. Connectors should be | ||
441 | * subclassed as part of driver connector objects. | ||
442 | */ | ||
443 | void drm_connector_init(struct drm_device *dev, | ||
444 | struct drm_connector *connector, | ||
445 | const struct drm_connector_funcs *funcs, | ||
446 | int connector_type) | ||
447 | { | ||
448 | mutex_lock(&dev->mode_config.mutex); | ||
449 | |||
450 | connector->dev = dev; | ||
451 | connector->funcs = funcs; | ||
452 | drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); | ||
453 | connector->connector_type = connector_type; | ||
454 | connector->connector_type_id = | ||
455 | ++drm_connector_enum_list[connector_type].count; /* TODO */ | ||
456 | INIT_LIST_HEAD(&connector->user_modes); | ||
457 | INIT_LIST_HEAD(&connector->probed_modes); | ||
458 | INIT_LIST_HEAD(&connector->modes); | ||
459 | connector->edid_blob_ptr = NULL; | ||
460 | |||
461 | list_add_tail(&connector->head, &dev->mode_config.connector_list); | ||
462 | dev->mode_config.num_connector++; | ||
463 | |||
464 | drm_connector_attach_property(connector, | ||
465 | dev->mode_config.edid_property, 0); | ||
466 | |||
467 | drm_connector_attach_property(connector, | ||
468 | dev->mode_config.dpms_property, 0); | ||
469 | |||
470 | mutex_unlock(&dev->mode_config.mutex); | ||
471 | } | ||
472 | EXPORT_SYMBOL(drm_connector_init); | ||
473 | |||
474 | /** | ||
475 | * drm_connector_cleanup - cleans up an initialised connector | ||
476 | * @connector: connector to cleanup | ||
477 | * | ||
478 | * LOCKING: | ||
479 | * Caller must hold @dev's mode_config lock. | ||
480 | * | ||
481 | * Cleans up the connector but doesn't free the object. | ||
482 | */ | ||
483 | void drm_connector_cleanup(struct drm_connector *connector) | ||
484 | { | ||
485 | struct drm_device *dev = connector->dev; | ||
486 | struct drm_display_mode *mode, *t; | ||
487 | |||
488 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) | ||
489 | drm_mode_remove(connector, mode); | ||
490 | |||
491 | list_for_each_entry_safe(mode, t, &connector->modes, head) | ||
492 | drm_mode_remove(connector, mode); | ||
493 | |||
494 | list_for_each_entry_safe(mode, t, &connector->user_modes, head) | ||
495 | drm_mode_remove(connector, mode); | ||
496 | |||
497 | mutex_lock(&dev->mode_config.mutex); | ||
498 | drm_mode_object_put(dev, &connector->base); | ||
499 | list_del(&connector->head); | ||
500 | mutex_unlock(&dev->mode_config.mutex); | ||
501 | } | ||
502 | EXPORT_SYMBOL(drm_connector_cleanup); | ||
503 | |||
504 | void drm_encoder_init(struct drm_device *dev, | ||
505 | struct drm_encoder *encoder, | ||
506 | const struct drm_encoder_funcs *funcs, | ||
507 | int encoder_type) | ||
508 | { | ||
509 | mutex_lock(&dev->mode_config.mutex); | ||
510 | |||
511 | encoder->dev = dev; | ||
512 | |||
513 | drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); | ||
514 | encoder->encoder_type = encoder_type; | ||
515 | encoder->funcs = funcs; | ||
516 | |||
517 | list_add_tail(&encoder->head, &dev->mode_config.encoder_list); | ||
518 | dev->mode_config.num_encoder++; | ||
519 | |||
520 | mutex_unlock(&dev->mode_config.mutex); | ||
521 | } | ||
522 | EXPORT_SYMBOL(drm_encoder_init); | ||
523 | |||
524 | void drm_encoder_cleanup(struct drm_encoder *encoder) | ||
525 | { | ||
526 | struct drm_device *dev = encoder->dev; | ||
527 | mutex_lock(&dev->mode_config.mutex); | ||
528 | drm_mode_object_put(dev, &encoder->base); | ||
529 | list_del(&encoder->head); | ||
530 | mutex_unlock(&dev->mode_config.mutex); | ||
531 | } | ||
532 | EXPORT_SYMBOL(drm_encoder_cleanup); | ||
533 | |||
534 | /** | ||
535 | * drm_mode_create - create a new display mode | ||
536 | * @dev: DRM device | ||
537 | * | ||
538 | * LOCKING: | ||
539 | * Caller must hold DRM mode_config lock. | ||
540 | * | ||
541 | * Create a new drm_display_mode, give it an ID, and return it. | ||
542 | * | ||
543 | * RETURNS: | ||
544 | * Pointer to new mode on success, NULL on error. | ||
545 | */ | ||
546 | struct drm_display_mode *drm_mode_create(struct drm_device *dev) | ||
547 | { | ||
548 | struct drm_display_mode *nmode; | ||
549 | |||
550 | nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL); | ||
551 | if (!nmode) | ||
552 | return NULL; | ||
553 | |||
554 | drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE); | ||
555 | return nmode; | ||
556 | } | ||
557 | EXPORT_SYMBOL(drm_mode_create); | ||
558 | |||
559 | /** | ||
560 | * drm_mode_destroy - remove a mode | ||
561 | * @dev: DRM device | ||
562 | * @mode: mode to remove | ||
563 | * | ||
564 | * LOCKING: | ||
565 | * Caller must hold mode config lock. | ||
566 | * | ||
567 | * Free @mode's unique identifier, then free it. | ||
568 | */ | ||
569 | void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) | ||
570 | { | ||
571 | drm_mode_object_put(dev, &mode->base); | ||
572 | |||
573 | kfree(mode); | ||
574 | } | ||
575 | EXPORT_SYMBOL(drm_mode_destroy); | ||
576 | |||
577 | static int drm_mode_create_standard_connector_properties(struct drm_device *dev) | ||
578 | { | ||
579 | struct drm_property *edid; | ||
580 | struct drm_property *dpms; | ||
581 | int i; | ||
582 | |||
583 | /* | ||
584 | * Standard properties (apply to all connectors) | ||
585 | */ | ||
586 | edid = drm_property_create(dev, DRM_MODE_PROP_BLOB | | ||
587 | DRM_MODE_PROP_IMMUTABLE, | ||
588 | "EDID", 0); | ||
589 | dev->mode_config.edid_property = edid; | ||
590 | |||
591 | dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
592 | "DPMS", ARRAY_SIZE(drm_dpms_enum_list)); | ||
593 | for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) | ||
594 | drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type, | ||
595 | drm_dpms_enum_list[i].name); | ||
596 | dev->mode_config.dpms_property = dpms; | ||
597 | |||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | /** | ||
602 | * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties | ||
603 | * @dev: DRM device | ||
604 | * | ||
605 | * Called by a driver the first time a DVI-I connector is made. | ||
606 | */ | ||
607 | int drm_mode_create_dvi_i_properties(struct drm_device *dev) | ||
608 | { | ||
609 | struct drm_property *dvi_i_selector; | ||
610 | struct drm_property *dvi_i_subconnector; | ||
611 | int i; | ||
612 | |||
613 | if (dev->mode_config.dvi_i_select_subconnector_property) | ||
614 | return 0; | ||
615 | |||
616 | dvi_i_selector = | ||
617 | drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
618 | "select subconnector", | ||
619 | ARRAY_SIZE(drm_dvi_i_select_enum_list)); | ||
620 | for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++) | ||
621 | drm_property_add_enum(dvi_i_selector, i, | ||
622 | drm_dvi_i_select_enum_list[i].type, | ||
623 | drm_dvi_i_select_enum_list[i].name); | ||
624 | dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; | ||
625 | |||
626 | dvi_i_subconnector = | ||
627 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | ||
628 | DRM_MODE_PROP_IMMUTABLE, | ||
629 | "subconnector", | ||
630 | ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); | ||
631 | for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++) | ||
632 | drm_property_add_enum(dvi_i_subconnector, i, | ||
633 | drm_dvi_i_subconnector_enum_list[i].type, | ||
634 | drm_dvi_i_subconnector_enum_list[i].name); | ||
635 | dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; | ||
636 | |||
637 | return 0; | ||
638 | } | ||
639 | EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); | ||
640 | |||
641 | /** | ||
642 | * drm_create_tv_properties - create TV specific connector properties | ||
643 | * @dev: DRM device | ||
644 | * @num_modes: number of different TV formats (modes) supported | ||
645 | * @modes: array of pointers to strings containing name of each format | ||
646 | * | ||
647 | * Called by a driver's TV initialization routine, this function creates | ||
648 | * the TV specific connector properties for a given device. Caller is | ||
649 | * responsible for allocating a list of format names and passing them to | ||
650 | * this routine. | ||
651 | */ | ||
652 | int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, | ||
653 | char *modes[]) | ||
654 | { | ||
655 | struct drm_property *tv_selector; | ||
656 | struct drm_property *tv_subconnector; | ||
657 | int i; | ||
658 | |||
659 | if (dev->mode_config.tv_select_subconnector_property) | ||
660 | return 0; | ||
661 | |||
662 | /* | ||
663 | * Basic connector properties | ||
664 | */ | ||
665 | tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
666 | "select subconnector", | ||
667 | ARRAY_SIZE(drm_tv_select_enum_list)); | ||
668 | for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++) | ||
669 | drm_property_add_enum(tv_selector, i, | ||
670 | drm_tv_select_enum_list[i].type, | ||
671 | drm_tv_select_enum_list[i].name); | ||
672 | dev->mode_config.tv_select_subconnector_property = tv_selector; | ||
673 | |||
674 | tv_subconnector = | ||
675 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | ||
676 | DRM_MODE_PROP_IMMUTABLE, "subconnector", | ||
677 | ARRAY_SIZE(drm_tv_subconnector_enum_list)); | ||
678 | for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++) | ||
679 | drm_property_add_enum(tv_subconnector, i, | ||
680 | drm_tv_subconnector_enum_list[i].type, | ||
681 | drm_tv_subconnector_enum_list[i].name); | ||
682 | dev->mode_config.tv_subconnector_property = tv_subconnector; | ||
683 | |||
684 | /* | ||
685 | * Other, TV specific properties: margins & TV modes. | ||
686 | */ | ||
687 | dev->mode_config.tv_left_margin_property = | ||
688 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
689 | "left margin", 2); | ||
690 | dev->mode_config.tv_left_margin_property->values[0] = 0; | ||
691 | dev->mode_config.tv_left_margin_property->values[1] = 100; | ||
692 | |||
693 | dev->mode_config.tv_right_margin_property = | ||
694 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
695 | "right margin", 2); | ||
696 | dev->mode_config.tv_right_margin_property->values[0] = 0; | ||
697 | dev->mode_config.tv_right_margin_property->values[1] = 100; | ||
698 | |||
699 | dev->mode_config.tv_top_margin_property = | ||
700 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
701 | "top margin", 2); | ||
702 | dev->mode_config.tv_top_margin_property->values[0] = 0; | ||
703 | dev->mode_config.tv_top_margin_property->values[1] = 100; | ||
704 | |||
705 | dev->mode_config.tv_bottom_margin_property = | ||
706 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
707 | "bottom margin", 2); | ||
708 | dev->mode_config.tv_bottom_margin_property->values[0] = 0; | ||
709 | dev->mode_config.tv_bottom_margin_property->values[1] = 100; | ||
710 | |||
711 | dev->mode_config.tv_mode_property = | ||
712 | drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
713 | "mode", num_modes); | ||
714 | for (i = 0; i < num_modes; i++) | ||
715 | drm_property_add_enum(dev->mode_config.tv_mode_property, i, | ||
716 | i, modes[i]); | ||
717 | |||
718 | return 0; | ||
719 | } | ||
720 | EXPORT_SYMBOL(drm_mode_create_tv_properties); | ||
721 | |||
722 | /** | ||
723 | * drm_mode_create_scaling_mode_property - create scaling mode property | ||
724 | * @dev: DRM device | ||
725 | * | ||
726 | * Called by a driver the first time it's needed, must be attached to desired | ||
727 | * connectors. | ||
728 | */ | ||
729 | int drm_mode_create_scaling_mode_property(struct drm_device *dev) | ||
730 | { | ||
731 | struct drm_property *scaling_mode; | ||
732 | int i; | ||
733 | |||
734 | if (dev->mode_config.scaling_mode_property) | ||
735 | return 0; | ||
736 | |||
737 | scaling_mode = | ||
738 | drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode", | ||
739 | ARRAY_SIZE(drm_scaling_mode_enum_list)); | ||
740 | for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++) | ||
741 | drm_property_add_enum(scaling_mode, i, | ||
742 | drm_scaling_mode_enum_list[i].type, | ||
743 | drm_scaling_mode_enum_list[i].name); | ||
744 | |||
745 | dev->mode_config.scaling_mode_property = scaling_mode; | ||
746 | |||
747 | return 0; | ||
748 | } | ||
749 | EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); | ||
750 | |||
751 | /** | ||
752 | * drm_mode_create_dithering_property - create dithering property | ||
753 | * @dev: DRM device | ||
754 | * | ||
755 | * Called by a driver the first time it's needed, must be attached to desired | ||
756 | * connectors. | ||
757 | */ | ||
758 | int drm_mode_create_dithering_property(struct drm_device *dev) | ||
759 | { | ||
760 | struct drm_property *dithering_mode; | ||
761 | int i; | ||
762 | |||
763 | if (dev->mode_config.dithering_mode_property) | ||
764 | return 0; | ||
765 | |||
766 | dithering_mode = | ||
767 | drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering", | ||
768 | ARRAY_SIZE(drm_dithering_mode_enum_list)); | ||
769 | for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++) | ||
770 | drm_property_add_enum(dithering_mode, i, | ||
771 | drm_dithering_mode_enum_list[i].type, | ||
772 | drm_dithering_mode_enum_list[i].name); | ||
773 | dev->mode_config.dithering_mode_property = dithering_mode; | ||
774 | |||
775 | return 0; | ||
776 | } | ||
777 | EXPORT_SYMBOL(drm_mode_create_dithering_property); | ||
778 | |||
779 | /** | ||
780 | * drm_mode_config_init - initialize DRM mode_configuration structure | ||
781 | * @dev: DRM device | ||
782 | * | ||
783 | * LOCKING: | ||
784 | * None, should happen single threaded at init time. | ||
785 | * | ||
786 | * Initialize @dev's mode_config structure, used for tracking the graphics | ||
787 | * configuration of @dev. | ||
788 | */ | ||
789 | void drm_mode_config_init(struct drm_device *dev) | ||
790 | { | ||
791 | mutex_init(&dev->mode_config.mutex); | ||
792 | mutex_init(&dev->mode_config.idr_mutex); | ||
793 | INIT_LIST_HEAD(&dev->mode_config.fb_list); | ||
794 | INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); | ||
795 | INIT_LIST_HEAD(&dev->mode_config.crtc_list); | ||
796 | INIT_LIST_HEAD(&dev->mode_config.connector_list); | ||
797 | INIT_LIST_HEAD(&dev->mode_config.encoder_list); | ||
798 | INIT_LIST_HEAD(&dev->mode_config.property_list); | ||
799 | INIT_LIST_HEAD(&dev->mode_config.property_blob_list); | ||
800 | idr_init(&dev->mode_config.crtc_idr); | ||
801 | |||
802 | mutex_lock(&dev->mode_config.mutex); | ||
803 | drm_mode_create_standard_connector_properties(dev); | ||
804 | mutex_unlock(&dev->mode_config.mutex); | ||
805 | |||
806 | /* Just to be sure */ | ||
807 | dev->mode_config.num_fb = 0; | ||
808 | dev->mode_config.num_connector = 0; | ||
809 | dev->mode_config.num_crtc = 0; | ||
810 | dev->mode_config.num_encoder = 0; | ||
811 | } | ||
812 | EXPORT_SYMBOL(drm_mode_config_init); | ||
813 | |||
814 | int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) | ||
815 | { | ||
816 | uint32_t total_objects = 0; | ||
817 | |||
818 | total_objects += dev->mode_config.num_crtc; | ||
819 | total_objects += dev->mode_config.num_connector; | ||
820 | total_objects += dev->mode_config.num_encoder; | ||
821 | |||
822 | if (total_objects == 0) | ||
823 | return -EINVAL; | ||
824 | |||
825 | group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); | ||
826 | if (!group->id_list) | ||
827 | return -ENOMEM; | ||
828 | |||
829 | group->num_crtcs = 0; | ||
830 | group->num_connectors = 0; | ||
831 | group->num_encoders = 0; | ||
832 | return 0; | ||
833 | } | ||
834 | |||
835 | int drm_mode_group_init_legacy_group(struct drm_device *dev, | ||
836 | struct drm_mode_group *group) | ||
837 | { | ||
838 | struct drm_crtc *crtc; | ||
839 | struct drm_encoder *encoder; | ||
840 | struct drm_connector *connector; | ||
841 | int ret; | ||
842 | |||
843 | if ((ret = drm_mode_group_init(dev, group))) | ||
844 | return ret; | ||
845 | |||
846 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
847 | group->id_list[group->num_crtcs++] = crtc->base.id; | ||
848 | |||
849 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) | ||
850 | group->id_list[group->num_crtcs + group->num_encoders++] = | ||
851 | encoder->base.id; | ||
852 | |||
853 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
854 | group->id_list[group->num_crtcs + group->num_encoders + | ||
855 | group->num_connectors++] = connector->base.id; | ||
856 | |||
857 | return 0; | ||
858 | } | ||
859 | |||
860 | /** | ||
861 | * drm_mode_config_cleanup - free up DRM mode_config info | ||
862 | * @dev: DRM device | ||
863 | * | ||
864 | * LOCKING: | ||
865 | * Caller must hold mode config lock. | ||
866 | * | ||
867 | * Free up all the connectors and CRTCs associated with this DRM device, then | ||
868 | * free up the framebuffers and associated buffer objects. | ||
869 | * | ||
870 | * FIXME: cleanup any dangling user buffer objects too | ||
871 | */ | ||
872 | void drm_mode_config_cleanup(struct drm_device *dev) | ||
873 | { | ||
874 | struct drm_connector *connector, *ot; | ||
875 | struct drm_crtc *crtc, *ct; | ||
876 | struct drm_encoder *encoder, *enct; | ||
877 | struct drm_framebuffer *fb, *fbt; | ||
878 | struct drm_property *property, *pt; | ||
879 | |||
880 | list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, | ||
881 | head) { | ||
882 | encoder->funcs->destroy(encoder); | ||
883 | } | ||
884 | |||
885 | list_for_each_entry_safe(connector, ot, | ||
886 | &dev->mode_config.connector_list, head) { | ||
887 | connector->funcs->destroy(connector); | ||
888 | } | ||
889 | |||
890 | list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, | ||
891 | head) { | ||
892 | drm_property_destroy(dev, property); | ||
893 | } | ||
894 | |||
895 | list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { | ||
896 | fb->funcs->destroy(fb); | ||
897 | } | ||
898 | |||
899 | list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { | ||
900 | crtc->funcs->destroy(crtc); | ||
901 | } | ||
902 | |||
903 | } | ||
904 | EXPORT_SYMBOL(drm_mode_config_cleanup); | ||
905 | |||
906 | /** | ||
907 | * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo | ||
908 | * @out: drm_mode_modeinfo struct to return to the user | ||
909 | * @in: drm_display_mode to use | ||
910 | * | ||
911 | * LOCKING: | ||
912 | * None. | ||
913 | * | ||
914 | * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to | ||
915 | * the user. | ||
916 | */ | ||
917 | void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, | ||
918 | struct drm_display_mode *in) | ||
919 | { | ||
920 | out->clock = in->clock; | ||
921 | out->hdisplay = in->hdisplay; | ||
922 | out->hsync_start = in->hsync_start; | ||
923 | out->hsync_end = in->hsync_end; | ||
924 | out->htotal = in->htotal; | ||
925 | out->hskew = in->hskew; | ||
926 | out->vdisplay = in->vdisplay; | ||
927 | out->vsync_start = in->vsync_start; | ||
928 | out->vsync_end = in->vsync_end; | ||
929 | out->vtotal = in->vtotal; | ||
930 | out->vscan = in->vscan; | ||
931 | out->vrefresh = in->vrefresh; | ||
932 | out->flags = in->flags; | ||
933 | out->type = in->type; | ||
934 | strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); | ||
935 | out->name[DRM_DISPLAY_MODE_LEN-1] = 0; | ||
936 | } | ||
937 | |||
938 | /** | ||
939 | * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode | ||
940 | * @out: drm_display_mode to return to the user | ||
941 | * @in: drm_mode_modeinfo to use | ||
942 | * | ||
943 | * LOCKING: | ||
944 | * None. | ||
945 | * | ||
946 | * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to | ||
947 | * the caller. | ||
948 | */ | ||
949 | void drm_crtc_convert_umode(struct drm_display_mode *out, | ||
950 | struct drm_mode_modeinfo *in) | ||
951 | { | ||
952 | out->clock = in->clock; | ||
953 | out->hdisplay = in->hdisplay; | ||
954 | out->hsync_start = in->hsync_start; | ||
955 | out->hsync_end = in->hsync_end; | ||
956 | out->htotal = in->htotal; | ||
957 | out->hskew = in->hskew; | ||
958 | out->vdisplay = in->vdisplay; | ||
959 | out->vsync_start = in->vsync_start; | ||
960 | out->vsync_end = in->vsync_end; | ||
961 | out->vtotal = in->vtotal; | ||
962 | out->vscan = in->vscan; | ||
963 | out->vrefresh = in->vrefresh; | ||
964 | out->flags = in->flags; | ||
965 | out->type = in->type; | ||
966 | strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); | ||
967 | out->name[DRM_DISPLAY_MODE_LEN-1] = 0; | ||
968 | } | ||
969 | |||
970 | /** | ||
971 | * drm_mode_getresources - get graphics configuration | ||
972 | * @inode: inode from the ioctl | ||
973 | * @filp: file * from the ioctl | ||
974 | * @cmd: cmd from ioctl | ||
975 | * @arg: arg from ioctl | ||
976 | * | ||
977 | * LOCKING: | ||
978 | * Takes mode config lock. | ||
979 | * | ||
980 | * Construct a set of configuration description structures and return | ||
981 | * them to the user, including CRTC, connector and framebuffer configuration. | ||
982 | * | ||
983 | * Called by the user via ioctl. | ||
984 | * | ||
985 | * RETURNS: | ||
986 | * Zero on success, errno on failure. | ||
987 | */ | ||
988 | int drm_mode_getresources(struct drm_device *dev, void *data, | ||
989 | struct drm_file *file_priv) | ||
990 | { | ||
991 | struct drm_mode_card_res *card_res = data; | ||
992 | struct list_head *lh; | ||
993 | struct drm_framebuffer *fb; | ||
994 | struct drm_connector *connector; | ||
995 | struct drm_crtc *crtc; | ||
996 | struct drm_encoder *encoder; | ||
997 | int ret = 0; | ||
998 | int connector_count = 0; | ||
999 | int crtc_count = 0; | ||
1000 | int fb_count = 0; | ||
1001 | int encoder_count = 0; | ||
1002 | int copied = 0, i; | ||
1003 | uint32_t __user *fb_id; | ||
1004 | uint32_t __user *crtc_id; | ||
1005 | uint32_t __user *connector_id; | ||
1006 | uint32_t __user *encoder_id; | ||
1007 | struct drm_mode_group *mode_group; | ||
1008 | |||
1009 | mutex_lock(&dev->mode_config.mutex); | ||
1010 | |||
1011 | /* | ||
1012 | * For the non-control nodes we need to limit the list of resources | ||
1013 | * by IDs in the group list for this node | ||
1014 | */ | ||
1015 | list_for_each(lh, &file_priv->fbs) | ||
1016 | fb_count++; | ||
1017 | |||
1018 | mode_group = &file_priv->master->minor->mode_group; | ||
1019 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | ||
1020 | |||
1021 | list_for_each(lh, &dev->mode_config.crtc_list) | ||
1022 | crtc_count++; | ||
1023 | |||
1024 | list_for_each(lh, &dev->mode_config.connector_list) | ||
1025 | connector_count++; | ||
1026 | |||
1027 | list_for_each(lh, &dev->mode_config.encoder_list) | ||
1028 | encoder_count++; | ||
1029 | } else { | ||
1030 | |||
1031 | crtc_count = mode_group->num_crtcs; | ||
1032 | connector_count = mode_group->num_connectors; | ||
1033 | encoder_count = mode_group->num_encoders; | ||
1034 | } | ||
1035 | |||
1036 | card_res->max_height = dev->mode_config.max_height; | ||
1037 | card_res->min_height = dev->mode_config.min_height; | ||
1038 | card_res->max_width = dev->mode_config.max_width; | ||
1039 | card_res->min_width = dev->mode_config.min_width; | ||
1040 | |||
1041 | /* handle this in 4 parts */ | ||
1042 | /* FBs */ | ||
1043 | if (card_res->count_fbs >= fb_count) { | ||
1044 | copied = 0; | ||
1045 | fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; | ||
1046 | list_for_each_entry(fb, &file_priv->fbs, head) { | ||
1047 | if (put_user(fb->base.id, fb_id + copied)) { | ||
1048 | ret = -EFAULT; | ||
1049 | goto out; | ||
1050 | } | ||
1051 | copied++; | ||
1052 | } | ||
1053 | } | ||
1054 | card_res->count_fbs = fb_count; | ||
1055 | |||
1056 | /* CRTCs */ | ||
1057 | if (card_res->count_crtcs >= crtc_count) { | ||
1058 | copied = 0; | ||
1059 | crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; | ||
1060 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | ||
1061 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | ||
1062 | head) { | ||
1063 | DRM_DEBUG("CRTC ID is %d\n", crtc->base.id); | ||
1064 | if (put_user(crtc->base.id, crtc_id + copied)) { | ||
1065 | ret = -EFAULT; | ||
1066 | goto out; | ||
1067 | } | ||
1068 | copied++; | ||
1069 | } | ||
1070 | } else { | ||
1071 | for (i = 0; i < mode_group->num_crtcs; i++) { | ||
1072 | if (put_user(mode_group->id_list[i], | ||
1073 | crtc_id + copied)) { | ||
1074 | ret = -EFAULT; | ||
1075 | goto out; | ||
1076 | } | ||
1077 | copied++; | ||
1078 | } | ||
1079 | } | ||
1080 | } | ||
1081 | card_res->count_crtcs = crtc_count; | ||
1082 | |||
1083 | /* Encoders */ | ||
1084 | if (card_res->count_encoders >= encoder_count) { | ||
1085 | copied = 0; | ||
1086 | encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; | ||
1087 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | ||
1088 | list_for_each_entry(encoder, | ||
1089 | &dev->mode_config.encoder_list, | ||
1090 | head) { | ||
1091 | DRM_DEBUG("ENCODER ID is %d\n", | ||
1092 | encoder->base.id); | ||
1093 | if (put_user(encoder->base.id, encoder_id + | ||
1094 | copied)) { | ||
1095 | ret = -EFAULT; | ||
1096 | goto out; | ||
1097 | } | ||
1098 | copied++; | ||
1099 | } | ||
1100 | } else { | ||
1101 | for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) { | ||
1102 | if (put_user(mode_group->id_list[i], | ||
1103 | encoder_id + copied)) { | ||
1104 | ret = -EFAULT; | ||
1105 | goto out; | ||
1106 | } | ||
1107 | copied++; | ||
1108 | } | ||
1109 | |||
1110 | } | ||
1111 | } | ||
1112 | card_res->count_encoders = encoder_count; | ||
1113 | |||
1114 | /* Connectors */ | ||
1115 | if (card_res->count_connectors >= connector_count) { | ||
1116 | copied = 0; | ||
1117 | connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; | ||
1118 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | ||
1119 | list_for_each_entry(connector, | ||
1120 | &dev->mode_config.connector_list, | ||
1121 | head) { | ||
1122 | DRM_DEBUG("CONNECTOR ID is %d\n", | ||
1123 | connector->base.id); | ||
1124 | if (put_user(connector->base.id, | ||
1125 | connector_id + copied)) { | ||
1126 | ret = -EFAULT; | ||
1127 | goto out; | ||
1128 | } | ||
1129 | copied++; | ||
1130 | } | ||
1131 | } else { | ||
1132 | int start = mode_group->num_crtcs + | ||
1133 | mode_group->num_encoders; | ||
1134 | for (i = start; i < start + mode_group->num_connectors; i++) { | ||
1135 | if (put_user(mode_group->id_list[i], | ||
1136 | connector_id + copied)) { | ||
1137 | ret = -EFAULT; | ||
1138 | goto out; | ||
1139 | } | ||
1140 | copied++; | ||
1141 | } | ||
1142 | } | ||
1143 | } | ||
1144 | card_res->count_connectors = connector_count; | ||
1145 | |||
1146 | DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs, | ||
1147 | card_res->count_connectors, card_res->count_encoders); | ||
1148 | |||
1149 | out: | ||
1150 | mutex_unlock(&dev->mode_config.mutex); | ||
1151 | return ret; | ||
1152 | } | ||
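The count-then-fill structure above is what userspace sees as a two-pass ioctl: the first call reports how many IDs of each kind exist, the second fills caller-supplied arrays. A minimal sketch of that usage, assuming the drm_mode_card_res layout from the DRM uapi headers, an already-open device fd, and with error handling omitted:

	/* assumes <stdint.h>, <string.h>, <stdlib.h>, <sys/ioctl.h> and the drm uapi headers */
	struct drm_mode_card_res res;

	memset(&res, 0, sizeof(res));
	ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);	/* first pass: learn the counts */

	res.fb_id_ptr        = (uint64_t)(unsigned long)calloc(res.count_fbs, sizeof(uint32_t));
	res.crtc_id_ptr      = (uint64_t)(unsigned long)calloc(res.count_crtcs, sizeof(uint32_t));
	res.encoder_id_ptr   = (uint64_t)(unsigned long)calloc(res.count_encoders, sizeof(uint32_t));
	res.connector_id_ptr = (uint64_t)(unsigned long)calloc(res.count_connectors, sizeof(uint32_t));
	ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);	/* second pass: fill the ID arrays */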
1153 | |||
1154 | /** | ||
1155 | * drm_mode_getcrtc - get CRTC configuration | ||
1156 | * @inode: inode from the ioctl | ||
1157 | * @filp: file * from the ioctl | ||
1158 | * @cmd: cmd from ioctl | ||
1159 | * @arg: arg from ioctl | ||
1160 | * | ||
1161 | * LOCKING: | ||
1162 | * Takes mode config lock. | ||
1163 | * | ||
1164 | * Construct a CRTC configuration structure to return to the user. | ||
1165 | * | ||
1166 | * Called by the user via ioctl. | ||
1167 | * | ||
1168 | * RETURNS: | ||
1169 | * Zero on success, errno on failure. | ||
1170 | */ | ||
1171 | int drm_mode_getcrtc(struct drm_device *dev, | ||
1172 | void *data, struct drm_file *file_priv) | ||
1173 | { | ||
1174 | struct drm_mode_crtc *crtc_resp = data; | ||
1175 | struct drm_crtc *crtc; | ||
1176 | struct drm_mode_object *obj; | ||
1177 | int ret = 0; | ||
1178 | |||
1179 | mutex_lock(&dev->mode_config.mutex); | ||
1180 | |||
1181 | obj = drm_mode_object_find(dev, crtc_resp->crtc_id, | ||
1182 | DRM_MODE_OBJECT_CRTC); | ||
1183 | if (!obj) { | ||
1184 | ret = -EINVAL; | ||
1185 | goto out; | ||
1186 | } | ||
1187 | crtc = obj_to_crtc(obj); | ||
1188 | |||
1189 | crtc_resp->x = crtc->x; | ||
1190 | crtc_resp->y = crtc->y; | ||
1191 | crtc_resp->gamma_size = crtc->gamma_size; | ||
1192 | if (crtc->fb) | ||
1193 | crtc_resp->fb_id = crtc->fb->base.id; | ||
1194 | else | ||
1195 | crtc_resp->fb_id = 0; | ||
1196 | |||
1197 | if (crtc->enabled) { | ||
1198 | |||
1199 | drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); | ||
1200 | crtc_resp->mode_valid = 1; | ||
1201 | |||
1202 | } else { | ||
1203 | crtc_resp->mode_valid = 0; | ||
1204 | } | ||
1205 | |||
1206 | out: | ||
1207 | mutex_unlock(&dev->mode_config.mutex); | ||
1208 | return ret; | ||
1209 | } | ||
1210 | |||
1211 | /** | ||
1212 | * drm_mode_getconnector - get connector configuration | ||
1213 | * @inode: inode from the ioctl | ||
1214 | * @filp: file * from the ioctl | ||
1215 | * @cmd: cmd from ioctl | ||
1216 | * @arg: arg from ioctl | ||
1217 | * | ||
1218 | * LOCKING: | ||
1219 | * Takes mode config lock. | ||
1220 | * | ||
1221 | * Construct a connector configuration structure to return to the user. | ||
1222 | * | ||
1223 | * Called by the user via ioctl. | ||
1224 | * | ||
1225 | * RETURNS: | ||
1226 | * Zero on success, errno on failure. | ||
1227 | */ | ||
1228 | int drm_mode_getconnector(struct drm_device *dev, void *data, | ||
1229 | struct drm_file *file_priv) | ||
1230 | { | ||
1231 | struct drm_mode_get_connector *out_resp = data; | ||
1232 | struct drm_mode_object *obj; | ||
1233 | struct drm_connector *connector; | ||
1234 | struct drm_display_mode *mode; | ||
1235 | int mode_count = 0; | ||
1236 | int props_count = 0; | ||
1237 | int encoders_count = 0; | ||
1238 | int ret = 0; | ||
1239 | int copied = 0; | ||
1240 | int i; | ||
1241 | struct drm_mode_modeinfo u_mode; | ||
1242 | struct drm_mode_modeinfo __user *mode_ptr; | ||
1243 | uint32_t __user *prop_ptr; | ||
1244 | uint64_t __user *prop_values; | ||
1245 | uint32_t __user *encoder_ptr; | ||
1246 | |||
1247 | memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); | ||
1248 | |||
1249 | DRM_DEBUG("connector id %d:\n", out_resp->connector_id); | ||
1250 | |||
1251 | mutex_lock(&dev->mode_config.mutex); | ||
1252 | |||
1253 | obj = drm_mode_object_find(dev, out_resp->connector_id, | ||
1254 | DRM_MODE_OBJECT_CONNECTOR); | ||
1255 | if (!obj) { | ||
1256 | ret = -EINVAL; | ||
1257 | goto out; | ||
1258 | } | ||
1259 | connector = obj_to_connector(obj); | ||
1260 | |||
1261 | for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { | ||
1262 | if (connector->property_ids[i] != 0) { | ||
1263 | props_count++; | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
1268 | if (connector->encoder_ids[i] != 0) { | ||
1269 | encoders_count++; | ||
1270 | } | ||
1271 | } | ||
1272 | |||
1273 | if (out_resp->count_modes == 0) { | ||
1274 | connector->funcs->fill_modes(connector, | ||
1275 | dev->mode_config.max_width, | ||
1276 | dev->mode_config.max_height); | ||
1277 | } | ||
1278 | |||
1279 | /* delayed so we get modes regardless of pre-fill_modes state */ | ||
1280 | list_for_each_entry(mode, &connector->modes, head) | ||
1281 | mode_count++; | ||
1282 | |||
1283 | out_resp->connector_id = connector->base.id; | ||
1284 | out_resp->connector_type = connector->connector_type; | ||
1285 | out_resp->connector_type_id = connector->connector_type_id; | ||
1286 | out_resp->mm_width = connector->display_info.width_mm; | ||
1287 | out_resp->mm_height = connector->display_info.height_mm; | ||
1288 | out_resp->subpixel = connector->display_info.subpixel_order; | ||
1289 | out_resp->connection = connector->status; | ||
1290 | if (connector->encoder) | ||
1291 | out_resp->encoder_id = connector->encoder->base.id; | ||
1292 | else | ||
1293 | out_resp->encoder_id = 0; | ||
1294 | |||
1295 | /* | ||
1296 | * This ioctl is called twice, once to determine how much space is | ||
1297 | * needed, and the 2nd time to fill it. | ||
1298 | */ | ||
1299 | if ((out_resp->count_modes >= mode_count) && mode_count) { | ||
1300 | copied = 0; | ||
1301 | mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr; | ||
1302 | list_for_each_entry(mode, &connector->modes, head) { | ||
1303 | drm_crtc_convert_to_umode(&u_mode, mode); | ||
1304 | if (copy_to_user(mode_ptr + copied, | ||
1305 | &u_mode, sizeof(u_mode))) { | ||
1306 | ret = -EFAULT; | ||
1307 | goto out; | ||
1308 | } | ||
1309 | copied++; | ||
1310 | } | ||
1311 | } | ||
1312 | out_resp->count_modes = mode_count; | ||
1313 | |||
1314 | if ((out_resp->count_props >= props_count) && props_count) { | ||
1315 | copied = 0; | ||
1316 | prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr); | ||
1317 | prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr); | ||
1318 | for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { | ||
1319 | if (connector->property_ids[i] != 0) { | ||
1320 | if (put_user(connector->property_ids[i], | ||
1321 | prop_ptr + copied)) { | ||
1322 | ret = -EFAULT; | ||
1323 | goto out; | ||
1324 | } | ||
1325 | |||
1326 | if (put_user(connector->property_values[i], | ||
1327 | prop_values + copied)) { | ||
1328 | ret = -EFAULT; | ||
1329 | goto out; | ||
1330 | } | ||
1331 | copied++; | ||
1332 | } | ||
1333 | } | ||
1334 | } | ||
1335 | out_resp->count_props = props_count; | ||
1336 | |||
1337 | if ((out_resp->count_encoders >= encoders_count) && encoders_count) { | ||
1338 | copied = 0; | ||
1339 | encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr); | ||
1340 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
1341 | if (connector->encoder_ids[i] != 0) { | ||
1342 | if (put_user(connector->encoder_ids[i], | ||
1343 | encoder_ptr + copied)) { | ||
1344 | ret = -EFAULT; | ||
1345 | goto out; | ||
1346 | } | ||
1347 | copied++; | ||
1348 | } | ||
1349 | } | ||
1350 | } | ||
1351 | out_resp->count_encoders = encoders_count; | ||
1352 | |||
1353 | out: | ||
1354 | mutex_unlock(&dev->mode_config.mutex); | ||
1355 | return ret; | ||
1356 | } | ||
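drm_mode_getconnector() follows the same two-pass convention, with the twist that a first call with count_modes == 0 also forces a fresh probe via fill_modes(). A sketch of the userspace side, assuming conn_id came from GETRESOURCES and reusing the setup conventions of the earlier sketch:

	struct drm_mode_get_connector conn;
	struct drm_mode_modeinfo *modes;

	memset(&conn, 0, sizeof(conn));
	conn.connector_id = conn_id;
	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);	/* probes modes, returns counts */

	modes = calloc(conn.count_modes, sizeof(*modes));
	conn.modes_ptr = (uint64_t)(unsigned long)modes;
	conn.count_props = 0;				/* only fetching modes in this example */
	conn.count_encoders = 0;
	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);	/* second pass: copy the mode list */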
1357 | |||
1358 | int drm_mode_getencoder(struct drm_device *dev, void *data, | ||
1359 | struct drm_file *file_priv) | ||
1360 | { | ||
1361 | struct drm_mode_get_encoder *enc_resp = data; | ||
1362 | struct drm_mode_object *obj; | ||
1363 | struct drm_encoder *encoder; | ||
1364 | int ret = 0; | ||
1365 | |||
1366 | mutex_lock(&dev->mode_config.mutex); | ||
1367 | obj = drm_mode_object_find(dev, enc_resp->encoder_id, | ||
1368 | DRM_MODE_OBJECT_ENCODER); | ||
1369 | if (!obj) { | ||
1370 | ret = -EINVAL; | ||
1371 | goto out; | ||
1372 | } | ||
1373 | encoder = obj_to_encoder(obj); | ||
1374 | |||
1375 | if (encoder->crtc) | ||
1376 | enc_resp->crtc_id = encoder->crtc->base.id; | ||
1377 | else | ||
1378 | enc_resp->crtc_id = 0; | ||
1379 | enc_resp->encoder_type = encoder->encoder_type; | ||
1380 | enc_resp->encoder_id = encoder->base.id; | ||
1381 | enc_resp->possible_crtcs = encoder->possible_crtcs; | ||
1382 | enc_resp->possible_clones = encoder->possible_clones; | ||
1383 | |||
1384 | out: | ||
1385 | mutex_unlock(&dev->mode_config.mutex); | ||
1386 | return ret; | ||
1387 | } | ||
1388 | |||
1389 | /** | ||
1390 | * drm_mode_setcrtc - set CRTC configuration | ||
1391 | * @inode: inode from the ioctl | ||
1392 | * @filp: file * from the ioctl | ||
1393 | * @cmd: cmd from ioctl | ||
1394 | * @arg: arg from ioctl | ||
1395 | * | ||
1396 | * LOCKING: | ||
1397 | * Takes mode config lock. | ||
1398 | * | ||
1399 | * Build a new CRTC configuration based on user request. | ||
1400 | * | ||
1401 | * Called by the user via ioctl. | ||
1402 | * | ||
1403 | * RETURNS: | ||
1404 | * Zero on success, errno on failure. | ||
1405 | */ | ||
1406 | int drm_mode_setcrtc(struct drm_device *dev, void *data, | ||
1407 | struct drm_file *file_priv) | ||
1408 | { | ||
1409 | struct drm_mode_config *config = &dev->mode_config; | ||
1410 | struct drm_mode_crtc *crtc_req = data; | ||
1411 | struct drm_mode_object *obj; | ||
1412 | struct drm_crtc *crtc, *crtcfb; | ||
1413 | struct drm_connector **connector_set = NULL, *connector; | ||
1414 | struct drm_framebuffer *fb = NULL; | ||
1415 | struct drm_display_mode *mode = NULL; | ||
1416 | struct drm_mode_set set; | ||
1417 | uint32_t __user *set_connectors_ptr; | ||
1418 | int ret = 0; | ||
1419 | int i; | ||
1420 | |||
1421 | mutex_lock(&dev->mode_config.mutex); | ||
1422 | obj = drm_mode_object_find(dev, crtc_req->crtc_id, | ||
1423 | DRM_MODE_OBJECT_CRTC); | ||
1424 | if (!obj) { | ||
1425 | DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id); | ||
1426 | ret = -EINVAL; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | crtc = obj_to_crtc(obj); | ||
1430 | |||
1431 | if (crtc_req->mode_valid) { | ||
1432 | /* If we have a mode we need a framebuffer. */ | ||
1433 | /* If we pass -1, set the mode with the currently bound fb */ | ||
1434 | if (crtc_req->fb_id == -1) { | ||
1435 | list_for_each_entry(crtcfb, | ||
1436 | &dev->mode_config.crtc_list, head) { | ||
1437 | if (crtcfb == crtc) { | ||
1438 | DRM_DEBUG("Using current fb for setmode\n"); | ||
1439 | fb = crtc->fb; | ||
1440 | } | ||
1441 | } | ||
1442 | } else { | ||
1443 | obj = drm_mode_object_find(dev, crtc_req->fb_id, | ||
1444 | DRM_MODE_OBJECT_FB); | ||
1445 | if (!obj) { | ||
1446 | DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id); | ||
1447 | ret = -EINVAL; | ||
1448 | goto out; | ||
1449 | } | ||
1450 | fb = obj_to_fb(obj); | ||
1451 | } | ||
1452 | |||
1453 | mode = drm_mode_create(dev); | ||
1454 | drm_crtc_convert_umode(mode, &crtc_req->mode); | ||
1455 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
1456 | } | ||
1457 | |||
1458 | if (crtc_req->count_connectors == 0 && mode) { | ||
1459 | DRM_DEBUG("Count connectors is 0 but mode set\n"); | ||
1460 | ret = -EINVAL; | ||
1461 | goto out; | ||
1462 | } | ||
1463 | |||
1464 | if (crtc_req->count_connectors > 0 && !mode && !fb) { | ||
1465 | DRM_DEBUG("Count connectors is %d but no mode or fb set\n", | ||
1466 | crtc_req->count_connectors); | ||
1467 | ret = -EINVAL; | ||
1468 | goto out; | ||
1469 | } | ||
1470 | |||
1471 | if (crtc_req->count_connectors > 0) { | ||
1472 | u32 out_id; | ||
1473 | |||
1474 | /* Avoid unbounded kernel memory allocation */ | ||
1475 | if (crtc_req->count_connectors > config->num_connector) { | ||
1476 | ret = -EINVAL; | ||
1477 | goto out; | ||
1478 | } | ||
1479 | |||
1480 | connector_set = kmalloc(crtc_req->count_connectors * | ||
1481 | sizeof(struct drm_connector *), | ||
1482 | GFP_KERNEL); | ||
1483 | if (!connector_set) { | ||
1484 | ret = -ENOMEM; | ||
1485 | goto out; | ||
1486 | } | ||
1487 | |||
1488 | for (i = 0; i < crtc_req->count_connectors; i++) { | ||
1489 | set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr; | ||
1490 | if (get_user(out_id, &set_connectors_ptr[i])) { | ||
1491 | ret = -EFAULT; | ||
1492 | goto out; | ||
1493 | } | ||
1494 | |||
1495 | obj = drm_mode_object_find(dev, out_id, | ||
1496 | DRM_MODE_OBJECT_CONNECTOR); | ||
1497 | if (!obj) { | ||
1498 | DRM_DEBUG("Connector id %d unknown\n", out_id); | ||
1499 | ret = -EINVAL; | ||
1500 | goto out; | ||
1501 | } | ||
1502 | connector = obj_to_connector(obj); | ||
1503 | |||
1504 | connector_set[i] = connector; | ||
1505 | } | ||
1506 | } | ||
1507 | |||
1508 | set.crtc = crtc; | ||
1509 | set.x = crtc_req->x; | ||
1510 | set.y = crtc_req->y; | ||
1511 | set.mode = mode; | ||
1512 | set.connectors = connector_set; | ||
1513 | set.num_connectors = crtc_req->count_connectors; | ||
1514 | set.fb = fb; | ||
1515 | ret = crtc->funcs->set_config(&set); | ||
1516 | |||
1517 | out: | ||
1518 | kfree(connector_set); | ||
1519 | mutex_unlock(&dev->mode_config.mutex); | ||
1520 | return ret; | ||
1521 | } | ||
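For context, a sketch of the request userspace builds for this ioctl, assuming crtc_id and conn_id came from GETRESOURCES, fb_id from ADDFB, and chosen_mode is a drm_mode_modeinfo returned by GETCONNECTOR (all of these are stand-in names, error handling omitted):

	uint32_t connectors[1] = { conn_id };		/* connector to light up */
	struct drm_mode_crtc req;

	memset(&req, 0, sizeof(req));
	req.crtc_id = crtc_id;
	req.fb_id = fb_id;
	req.x = req.y = 0;				/* scanout origin inside the framebuffer */
	req.mode = chosen_mode;
	req.mode_valid = 1;
	req.set_connectors_ptr = (uint64_t)(unsigned long)connectors;
	req.count_connectors = 1;
	ioctl(fd, DRM_IOCTL_MODE_SETCRTC, &req);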
1522 | |||
1523 | int drm_mode_cursor_ioctl(struct drm_device *dev, | ||
1524 | void *data, struct drm_file *file_priv) | ||
1525 | { | ||
1526 | struct drm_mode_cursor *req = data; | ||
1527 | struct drm_mode_object *obj; | ||
1528 | struct drm_crtc *crtc; | ||
1529 | int ret = 0; | ||
1530 | |||
1531 | DRM_DEBUG("\n"); | ||
1532 | |||
1533 | if (!req->flags) { | ||
1534 | DRM_ERROR("no operation set\n"); | ||
1535 | return -EINVAL; | ||
1536 | } | ||
1537 | |||
1538 | mutex_lock(&dev->mode_config.mutex); | ||
1539 | obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
1540 | if (!obj) { | ||
1541 | DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id); | ||
1542 | ret = -EINVAL; | ||
1543 | goto out; | ||
1544 | } | ||
1545 | crtc = obj_to_crtc(obj); | ||
1546 | |||
1547 | if (req->flags & DRM_MODE_CURSOR_BO) { | ||
1548 | if (!crtc->funcs->cursor_set) { | ||
1549 | DRM_ERROR("crtc does not support cursor\n"); | ||
1550 | ret = -ENXIO; | ||
1551 | goto out; | ||
1552 | } | ||
1553 | /* Turns off the cursor if handle is 0 */ | ||
1554 | ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, | ||
1555 | req->width, req->height); | ||
1556 | } | ||
1557 | |||
1558 | if (req->flags & DRM_MODE_CURSOR_MOVE) { | ||
1559 | if (crtc->funcs->cursor_move) { | ||
1560 | ret = crtc->funcs->cursor_move(crtc, req->x, req->y); | ||
1561 | } else { | ||
1562 | DRM_ERROR("crtc does not support cursor\n"); | ||
1563 | ret = -EFAULT; | ||
1564 | goto out; | ||
1565 | } | ||
1566 | } | ||
1567 | out: | ||
1568 | mutex_unlock(&dev->mode_config.mutex); | ||
1569 | return ret; | ||
1570 | } | ||
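A sketch of the two request flavours this ioctl accepts, assuming crtc_id came from GETRESOURCES and bo_handle names a 64x64 cursor buffer object (illustrative values, error handling omitted):

	struct drm_mode_cursor cur;

	memset(&cur, 0, sizeof(cur));
	cur.crtc_id = crtc_id;
	cur.flags   = DRM_MODE_CURSOR_BO;	/* set the cursor image (handle 0 turns it off) */
	cur.handle  = bo_handle;
	cur.width   = 64;
	cur.height  = 64;
	ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur);

	cur.flags = DRM_MODE_CURSOR_MOVE;	/* reposition the cursor */
	cur.x = 100;
	cur.y = 200;
	ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur);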
1571 | |||
1572 | /** | ||
1573 | * drm_mode_addfb - add an FB to the graphics configuration | ||
1574 | * @inode: inode from the ioctl | ||
1575 | * @filp: file * from the ioctl | ||
1576 | * @cmd: cmd from ioctl | ||
1577 | * @arg: arg from ioctl | ||
1578 | * | ||
1579 | * LOCKING: | ||
1580 | * Takes mode config lock. | ||
1581 | * | ||
1582 | * Add a new FB to the specified CRTC, given a user request. | ||
1583 | * | ||
1584 | * Called by the user via ioctl. | ||
1585 | * | ||
1586 | * RETURNS: | ||
1587 | * Zero on success, errno on failure. | ||
1588 | */ | ||
1589 | int drm_mode_addfb(struct drm_device *dev, | ||
1590 | void *data, struct drm_file *file_priv) | ||
1591 | { | ||
1592 | struct drm_mode_fb_cmd *r = data; | ||
1593 | struct drm_mode_config *config = &dev->mode_config; | ||
1594 | struct drm_framebuffer *fb; | ||
1595 | int ret = 0; | ||
1596 | |||
1597 | if ((config->min_width > r->width) || (r->width > config->max_width)) { | ||
1598 | DRM_ERROR("mode new framebuffer width not within limits\n"); | ||
1599 | return -EINVAL; | ||
1600 | } | ||
1601 | if ((config->min_height > r->height) || (r->height > config->max_height)) { | ||
1602 | DRM_ERROR("mode new framebuffer height not within limits\n"); | ||
1603 | return -EINVAL; | ||
1604 | } | ||
1605 | |||
1606 | mutex_lock(&dev->mode_config.mutex); | ||
1607 | |||
1608 | /* TODO check buffer is sufficiently large */ | ||
1609 | /* TODO setup destructor callback */ | ||
1610 | |||
1611 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); | ||
1612 | if (!fb) { | ||
1613 | DRM_ERROR("could not create framebuffer\n"); | ||
1614 | ret = -EINVAL; | ||
1615 | goto out; | ||
1616 | } | ||
1617 | |||
1618 | r->fb_id = fb->base.id; | ||
1619 | list_add(&fb->filp_head, &file_priv->fbs); | ||
1620 | |||
1621 | out: | ||
1622 | mutex_unlock(&dev->mode_config.mutex); | ||
1623 | return ret; | ||
1624 | } | ||
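For reference, a sketch of the request consumed here; bo_handle is assumed to name a driver buffer object (for i915 in this series, a GEM handle) large enough for the described layout, and the size values are illustrative:

	struct drm_mode_fb_cmd fb_req;

	memset(&fb_req, 0, sizeof(fb_req));
	fb_req.width  = 1280;
	fb_req.height = 800;
	fb_req.pitch  = 1280 * 4;		/* bytes per scanline */
	fb_req.bpp    = 32;
	fb_req.depth  = 24;
	fb_req.handle = bo_handle;
	ioctl(fd, DRM_IOCTL_MODE_ADDFB, &fb_req);
	/* on success, fb_req.fb_id names the new framebuffer */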
1625 | |||
1626 | /** | ||
1627 | * drm_mode_rmfb - remove an FB from the configuration | ||
1628 | * @inode: inode from the ioctl | ||
1629 | * @filp: file * from the ioctl | ||
1630 | * @cmd: cmd from ioctl | ||
1631 | * @arg: arg from ioctl | ||
1632 | * | ||
1633 | * LOCKING: | ||
1634 | * Takes mode config lock. | ||
1635 | * | ||
1636 | * Remove the FB specified by the user. | ||
1637 | * | ||
1638 | * Called by the user via ioctl. | ||
1639 | * | ||
1640 | * RETURNS: | ||
1641 | * Zero on success, errno on failure. | ||
1642 | */ | ||
1643 | int drm_mode_rmfb(struct drm_device *dev, | ||
1644 | void *data, struct drm_file *file_priv) | ||
1645 | { | ||
1646 | struct drm_mode_object *obj; | ||
1647 | struct drm_framebuffer *fb = NULL; | ||
1648 | struct drm_framebuffer *fbl = NULL; | ||
1649 | uint32_t *id = data; | ||
1650 | int ret = 0; | ||
1651 | int found = 0; | ||
1652 | |||
1653 | mutex_lock(&dev->mode_config.mutex); | ||
1654 | obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); | ||
1655 | /* TODO check that we really get a framebuffer back. */ | ||
1656 | if (!obj) { | ||
1657 | DRM_ERROR("mode invalid framebuffer id\n"); | ||
1658 | ret = -EINVAL; | ||
1659 | goto out; | ||
1660 | } | ||
1661 | fb = obj_to_fb(obj); | ||
1662 | |||
1663 | list_for_each_entry(fbl, &file_priv->fbs, filp_head) | ||
1664 | if (fb == fbl) | ||
1665 | found = 1; | ||
1666 | |||
1667 | if (!found) { | ||
1668 | DRM_ERROR("tried to remove a fb that we didn't own\n"); | ||
1669 | ret = -EINVAL; | ||
1670 | goto out; | ||
1671 | } | ||
1672 | |||
1673 | /* TODO release all crtc connected to the framebuffer */ | ||
1674 | /* TODO unhook the destructor from the buffer object */ | ||
1675 | |||
1676 | list_del(&fb->filp_head); | ||
1677 | fb->funcs->destroy(fb); | ||
1678 | |||
1679 | out: | ||
1680 | mutex_unlock(&dev->mode_config.mutex); | ||
1681 | return ret; | ||
1682 | } | ||
1683 | |||
1684 | /** | ||
1685 | * drm_mode_getfb - get FB info | ||
1686 | * @inode: inode from the ioctl | ||
1687 | * @filp: file * from the ioctl | ||
1688 | * @cmd: cmd from ioctl | ||
1689 | * @arg: arg from ioctl | ||
1690 | * | ||
1691 | * LOCKING: | ||
1692 | * Takes mode config lock. | ||
1693 | * | ||
1694 | * Lookup the FB given its ID and return info about it. | ||
1695 | * | ||
1696 | * Called by the user via ioctl. | ||
1697 | * | ||
1698 | * RETURNS: | ||
1699 | * Zero on success, errno on failure. | ||
1700 | */ | ||
1701 | int drm_mode_getfb(struct drm_device *dev, | ||
1702 | void *data, struct drm_file *file_priv) | ||
1703 | { | ||
1704 | struct drm_mode_fb_cmd *r = data; | ||
1705 | struct drm_mode_object *obj; | ||
1706 | struct drm_framebuffer *fb; | ||
1707 | int ret = 0; | ||
1708 | |||
1709 | mutex_lock(&dev->mode_config.mutex); | ||
1710 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); | ||
1711 | if (!obj) { | ||
1712 | DRM_ERROR("invalid framebuffer id\n"); | ||
1713 | ret = -EINVAL; | ||
1714 | goto out; | ||
1715 | } | ||
1716 | fb = obj_to_fb(obj); | ||
1717 | |||
1718 | r->height = fb->height; | ||
1719 | r->width = fb->width; | ||
1720 | r->depth = fb->depth; | ||
1721 | r->bpp = fb->bits_per_pixel; | ||
1722 | r->pitch = fb->pitch; | ||
1723 | fb->funcs->create_handle(fb, file_priv, &r->handle); | ||
1724 | |||
1725 | out: | ||
1726 | mutex_unlock(&dev->mode_config.mutex); | ||
1727 | return ret; | ||
1728 | } | ||
1729 | |||
1730 | /** | ||
1731 | * drm_fb_release - remove and free the FBs on this file | ||
1732 | * @filp: file * being released | ||
1733 | * | ||
1734 | * LOCKING: | ||
1735 | * Takes mode config lock. | ||
1736 | * | ||
1737 | * Destroy all the FBs associated with @filp. | ||
1738 | * | ||
1739 | * Called on last close of the file, rather than via ioctl; there is no | ||
1740 | * return value. | ||
1743 | */ | ||
1744 | void drm_fb_release(struct file *filp) | ||
1745 | { | ||
1746 | struct drm_file *priv = filp->private_data; | ||
1747 | struct drm_device *dev = priv->minor->dev; | ||
1748 | struct drm_framebuffer *fb, *tfb; | ||
1749 | |||
1750 | mutex_lock(&dev->mode_config.mutex); | ||
1751 | list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { | ||
1752 | list_del(&fb->filp_head); | ||
1753 | fb->funcs->destroy(fb); | ||
1754 | } | ||
1755 | mutex_unlock(&dev->mode_config.mutex); | ||
1756 | } | ||
1757 | |||
1758 | /** | ||
1759 | * drm_mode_attachmode - add a mode to the user mode list | ||
1760 | * @dev: DRM device | ||
1761 | * @connector: connector to add the mode to | ||
1762 | * @mode: mode to add | ||
1763 | * | ||
1764 | * Add @mode to @connector's user mode list. | ||
1765 | */ | ||
1766 | static int drm_mode_attachmode(struct drm_device *dev, | ||
1767 | struct drm_connector *connector, | ||
1768 | struct drm_display_mode *mode) | ||
1769 | { | ||
1770 | int ret = 0; | ||
1771 | |||
1772 | list_add_tail(&mode->head, &connector->user_modes); | ||
1773 | return ret; | ||
1774 | } | ||
1775 | |||
1776 | int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, | ||
1777 | struct drm_display_mode *mode) | ||
1778 | { | ||
1779 | struct drm_connector *connector; | ||
1780 | int ret = 0; | ||
1781 | struct drm_display_mode *dup_mode; | ||
1782 | int need_dup = 0; | ||
1783 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1784 | if (!connector->encoder) | ||
1785 | break; | ||
1786 | if (connector->encoder->crtc == crtc) { | ||
1787 | if (need_dup) | ||
1788 | dup_mode = drm_mode_duplicate(dev, mode); | ||
1789 | else | ||
1790 | dup_mode = mode; | ||
1791 | ret = drm_mode_attachmode(dev, connector, dup_mode); | ||
1792 | if (ret) | ||
1793 | return ret; | ||
1794 | need_dup = 1; | ||
1795 | } | ||
1796 | } | ||
1797 | return 0; | ||
1798 | } | ||
1799 | EXPORT_SYMBOL(drm_mode_attachmode_crtc); | ||
1800 | |||
1801 | static int drm_mode_detachmode(struct drm_device *dev, | ||
1802 | struct drm_connector *connector, | ||
1803 | struct drm_display_mode *mode) | ||
1804 | { | ||
1805 | int found = 0; | ||
1806 | int ret = 0; | ||
1807 | struct drm_display_mode *match_mode, *t; | ||
1808 | |||
1809 | list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) { | ||
1810 | if (drm_mode_equal(match_mode, mode)) { | ||
1811 | list_del(&match_mode->head); | ||
1812 | drm_mode_destroy(dev, match_mode); | ||
1813 | found = 1; | ||
1814 | break; | ||
1815 | } | ||
1816 | } | ||
1817 | |||
1818 | if (!found) | ||
1819 | ret = -EINVAL; | ||
1820 | |||
1821 | return ret; | ||
1822 | } | ||
1823 | |||
1824 | int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode) | ||
1825 | { | ||
1826 | struct drm_connector *connector; | ||
1827 | |||
1828 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1829 | drm_mode_detachmode(dev, connector, mode); | ||
1830 | } | ||
1831 | return 0; | ||
1832 | } | ||
1833 | EXPORT_SYMBOL(drm_mode_detachmode_crtc); | ||
1834 | |||
1835 | /** | ||
1836 | * drm_mode_attachmode_ioctl - Attach a user mode to a connector | ||
1837 | * @inode: inode from the ioctl | ||
1838 | * @filp: file * from the ioctl | ||
1839 | * @cmd: cmd from ioctl | ||
1840 | * @arg: arg from ioctl | ||
1841 | * | ||
1842 | * This attaches a user specified mode to a connector. | ||
1843 | * Called by the user via ioctl. | ||
1844 | * | ||
1845 | * RETURNS: | ||
1846 | * Zero on success, errno on failure. | ||
1847 | */ | ||
1848 | int drm_mode_attachmode_ioctl(struct drm_device *dev, | ||
1849 | void *data, struct drm_file *file_priv) | ||
1850 | { | ||
1851 | struct drm_mode_mode_cmd *mode_cmd = data; | ||
1852 | struct drm_connector *connector; | ||
1853 | struct drm_display_mode *mode; | ||
1854 | struct drm_mode_object *obj; | ||
1855 | struct drm_mode_modeinfo *umode = &mode_cmd->mode; | ||
1856 | int ret = 0; | ||
1857 | |||
1858 | mutex_lock(&dev->mode_config.mutex); | ||
1859 | |||
1860 | obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); | ||
1861 | if (!obj) { | ||
1862 | ret = -EINVAL; | ||
1863 | goto out; | ||
1864 | } | ||
1865 | connector = obj_to_connector(obj); | ||
1866 | |||
1867 | mode = drm_mode_create(dev); | ||
1868 | if (!mode) { | ||
1869 | ret = -ENOMEM; | ||
1870 | goto out; | ||
1871 | } | ||
1872 | |||
1873 | drm_crtc_convert_umode(mode, umode); | ||
1874 | |||
1875 | ret = drm_mode_attachmode(dev, connector, mode); | ||
1876 | out: | ||
1877 | mutex_unlock(&dev->mode_config.mutex); | ||
1878 | return ret; | ||
1879 | } | ||
1880 | |||
1881 | |||
1882 | /** | ||
1883 | * drm_mode_detachmode_ioctl - Detach a user specified mode from a connector | ||
1884 | * @inode: inode from the ioctl | ||
1885 | * @filp: file * from the ioctl | ||
1886 | * @cmd: cmd from ioctl | ||
1887 | * @arg: arg from ioctl | ||
1888 | * | ||
1889 | * Called by the user via ioctl. | ||
1890 | * | ||
1891 | * RETURNS: | ||
1892 | * Zero on success, errno on failure. | ||
1893 | */ | ||
1894 | int drm_mode_detachmode_ioctl(struct drm_device *dev, | ||
1895 | void *data, struct drm_file *file_priv) | ||
1896 | { | ||
1897 | struct drm_mode_object *obj; | ||
1898 | struct drm_mode_mode_cmd *mode_cmd = data; | ||
1899 | struct drm_connector *connector; | ||
1900 | struct drm_display_mode mode; | ||
1901 | struct drm_mode_modeinfo *umode = &mode_cmd->mode; | ||
1902 | int ret = 0; | ||
1903 | |||
1904 | mutex_lock(&dev->mode_config.mutex); | ||
1905 | |||
1906 | obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); | ||
1907 | if (!obj) { | ||
1908 | ret = -EINVAL; | ||
1909 | goto out; | ||
1910 | } | ||
1911 | connector = obj_to_connector(obj); | ||
1912 | |||
1913 | drm_crtc_convert_umode(&mode, umode); | ||
1914 | ret = drm_mode_detachmode(dev, connector, &mode); | ||
1915 | out: | ||
1916 | mutex_unlock(&dev->mode_config.mutex); | ||
1917 | return ret; | ||
1918 | } | ||
1919 | |||
1920 | struct drm_property *drm_property_create(struct drm_device *dev, int flags, | ||
1921 | const char *name, int num_values) | ||
1922 | { | ||
1923 | struct drm_property *property = NULL; | ||
1924 | |||
1925 | property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); | ||
1926 | if (!property) | ||
1927 | return NULL; | ||
1928 | |||
1929 | if (num_values) { | ||
1930 | property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL); | ||
1931 | if (!property->values) | ||
1932 | goto fail; | ||
1933 | } | ||
1934 | |||
1935 | drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); | ||
1936 | property->flags = flags; | ||
1937 | property->num_values = num_values; | ||
1938 | INIT_LIST_HEAD(&property->enum_blob_list); | ||
1939 | |||
1940 | if (name) | ||
1941 | strncpy(property->name, name, DRM_PROP_NAME_LEN); | ||
1942 | |||
1943 | list_add_tail(&property->head, &dev->mode_config.property_list); | ||
1944 | return property; | ||
1945 | fail: | ||
1946 | kfree(property); | ||
1947 | return NULL; | ||
1948 | } | ||
1949 | EXPORT_SYMBOL(drm_property_create); | ||
1950 | |||
1951 | int drm_property_add_enum(struct drm_property *property, int index, | ||
1952 | uint64_t value, const char *name) | ||
1953 | { | ||
1954 | struct drm_property_enum *prop_enum; | ||
1955 | |||
1956 | if (!(property->flags & DRM_MODE_PROP_ENUM)) | ||
1957 | return -EINVAL; | ||
1958 | |||
1959 | if (!list_empty(&property->enum_blob_list)) { | ||
1960 | list_for_each_entry(prop_enum, &property->enum_blob_list, head) { | ||
1961 | if (prop_enum->value == value) { | ||
1962 | strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); | ||
1963 | prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; | ||
1964 | return 0; | ||
1965 | } | ||
1966 | } | ||
1967 | } | ||
1968 | |||
1969 | prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL); | ||
1970 | if (!prop_enum) | ||
1971 | return -ENOMEM; | ||
1972 | |||
1973 | strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); | ||
1974 | prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; | ||
1975 | prop_enum->value = value; | ||
1976 | |||
1977 | property->values[index] = value; | ||
1978 | list_add_tail(&prop_enum->head, &property->enum_blob_list); | ||
1979 | return 0; | ||
1980 | } | ||
1981 | EXPORT_SYMBOL(drm_property_add_enum); | ||
1982 | |||
1983 | void drm_property_destroy(struct drm_device *dev, struct drm_property *property) | ||
1984 | { | ||
1985 | struct drm_property_enum *prop_enum, *pt; | ||
1986 | |||
1987 | list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) { | ||
1988 | list_del(&prop_enum->head); | ||
1989 | kfree(prop_enum); | ||
1990 | } | ||
1991 | |||
1992 | if (property->num_values) | ||
1993 | kfree(property->values); | ||
1994 | drm_mode_object_put(dev, &property->base); | ||
1995 | list_del(&property->head); | ||
1996 | kfree(property); | ||
1997 | } | ||
1998 | EXPORT_SYMBOL(drm_property_destroy); | ||
1999 | |||
2000 | int drm_connector_attach_property(struct drm_connector *connector, | ||
2001 | struct drm_property *property, uint64_t init_val) | ||
2002 | { | ||
2003 | int i; | ||
2004 | |||
2005 | for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { | ||
2006 | if (connector->property_ids[i] == 0) { | ||
2007 | connector->property_ids[i] = property->base.id; | ||
2008 | connector->property_values[i] = init_val; | ||
2009 | break; | ||
2010 | } | ||
2011 | } | ||
2012 | |||
2013 | if (i == DRM_CONNECTOR_MAX_PROPERTY) | ||
2014 | return -EINVAL; | ||
2015 | return 0; | ||
2016 | } | ||
2017 | EXPORT_SYMBOL(drm_connector_attach_property); | ||
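A driver-side sketch of how these helpers are expected to compose, creating an enum property and attaching it to a connector; the property name and the two values are purely illustrative:

	struct drm_property *prop;

	prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode", 2);
	if (prop) {
		drm_property_add_enum(prop, 0, 0, "full");
		drm_property_add_enum(prop, 1, 1, "center");
		drm_connector_attach_property(connector, prop, 0);	/* default: "full" */
	}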
2018 | |||
2019 | int drm_connector_property_set_value(struct drm_connector *connector, | ||
2020 | struct drm_property *property, uint64_t value) | ||
2021 | { | ||
2022 | int i; | ||
2023 | |||
2024 | for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { | ||
2025 | if (connector->property_ids[i] == property->base.id) { | ||
2026 | connector->property_values[i] = value; | ||
2027 | break; | ||
2028 | } | ||
2029 | } | ||
2030 | |||
2031 | if (i == DRM_CONNECTOR_MAX_PROPERTY) | ||
2032 | return -EINVAL; | ||
2033 | return 0; | ||
2034 | } | ||
2035 | EXPORT_SYMBOL(drm_connector_property_set_value); | ||
2036 | |||
2037 | int drm_connector_property_get_value(struct drm_connector *connector, | ||
2038 | struct drm_property *property, uint64_t *val) | ||
2039 | { | ||
2040 | int i; | ||
2041 | |||
2042 | for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { | ||
2043 | if (connector->property_ids[i] == property->base.id) { | ||
2044 | *val = connector->property_values[i]; | ||
2045 | break; | ||
2046 | } | ||
2047 | } | ||
2048 | |||
2049 | if (i == DRM_CONNECTOR_MAX_PROPERTY) | ||
2050 | return -EINVAL; | ||
2051 | return 0; | ||
2052 | } | ||
2053 | EXPORT_SYMBOL(drm_connector_property_get_value); | ||
2054 | |||
2055 | int drm_mode_getproperty_ioctl(struct drm_device *dev, | ||
2056 | void *data, struct drm_file *file_priv) | ||
2057 | { | ||
2058 | struct drm_mode_object *obj; | ||
2059 | struct drm_mode_get_property *out_resp = data; | ||
2060 | struct drm_property *property; | ||
2061 | int enum_count = 0; | ||
2062 | int blob_count = 0; | ||
2063 | int value_count = 0; | ||
2064 | int ret = 0, i; | ||
2065 | int copied; | ||
2066 | struct drm_property_enum *prop_enum; | ||
2067 | struct drm_mode_property_enum __user *enum_ptr; | ||
2068 | struct drm_property_blob *prop_blob; | ||
2069 | uint32_t *blob_id_ptr; | ||
2070 | uint64_t __user *values_ptr; | ||
2071 | uint32_t __user *blob_length_ptr; | ||
2072 | |||
2073 | mutex_lock(&dev->mode_config.mutex); | ||
2074 | obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); | ||
2075 | if (!obj) { | ||
2076 | ret = -EINVAL; | ||
2077 | goto done; | ||
2078 | } | ||
2079 | property = obj_to_property(obj); | ||
2080 | |||
2081 | if (property->flags & DRM_MODE_PROP_ENUM) { | ||
2082 | list_for_each_entry(prop_enum, &property->enum_blob_list, head) | ||
2083 | enum_count++; | ||
2084 | } else if (property->flags & DRM_MODE_PROP_BLOB) { | ||
2085 | list_for_each_entry(prop_blob, &property->enum_blob_list, head) | ||
2086 | blob_count++; | ||
2087 | } | ||
2088 | |||
2089 | value_count = property->num_values; | ||
2090 | |||
2091 | strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); | ||
2092 | out_resp->name[DRM_PROP_NAME_LEN-1] = 0; | ||
2093 | out_resp->flags = property->flags; | ||
2094 | |||
2095 | if ((out_resp->count_values >= value_count) && value_count) { | ||
2096 | values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr; | ||
2097 | for (i = 0; i < value_count; i++) { | ||
2098 | if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { | ||
2099 | ret = -EFAULT; | ||
2100 | goto done; | ||
2101 | } | ||
2102 | } | ||
2103 | } | ||
2104 | out_resp->count_values = value_count; | ||
2105 | |||
2106 | if (property->flags & DRM_MODE_PROP_ENUM) { | ||
2107 | if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { | ||
2108 | copied = 0; | ||
2109 | enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr; | ||
2110 | list_for_each_entry(prop_enum, &property->enum_blob_list, head) { | ||
2111 | |||
2112 | if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { | ||
2113 | ret = -EFAULT; | ||
2114 | goto done; | ||
2115 | } | ||
2116 | |||
2117 | if (copy_to_user(&enum_ptr[copied].name, | ||
2118 | &prop_enum->name, DRM_PROP_NAME_LEN)) { | ||
2119 | ret = -EFAULT; | ||
2120 | goto done; | ||
2121 | } | ||
2122 | copied++; | ||
2123 | } | ||
2124 | } | ||
2125 | out_resp->count_enum_blobs = enum_count; | ||
2126 | } | ||
2127 | |||
2128 | if (property->flags & DRM_MODE_PROP_BLOB) { | ||
2129 | if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { | ||
2130 | copied = 0; | ||
2131 | blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr; | ||
2132 | blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr; | ||
2133 | |||
2134 | list_for_each_entry(prop_blob, &property->enum_blob_list, head) { | ||
2135 | if (put_user(prop_blob->base.id, blob_id_ptr + copied)) { | ||
2136 | ret = -EFAULT; | ||
2137 | goto done; | ||
2138 | } | ||
2139 | |||
2140 | if (put_user(prop_blob->length, blob_length_ptr + copied)) { | ||
2141 | ret = -EFAULT; | ||
2142 | goto done; | ||
2143 | } | ||
2144 | |||
2145 | copied++; | ||
2146 | } | ||
2147 | } | ||
2148 | out_resp->count_enum_blobs = blob_count; | ||
2149 | } | ||
2150 | done: | ||
2151 | mutex_unlock(&dev->mode_config.mutex); | ||
2152 | return ret; | ||
2153 | } | ||
2154 | |||
2155 | static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length, | ||
2156 | void *data) | ||
2157 | { | ||
2158 | struct drm_property_blob *blob; | ||
2159 | |||
2160 | if (!length || !data) | ||
2161 | return NULL; | ||
2162 | |||
2163 | blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); | ||
2164 | if (!blob) | ||
2165 | return NULL; | ||
2166 | |||
2167 | blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob)); | ||
2168 | blob->length = length; | ||
2169 | |||
2170 | memcpy(blob->data, data, length); | ||
2171 | |||
2172 | drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB); | ||
2173 | |||
2174 | list_add_tail(&blob->head, &dev->mode_config.property_blob_list); | ||
2175 | return blob; | ||
2176 | } | ||
2177 | |||
2178 | static void drm_property_destroy_blob(struct drm_device *dev, | ||
2179 | struct drm_property_blob *blob) | ||
2180 | { | ||
2181 | drm_mode_object_put(dev, &blob->base); | ||
2182 | list_del(&blob->head); | ||
2183 | kfree(blob); | ||
2184 | } | ||
2185 | |||
2186 | int drm_mode_getblob_ioctl(struct drm_device *dev, | ||
2187 | void *data, struct drm_file *file_priv) | ||
2188 | { | ||
2189 | struct drm_mode_object *obj; | ||
2190 | struct drm_mode_get_blob *out_resp = data; | ||
2191 | struct drm_property_blob *blob; | ||
2192 | int ret = 0; | ||
2193 | void *blob_ptr; | ||
2194 | |||
2195 | mutex_lock(&dev->mode_config.mutex); | ||
2196 | obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); | ||
2197 | if (!obj) { | ||
2198 | ret = -EINVAL; | ||
2199 | goto done; | ||
2200 | } | ||
2201 | blob = obj_to_blob(obj); | ||
2202 | |||
2203 | if (out_resp->length == blob->length) { | ||
2204 | blob_ptr = (void *)(unsigned long)out_resp->data; | ||
2205 | if (copy_to_user(blob_ptr, blob->data, blob->length)){ | ||
2206 | ret = -EFAULT; | ||
2207 | goto done; | ||
2208 | } | ||
2209 | } | ||
2210 | out_resp->length = blob->length; | ||
2211 | |||
2212 | done: | ||
2213 | mutex_unlock(&dev->mode_config.mutex); | ||
2214 | return ret; | ||
2215 | } | ||
2216 | |||
2217 | int drm_mode_connector_update_edid_property(struct drm_connector *connector, | ||
2218 | struct edid *edid) | ||
2219 | { | ||
2220 | struct drm_device *dev = connector->dev; | ||
2221 | int ret = 0; | ||
2222 | |||
2223 | if (connector->edid_blob_ptr) | ||
2224 | drm_property_destroy_blob(dev, connector->edid_blob_ptr); | ||
2225 | |||
2226 | /* Clear the EDID property when no EDID is supplied. */ | ||
2227 | if (!edid) { | ||
2228 | connector->edid_blob_ptr = NULL; | ||
2229 | ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); | ||
2230 | return ret; | ||
2231 | } | ||
2232 | |||
2233 | connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid); | ||
2234 | |||
2235 | ret = drm_connector_property_set_value(connector, | ||
2236 | dev->mode_config.edid_property, | ||
2237 | connector->edid_blob_ptr->base.id); | ||
2238 | |||
2239 | return ret; | ||
2240 | } | ||
2241 | EXPORT_SYMBOL(drm_mode_connector_update_edid_property); | ||
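Typical driver usage, sketched: after probing the display over DDC, the connector's detect/get_modes path pushes the raw EDID block into the property. This assumes the drm_get_edid() and drm_add_edid_modes() helpers from drm_edid.c and an i2c adapter owned by the driver (ddc_adapter is a stand-in name):

	struct edid *edid;

	edid = drm_get_edid(connector, ddc_adapter);	/* may be NULL if nothing answered */
	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		drm_add_edid_modes(connector, edid);	/* populate probed_modes from the EDID */
		kfree(edid);
	}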
2242 | |||
2243 | int drm_mode_connector_property_set_ioctl(struct drm_device *dev, | ||
2244 | void *data, struct drm_file *file_priv) | ||
2245 | { | ||
2246 | struct drm_mode_connector_set_property *out_resp = data; | ||
2247 | struct drm_mode_object *obj; | ||
2248 | struct drm_property *property; | ||
2249 | struct drm_connector *connector; | ||
2250 | int ret = -EINVAL; | ||
2251 | int i; | ||
2252 | |||
2253 | mutex_lock(&dev->mode_config.mutex); | ||
2254 | |||
2255 | obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); | ||
2256 | if (!obj) { | ||
2257 | goto out; | ||
2258 | } | ||
2259 | connector = obj_to_connector(obj); | ||
2260 | |||
2261 | for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { | ||
2262 | if (connector->property_ids[i] == out_resp->prop_id) | ||
2263 | break; | ||
2264 | } | ||
2265 | |||
2266 | if (i == DRM_CONNECTOR_MAX_PROPERTY) { | ||
2267 | goto out; | ||
2268 | } | ||
2269 | |||
2270 | obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); | ||
2271 | if (!obj) { | ||
2272 | goto out; | ||
2273 | } | ||
2274 | property = obj_to_property(obj); | ||
2275 | |||
2276 | if (property->flags & DRM_MODE_PROP_IMMUTABLE) | ||
2277 | goto out; | ||
2278 | |||
2279 | if (property->flags & DRM_MODE_PROP_RANGE) { | ||
2280 | if (out_resp->value < property->values[0]) | ||
2281 | goto out; | ||
2282 | |||
2283 | if (out_resp->value > property->values[1]) | ||
2284 | goto out; | ||
2285 | } else { | ||
2286 | int found = 0; | ||
2287 | for (i = 0; i < property->num_values; i++) { | ||
2288 | if (property->values[i] == out_resp->value) { | ||
2289 | found = 1; | ||
2290 | break; | ||
2291 | } | ||
2292 | } | ||
2293 | if (!found) { | ||
2294 | goto out; | ||
2295 | } | ||
2296 | } | ||
2297 | |||
2298 | if (connector->funcs->set_property) | ||
2299 | ret = connector->funcs->set_property(connector, property, out_resp->value); | ||
2300 | |||
2301 | /* store the property value if successful */ | ||
2302 | if (!ret) | ||
2303 | drm_connector_property_set_value(connector, property, out_resp->value); | ||
2304 | out: | ||
2305 | mutex_unlock(&dev->mode_config.mutex); | ||
2306 | return ret; | ||
2307 | } | ||
2308 | |||
2309 | int drm_mode_connector_attach_encoder(struct drm_connector *connector, | ||
2310 | struct drm_encoder *encoder) | ||
2311 | { | ||
2312 | int i; | ||
2313 | |||
2314 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
2315 | if (connector->encoder_ids[i] == 0) { | ||
2316 | connector->encoder_ids[i] = encoder->base.id; | ||
2317 | return 0; | ||
2318 | } | ||
2319 | } | ||
2320 | return -ENOMEM; | ||
2321 | } | ||
2322 | EXPORT_SYMBOL(drm_mode_connector_attach_encoder); | ||
2323 | |||
2324 | void drm_mode_connector_detach_encoder(struct drm_connector *connector, | ||
2325 | struct drm_encoder *encoder) | ||
2326 | { | ||
2327 | int i; | ||
2328 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
2329 | if (connector->encoder_ids[i] == encoder->base.id) { | ||
2330 | connector->encoder_ids[i] = 0; | ||
2331 | if (connector->encoder == encoder) | ||
2332 | connector->encoder = NULL; | ||
2333 | break; | ||
2334 | } | ||
2335 | } | ||
2336 | } | ||
2337 | EXPORT_SYMBOL(drm_mode_connector_detach_encoder); | ||
2338 | |||
2339 | bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | ||
2340 | int gamma_size) | ||
2341 | { | ||
2342 | crtc->gamma_size = gamma_size; | ||
2343 | |||
2344 | crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL); | ||
2345 | if (!crtc->gamma_store) { | ||
2346 | crtc->gamma_size = 0; | ||
2347 | return false; | ||
2348 | } | ||
2349 | |||
2350 | return true; | ||
2351 | } | ||
2352 | EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); | ||
2353 | |||
2354 | int drm_mode_gamma_set_ioctl(struct drm_device *dev, | ||
2355 | void *data, struct drm_file *file_priv) | ||
2356 | { | ||
2357 | struct drm_mode_crtc_lut *crtc_lut = data; | ||
2358 | struct drm_mode_object *obj; | ||
2359 | struct drm_crtc *crtc; | ||
2360 | void *r_base, *g_base, *b_base; | ||
2361 | int size; | ||
2362 | int ret = 0; | ||
2363 | |||
2364 | mutex_lock(&dev->mode_config.mutex); | ||
2365 | obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
2366 | if (!obj) { | ||
2367 | ret = -EINVAL; | ||
2368 | goto out; | ||
2369 | } | ||
2370 | crtc = obj_to_crtc(obj); | ||
2371 | |||
2372 | /* memcpy into gamma store */ | ||
2373 | if (crtc_lut->gamma_size != crtc->gamma_size) { | ||
2374 | ret = -EINVAL; | ||
2375 | goto out; | ||
2376 | } | ||
2377 | |||
2378 | size = crtc_lut->gamma_size * (sizeof(uint16_t)); | ||
2379 | r_base = crtc->gamma_store; | ||
2380 | if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) { | ||
2381 | ret = -EFAULT; | ||
2382 | goto out; | ||
2383 | } | ||
2384 | |||
2385 | g_base = r_base + size; | ||
2386 | if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) { | ||
2387 | ret = -EFAULT; | ||
2388 | goto out; | ||
2389 | } | ||
2390 | |||
2391 | b_base = g_base + size; | ||
2392 | if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) { | ||
2393 | ret = -EFAULT; | ||
2394 | goto out; | ||
2395 | } | ||
2396 | |||
2397 | crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); | ||
2398 | |||
2399 | out: | ||
2400 | mutex_unlock(&dev->mode_config.mutex); | ||
2401 | return ret; | ||
2402 | |||
2403 | } | ||
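The gamma store is a single allocation holding the red, green and blue ramps back to back, each gamma_size u16 entries long; userspace supplies them as three separate arrays. A sketch of uploading a linear ramp, assuming gamma_size and crtc_id were read back from GETCRTC/GETRESOURCES and error handling is omitted:

	uint16_t *lut = calloc(3 * gamma_size, sizeof(uint16_t));
	struct drm_mode_crtc_lut req;
	int i;

	for (i = 0; i < gamma_size; i++)		/* identity ramp in all three channels */
		lut[i] = lut[gamma_size + i] = lut[2 * gamma_size + i] =
			(i * 0xffff) / (gamma_size - 1);

	memset(&req, 0, sizeof(req));
	req.crtc_id = crtc_id;
	req.gamma_size = gamma_size;
	req.red   = (uint64_t)(unsigned long)lut;
	req.green = (uint64_t)(unsigned long)(lut + gamma_size);
	req.blue  = (uint64_t)(unsigned long)(lut + 2 * gamma_size);
	ioctl(fd, DRM_IOCTL_MODE_GAMMA_SET, &req);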
2404 | |||
2405 | int drm_mode_gamma_get_ioctl(struct drm_device *dev, | ||
2406 | void *data, struct drm_file *file_priv) | ||
2407 | { | ||
2408 | struct drm_mode_crtc_lut *crtc_lut = data; | ||
2409 | struct drm_mode_object *obj; | ||
2410 | struct drm_crtc *crtc; | ||
2411 | void *r_base, *g_base, *b_base; | ||
2412 | int size; | ||
2413 | int ret = 0; | ||
2414 | |||
2415 | mutex_lock(&dev->mode_config.mutex); | ||
2416 | obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
2417 | if (!obj) { | ||
2418 | ret = -EINVAL; | ||
2419 | goto out; | ||
2420 | } | ||
2421 | crtc = obj_to_crtc(obj); | ||
2422 | |||
2423 | /* copy out of the gamma store */ | ||
2424 | if (crtc_lut->gamma_size != crtc->gamma_size) { | ||
2425 | ret = -EINVAL; | ||
2426 | goto out; | ||
2427 | } | ||
2428 | |||
2429 | size = crtc_lut->gamma_size * (sizeof(uint16_t)); | ||
2430 | r_base = crtc->gamma_store; | ||
2431 | if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) { | ||
2432 | ret = -EFAULT; | ||
2433 | goto out; | ||
2434 | } | ||
2435 | |||
2436 | g_base = r_base + size; | ||
2437 | if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) { | ||
2438 | ret = -EFAULT; | ||
2439 | goto out; | ||
2440 | } | ||
2441 | |||
2442 | b_base = g_base + size; | ||
2443 | if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) { | ||
2444 | ret = -EFAULT; | ||
2445 | goto out; | ||
2446 | } | ||
2447 | out: | ||
2448 | mutex_unlock(&dev->mode_config.mutex); | ||
2449 | return ret; | ||
2450 | } | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c new file mode 100644 index 000000000000..964c5eb1fada --- /dev/null +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -0,0 +1,903 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006-2008 Intel Corporation | ||
3 | * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> | ||
4 | * | ||
5 | * DRM core CRTC related functions | ||
6 | * | ||
7 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
8 | * documentation for any purpose is hereby granted without fee, provided that | ||
9 | * the above copyright notice appear in all copies and that both that copyright | ||
10 | * notice and this permission notice appear in supporting documentation, and | ||
11 | * that the name of the copyright holders not be used in advertising or | ||
12 | * publicity pertaining to distribution of the software without specific, | ||
13 | * written prior permission. The copyright holders make no representations | ||
14 | * about the suitability of this software for any purpose. It is provided "as | ||
15 | * is" without express or implied warranty. | ||
16 | * | ||
17 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
18 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
19 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
20 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
21 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
22 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
23 | * OF THIS SOFTWARE. | ||
24 | * | ||
25 | * Authors: | ||
26 | * Keith Packard | ||
27 | * Eric Anholt <eric@anholt.net> | ||
28 | * Dave Airlie <airlied@linux.ie> | ||
29 | * Jesse Barnes <jesse.barnes@intel.com> | ||
30 | */ | ||
31 | |||
32 | #include "drmP.h" | ||
33 | #include "drm_crtc.h" | ||
34 | #include "drm_crtc_helper.h" | ||
35 | |||
36 | /* | ||
37 | * Detailed mode info for 800x600@60Hz | ||
38 | */ | ||
39 | static struct drm_display_mode std_modes[] = { | ||
40 | { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840, | ||
41 | 968, 1056, 0, 600, 601, 605, 628, 0, | ||
42 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
43 | }; | ||
44 | |||
45 | /** | ||
46 | * drm_helper_probe_single_connector_modes - get complete set of display modes | ||
47 | * @connector: connector to probe | ||
48 | * @maxX: max width for modes | ||
49 | * @maxY: max height for modes | ||
50 | * | ||
51 | * LOCKING: | ||
52 | * Caller must hold mode config lock. | ||
53 | * | ||
54 | * Try to detect the modes available on @connector. Modes will first be | ||
55 | * added to the connector's probed_modes list, then culled (based on validity | ||
56 | * and the @maxX, @maxY parameters) and put into the normal modes list. | ||
58 | * | ||
59 | * Intended to be used either at bootup time or when major configuration | ||
60 | * changes have occurred. | ||
61 | * | ||
62 | * FIXME: take into account monitor limits | ||
63 | * | ||
64 | * RETURNS: | ||
65 | * Number of modes found on @connector. | ||
66 | */ | ||
67 | int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | ||
68 | uint32_t maxX, uint32_t maxY) | ||
69 | { | ||
70 | struct drm_device *dev = connector->dev; | ||
71 | struct drm_display_mode *mode, *t; | ||
72 | struct drm_connector_helper_funcs *connector_funcs = | ||
73 | connector->helper_private; | ||
74 | int count = 0; | ||
75 | |||
76 | DRM_DEBUG("%s\n", drm_get_connector_name(connector)); | ||
77 | /* set all modes to the unverified state */ | ||
78 | list_for_each_entry_safe(mode, t, &connector->modes, head) | ||
79 | mode->status = MODE_UNVERIFIED; | ||
80 | |||
81 | connector->status = connector->funcs->detect(connector); | ||
82 | |||
83 | if (connector->status == connector_status_disconnected) { | ||
84 | DRM_DEBUG("%s is disconnected\n", | ||
85 | drm_get_connector_name(connector)); | ||
86 | /* TODO set EDID to NULL */ | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | count = (*connector_funcs->get_modes)(connector); | ||
91 | if (!count) | ||
92 | return 0; | ||
93 | |||
94 | drm_mode_connector_list_update(connector); | ||
95 | |||
96 | if (maxX && maxY) | ||
97 | drm_mode_validate_size(dev, &connector->modes, maxX, | ||
98 | maxY, 0); | ||
99 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | ||
100 | if (mode->status == MODE_OK) | ||
101 | mode->status = connector_funcs->mode_valid(connector, | ||
102 | mode); | ||
103 | } | ||
104 | |||
105 | |||
106 | drm_mode_prune_invalid(dev, &connector->modes, true); | ||
107 | |||
108 | if (list_empty(&connector->modes)) | ||
109 | return 0; | ||
110 | |||
111 | drm_mode_sort(&connector->modes); | ||
112 | |||
113 | DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector)); | ||
114 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | ||
115 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
116 | |||
117 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
118 | drm_mode_debug_printmodeline(mode); | ||
119 | } | ||
120 | |||
121 | return count; | ||
122 | } | ||
123 | EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); | ||
124 | |||
125 | int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, | ||
126 | uint32_t maxY) | ||
127 | { | ||
128 | struct drm_connector *connector; | ||
129 | int count = 0; | ||
130 | |||
131 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
132 | count += drm_helper_probe_single_connector_modes(connector, | ||
133 | maxX, maxY); | ||
134 | } | ||
135 | |||
136 | return count; | ||
137 | } | ||
138 | EXPORT_SYMBOL(drm_helper_probe_connector_modes); | ||
139 | |||
140 | static void drm_helper_add_std_modes(struct drm_device *dev, | ||
141 | struct drm_connector *connector) | ||
142 | { | ||
143 | struct drm_display_mode *mode, *t; | ||
144 | int i; | ||
145 | |||
146 | for (i = 0; i < ARRAY_SIZE(std_modes); i++) { | ||
147 | struct drm_display_mode *stdmode; | ||
148 | |||
149 | /* | ||
150 | * When no valid EDID modes are available we end up here; we used to | ||
151 | * bail out in that case, but now we add some standard modes and | ||
152 | * move on. | ||
153 | */ | ||
154 | stdmode = drm_mode_duplicate(dev, &std_modes[i]); | ||
155 | drm_mode_probed_add(connector, stdmode); | ||
156 | drm_mode_list_concat(&connector->probed_modes, | ||
157 | &connector->modes); | ||
158 | |||
159 | DRM_DEBUG("Adding mode %s to %s\n", stdmode->name, | ||
160 | drm_get_connector_name(connector)); | ||
161 | } | ||
162 | drm_mode_sort(&connector->modes); | ||
163 | |||
164 | DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector)); | ||
165 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | ||
166 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
167 | |||
168 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
169 | drm_mode_debug_printmodeline(mode); | ||
170 | } | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config | ||
175 | * @crtc: CRTC to check | ||
176 | * | ||
177 | * LOCKING: | ||
178 | * Caller must hold mode config lock. | ||
179 | * | ||
180 | * Walk @crtc's DRM device's mode_config and see if it's in use. | ||
181 | * | ||
182 | * RETURNS: | ||
183 | * True if @crtc is bound to at least one encoder, false otherwise. | ||
184 | */ | ||
185 | bool drm_helper_crtc_in_use(struct drm_crtc *crtc) | ||
186 | { | ||
187 | struct drm_encoder *encoder; | ||
188 | struct drm_device *dev = crtc->dev; | ||
189 | /* FIXME: Locking around list access? */ | ||
190 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) | ||
191 | if (encoder->crtc == crtc) | ||
192 | return true; | ||
193 | return false; | ||
194 | } | ||
195 | EXPORT_SYMBOL(drm_helper_crtc_in_use); | ||
196 | |||
197 | /** | ||
198 | * drm_helper_disable_unused_functions - disable unused objects | ||
199 | * @dev: DRM device | ||
200 | * | ||
201 | * LOCKING: | ||
202 | * Caller must hold mode config lock. | ||
203 | * | ||
204 | * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled | ||
205 | * by calling its dpms function, which should power it off. | ||
206 | */ | ||
207 | void drm_helper_disable_unused_functions(struct drm_device *dev) | ||
208 | { | ||
209 | struct drm_encoder *encoder; | ||
210 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
211 | struct drm_crtc *crtc; | ||
212 | |||
213 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
214 | encoder_funcs = encoder->helper_private; | ||
215 | if (!encoder->crtc) | ||
216 | (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); | ||
217 | } | ||
218 | |||
219 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
220 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
221 | crtc->enabled = drm_helper_crtc_in_use(crtc); | ||
222 | if (!crtc->enabled) { | ||
223 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
224 | crtc->fb = NULL; | ||
225 | } | ||
226 | } | ||
227 | } | ||
228 | EXPORT_SYMBOL(drm_helper_disable_unused_functions); | ||
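
A short sketch of how a driver might use the helper above (hypothetical example_* code, assuming the caller already holds the mode config lock): after breaking an encoder/CRTC link, power down whatever is left idle.

	/* hypothetical: detach an encoder and turn off now-unused hardware */
	static void example_detach_encoder(struct drm_device *dev,
					   struct drm_encoder *encoder)
	{
		encoder->crtc = NULL;			  /* break the encoder/CRTC link */
		drm_helper_disable_unused_functions(dev); /* DPMS off idle CRTCs/encoders */
	}
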
229 | |||
230 | static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height) | ||
231 | { | ||
232 | struct drm_display_mode *mode; | ||
233 | |||
234 | list_for_each_entry(mode, &connector->modes, head) { | ||
235 | if (drm_mode_width(mode) > width || | ||
236 | drm_mode_height(mode) > height) | ||
237 | continue; | ||
238 | if (mode->type & DRM_MODE_TYPE_PREFERRED) | ||
239 | return mode; | ||
240 | } | ||
241 | return NULL; | ||
242 | } | ||
243 | |||
244 | static bool drm_connector_enabled(struct drm_connector *connector, bool strict) | ||
245 | { | ||
246 | bool enable; | ||
247 | |||
248 | if (strict) { | ||
249 | enable = connector->status == connector_status_connected; | ||
250 | } else { | ||
251 | enable = connector->status != connector_status_disconnected; | ||
252 | } | ||
253 | return enable; | ||
254 | } | ||
255 | |||
256 | static void drm_enable_connectors(struct drm_device *dev, bool *enabled) | ||
257 | { | ||
258 | bool any_enabled = false; | ||
259 | struct drm_connector *connector; | ||
260 | int i = 0; | ||
261 | |||
262 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
263 | enabled[i] = drm_connector_enabled(connector, true); | ||
264 | DRM_DEBUG("connector %d enabled? %s\n", connector->base.id, | ||
265 | enabled[i] ? "yes" : "no"); | ||
266 | any_enabled |= enabled[i]; | ||
267 | i++; | ||
268 | } | ||
269 | |||
270 | if (any_enabled) | ||
271 | return; | ||
272 | |||
273 | i = 0; | ||
274 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
275 | enabled[i] = drm_connector_enabled(connector, false); | ||
276 | i++; | ||
277 | } | ||
278 | } | ||
279 | |||
280 | static bool drm_target_preferred(struct drm_device *dev, | ||
281 | struct drm_display_mode **modes, | ||
282 | bool *enabled, int width, int height) | ||
283 | { | ||
284 | struct drm_connector *connector; | ||
285 | int i = 0; | ||
286 | |||
287 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
288 | |||
289 | if (enabled[i] == false) { | ||
290 | i++; | ||
291 | continue; | ||
292 | } | ||
293 | |||
294 | DRM_DEBUG("looking for preferred mode on connector %d\n", | ||
295 | connector->base.id); | ||
296 | |||
297 | modes[i] = drm_has_preferred_mode(connector, width, height); | ||
298 | /* No preferred modes, pick one off the list */ | ||
299 | if (!modes[i] && !list_empty(&connector->modes)) { | ||
300 | list_for_each_entry(modes[i], &connector->modes, head) | ||
301 | break; | ||
302 | } | ||
303 | DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name : | ||
304 | "none"); | ||
305 | i++; | ||
306 | } | ||
307 | return true; | ||
308 | } | ||
309 | |||
310 | static int drm_pick_crtcs(struct drm_device *dev, | ||
311 | struct drm_crtc **best_crtcs, | ||
312 | struct drm_display_mode **modes, | ||
313 | int n, int width, int height) | ||
314 | { | ||
315 | int c, o; | ||
316 | struct drm_connector *connector; | ||
317 | struct drm_connector_helper_funcs *connector_funcs; | ||
318 | struct drm_encoder *encoder; | ||
319 | struct drm_crtc *best_crtc; | ||
320 | int my_score, best_score, score; | ||
321 | struct drm_crtc **crtcs, *crtc; | ||
322 | |||
323 | if (n == dev->mode_config.num_connector) | ||
324 | return 0; | ||
325 | c = 0; | ||
326 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
327 | if (c == n) | ||
328 | break; | ||
329 | c++; | ||
330 | } | ||
331 | |||
332 | best_crtcs[n] = NULL; | ||
333 | best_crtc = NULL; | ||
334 | best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); | ||
335 | if (modes[n] == NULL) | ||
336 | return best_score; | ||
337 | |||
338 | crtcs = kmalloc(dev->mode_config.num_connector * | ||
339 | sizeof(struct drm_crtc *), GFP_KERNEL); | ||
340 | if (!crtcs) | ||
341 | return best_score; | ||
342 | |||
343 | my_score = 1; | ||
344 | if (connector->status == connector_status_connected) | ||
345 | my_score++; | ||
346 | if (drm_has_preferred_mode(connector, width, height)) | ||
347 | my_score++; | ||
348 | |||
349 | connector_funcs = connector->helper_private; | ||
350 | encoder = connector_funcs->best_encoder(connector); | ||
351 | if (!encoder) | ||
352 | goto out; | ||
353 | |||
354 | connector->encoder = encoder; | ||
355 | |||
356 | /* select a crtc for this connector and then attempt to configure | ||
357 | remaining connectors */ | ||
358 | c = 0; | ||
359 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
360 | |||
361 | if ((connector->encoder->possible_crtcs & (1 << c)) == 0) { | ||
362 | c++; | ||
363 | continue; | ||
364 | } | ||
365 | |||
366 | for (o = 0; o < n; o++) | ||
367 | if (best_crtcs[o] == crtc) | ||
368 | break; | ||
369 | |||
370 | if (o < n) { | ||
371 | /* ignore cloning for now */ | ||
372 | c++; | ||
373 | continue; | ||
374 | } | ||
375 | |||
376 | crtcs[n] = crtc; | ||
377 | memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *)); | ||
378 | score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1, | ||
379 | width, height); | ||
380 | if (score > best_score) { | ||
381 | best_crtc = crtc; | ||
382 | best_score = score; | ||
383 | memcpy(best_crtcs, crtcs, | ||
384 | dev->mode_config.num_connector * | ||
385 | sizeof(struct drm_crtc *)); | ||
386 | } | ||
387 | c++; | ||
388 | } | ||
389 | out: | ||
390 | kfree(crtcs); | ||
391 | return best_score; | ||
392 | } | ||
393 | |||
394 | static void drm_setup_crtcs(struct drm_device *dev) | ||
395 | { | ||
396 | struct drm_crtc **crtcs; | ||
397 | struct drm_display_mode **modes; | ||
398 | struct drm_encoder *encoder; | ||
399 | struct drm_connector *connector; | ||
400 | bool *enabled; | ||
401 | int width, height; | ||
402 | int i, ret; | ||
403 | |||
404 | DRM_DEBUG("\n"); | ||
405 | |||
406 | width = dev->mode_config.max_width; | ||
407 | height = dev->mode_config.max_height; | ||
408 | |||
409 | /* clean out all the encoder/crtc combos */ | ||
410 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
411 | encoder->crtc = NULL; | ||
412 | } | ||
413 | |||
414 | crtcs = kcalloc(dev->mode_config.num_connector, | ||
415 | sizeof(struct drm_crtc *), GFP_KERNEL); | ||
416 | modes = kcalloc(dev->mode_config.num_connector, | ||
417 | sizeof(struct drm_display_mode *), GFP_KERNEL); | ||
418 | enabled = kcalloc(dev->mode_config.num_connector, | ||
419 | sizeof(bool), GFP_KERNEL); | ||
420 | |||
421 | drm_enable_connectors(dev, enabled); | ||
422 | |||
423 | ret = drm_target_preferred(dev, modes, enabled, width, height); | ||
424 | if (!ret) | ||
425 | DRM_ERROR("Unable to find initial modes\n"); | ||
426 | |||
427 | DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height); | ||
428 | |||
429 | drm_pick_crtcs(dev, crtcs, modes, 0, width, height); | ||
430 | |||
431 | i = 0; | ||
432 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
433 | struct drm_display_mode *mode = modes[i]; | ||
434 | struct drm_crtc *crtc = crtcs[i]; | ||
435 | |||
436 | if (connector->encoder == NULL) { | ||
437 | i++; | ||
438 | continue; | ||
439 | } | ||
440 | |||
441 | if (mode && crtc) { | ||
442 | DRM_DEBUG("desired mode %s set on crtc %d\n", | ||
443 | mode->name, crtc->base.id); | ||
444 | crtc->desired_mode = mode; | ||
445 | connector->encoder->crtc = crtc; | ||
446 | } else | ||
447 | connector->encoder->crtc = NULL; | ||
448 | i++; | ||
449 | } | ||
450 | |||
451 | kfree(crtcs); | ||
452 | kfree(modes); | ||
453 | kfree(enabled); | ||
454 | } | ||
455 | /** | ||
456 | * drm_crtc_helper_set_mode - set a mode on a CRTC | ||
457 | * @crtc: CRTC to program | ||
458 | * @mode: mode to use | ||
459 | * @x: horizontal offset into the framebuffer | ||
460 | * @y: vertical offset into the framebuffer | ||
461 | * | ||
462 | * LOCKING: | ||
463 | * Caller must hold mode config lock. | ||
464 | * | ||
465 | * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance | ||
466 | * to fixup or reject the mode prior to trying to set it. | ||
467 | * | ||
468 | * RETURNS: | ||
469 | * True if the mode was set successfully, or false otherwise. | ||
470 | */ | ||
471 | bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | ||
472 | struct drm_display_mode *mode, | ||
473 | int x, int y, | ||
474 | struct drm_framebuffer *old_fb) | ||
475 | { | ||
476 | struct drm_device *dev = crtc->dev; | ||
477 | struct drm_display_mode *adjusted_mode, saved_mode; | ||
478 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
479 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
480 | int saved_x, saved_y; | ||
481 | struct drm_encoder *encoder; | ||
482 | bool ret = true; | ||
483 | bool depth_changed, bpp_changed; | ||
484 | |||
485 | adjusted_mode = drm_mode_duplicate(dev, mode); | ||
486 | |||
487 | crtc->enabled = drm_helper_crtc_in_use(crtc); | ||
488 | |||
489 | if (!crtc->enabled) | ||
490 | return true; | ||
491 | |||
492 | if (old_fb && crtc->fb) { | ||
493 | depth_changed = (old_fb->depth != crtc->fb->depth); | ||
494 | bpp_changed = (old_fb->bits_per_pixel != | ||
495 | crtc->fb->bits_per_pixel); | ||
496 | } else { | ||
497 | depth_changed = true; | ||
498 | bpp_changed = true; | ||
499 | } | ||
500 | |||
501 | saved_mode = crtc->mode; | ||
502 | saved_x = crtc->x; | ||
503 | saved_y = crtc->y; | ||
504 | |||
505 | /* Update crtc values up front so the driver can rely on them for mode | ||
506 | * setting. | ||
507 | */ | ||
508 | crtc->mode = *mode; | ||
509 | crtc->x = x; | ||
510 | crtc->y = y; | ||
511 | |||
512 | if (drm_mode_equal(&saved_mode, &crtc->mode)) { | ||
513 | if (saved_x != crtc->x || saved_y != crtc->y || | ||
514 | depth_changed || bpp_changed) { | ||
515 | crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, | ||
516 | old_fb); | ||
517 | goto done; | ||
518 | } | ||
519 | } | ||
520 | |||
521 | /* Pass our mode to the connectors and the CRTC to give them a chance to | ||
522 | * adjust it according to limitations or connector properties, and also | ||
523 | * a chance to reject the mode entirely. | ||
524 | */ | ||
525 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
526 | |||
527 | if (encoder->crtc != crtc) | ||
528 | continue; | ||
529 | encoder_funcs = encoder->helper_private; | ||
530 | if (!(ret = encoder_funcs->mode_fixup(encoder, mode, | ||
531 | adjusted_mode))) { | ||
532 | goto done; | ||
533 | } | ||
534 | } | ||
535 | |||
536 | if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { | ||
537 | goto done; | ||
538 | } | ||
539 | |||
540 | /* Prepare the encoders and CRTCs before setting the mode. */ | ||
541 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
542 | |||
543 | if (encoder->crtc != crtc) | ||
544 | continue; | ||
545 | encoder_funcs = encoder->helper_private; | ||
546 | /* Disable the encoders as the first thing we do. */ | ||
547 | encoder_funcs->prepare(encoder); | ||
548 | } | ||
549 | |||
550 | crtc_funcs->prepare(crtc); | ||
551 | |||
552 | /* Set up the DPLL and any encoders state that needs to adjust or depend | ||
553 | * on the DPLL. | ||
554 | */ | ||
555 | crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); | ||
556 | |||
557 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
558 | |||
559 | if (encoder->crtc != crtc) | ||
560 | continue; | ||
561 | |||
562 | DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder), | ||
563 | mode->name, mode->base.id); | ||
564 | encoder_funcs = encoder->helper_private; | ||
565 | encoder_funcs->mode_set(encoder, mode, adjusted_mode); | ||
566 | } | ||
567 | |||
568 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ | ||
569 | crtc_funcs->commit(crtc); | ||
570 | |||
571 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
572 | |||
573 | if (encoder->crtc != crtc) | ||
574 | continue; | ||
575 | |||
576 | encoder_funcs = encoder->helper_private; | ||
577 | encoder_funcs->commit(encoder); | ||
578 | |||
579 | } | ||
580 | |||
581 | /* FIXME: add subpixel order */ | ||
582 | done: | ||
583 | /* adjusted_mode is no longer needed on any path; free it here */ | ||
584 | drm_mode_destroy(dev, adjusted_mode); | ||
585 | if (!ret) { | ||
586 | crtc->mode = saved_mode; | ||
587 | crtc->x = saved_x; | ||
588 | crtc->y = saved_y; | ||
589 | } | ||
590 | |||
591 | return ret; | ||
592 | } | ||
593 | EXPORT_SYMBOL(drm_crtc_helper_set_mode); | ||
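
drm_crtc_helper_set_mode() drives the per-object helper vtables in a fixed order (mode_fixup, prepare, mode_set, commit). A hedged wiring sketch for a hypothetical driver follows; the example_crtc_* callbacks are assumed to be defined elsewhere in that driver, and drm_crtc_helper_add() is assumed to be the registration helper from drm_crtc_helper.h that stores the vtable in crtc->helper_private.

	/* hypothetical driver glue -- only the wiring is shown */
	static struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
		.dpms		= example_crtc_dpms,	      /* power state changes */
		.mode_fixup	= example_crtc_mode_fixup,    /* adjust or reject the mode */
		.prepare	= example_crtc_prepare,	      /* called before mode_set */
		.mode_set	= example_crtc_mode_set,      /* program PLLs and timings */
		.mode_set_base	= example_crtc_mode_set_base, /* move scanout only */
		.commit		= example_crtc_commit,	      /* turn the pipe back on */
	};

	/* at CRTC init time */
	drm_crtc_helper_add(crtc, &example_crtc_helper_funcs);
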
594 | |||
595 | |||
596 | /** | ||
597 | * drm_crtc_helper_set_config - set a new config from userspace | ||
598 | * @set: mode set configuration, containing: | ||
599 | * - the CRTC to set up and the new mode | ||
600 | * - the x/y position within the framebuffer | ||
601 | * - the set of connectors for the new config | ||
602 | * - the new framebuffer | ||
603 | * | ||
604 | * LOCKING: | ||
605 | * Caller must hold mode config lock. | ||
606 | * | ||
607 | * Set up a new configuration, provided by the user in @set, and enable | ||
608 | * it. | ||
609 | * | ||
610 | * RETURNS: | ||
611 | * Zero on success, -errno on failure. | ||
612 | */ | ||
613 | int drm_crtc_helper_set_config(struct drm_mode_set *set) | ||
614 | { | ||
615 | struct drm_device *dev; | ||
616 | struct drm_crtc **save_crtcs, *new_crtc; | ||
617 | struct drm_encoder **save_encoders, *new_encoder; | ||
618 | struct drm_framebuffer *old_fb; | ||
619 | bool save_enabled; | ||
620 | bool mode_changed = false; | ||
621 | bool fb_changed = false; | ||
622 | struct drm_connector *connector; | ||
623 | int count = 0, ro, fail = 0; | ||
624 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
625 | int ret = 0; | ||
626 | |||
627 | DRM_DEBUG("\n"); | ||
628 | |||
629 | if (!set) | ||
630 | return -EINVAL; | ||
631 | |||
632 | if (!set->crtc) | ||
633 | return -EINVAL; | ||
634 | |||
635 | if (!set->crtc->helper_private) | ||
636 | return -EINVAL; | ||
637 | |||
638 | crtc_funcs = set->crtc->helper_private; | ||
639 | |||
640 | DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n", | ||
641 | set->crtc, set->crtc->base.id, set->fb, set->connectors, | ||
642 | (int)set->num_connectors, set->x, set->y); | ||
643 | |||
644 | dev = set->crtc->dev; | ||
645 | |||
646 | /* save previous config */ | ||
647 | save_enabled = set->crtc->enabled; | ||
648 | |||
649 | /* | ||
650 | * Allocate one entry per connector (mode_config.num_connector) since | ||
651 | * we'll look at the CRTC and encoder associated with each connector later. | ||
652 | */ | ||
653 | save_crtcs = kzalloc(dev->mode_config.num_connector * | ||
654 | sizeof(struct drm_crtc *), GFP_KERNEL); | ||
655 | if (!save_crtcs) | ||
656 | return -ENOMEM; | ||
657 | |||
658 | save_encoders = kzalloc(dev->mode_config.num_connector * | ||
659 | sizeof(struct drm_encoder *), GFP_KERNEL); | ||
660 | if (!save_encoders) { | ||
661 | kfree(save_crtcs); | ||
662 | return -ENOMEM; | ||
663 | } | ||
664 | |||
665 | /* We should be able to check here if the fb has the same properties | ||
666 | * and then just flip_or_move it */ | ||
667 | if (set->crtc->fb != set->fb) { | ||
668 | /* If we have no fb then treat it as a full mode set */ | ||
669 | if (set->crtc->fb == NULL) | ||
670 | mode_changed = true; | ||
671 | else if ((set->fb->bits_per_pixel != | ||
672 | set->crtc->fb->bits_per_pixel) || | ||
673 | set->fb->depth != set->crtc->fb->depth) | ||
674 | fb_changed = true; | ||
675 | else | ||
676 | fb_changed = true; | ||
677 | } | ||
678 | |||
679 | if (set->x != set->crtc->x || set->y != set->crtc->y) | ||
680 | fb_changed = true; | ||
681 | |||
682 | if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { | ||
683 | DRM_DEBUG("modes are different\n"); | ||
684 | drm_mode_debug_printmodeline(&set->crtc->mode); | ||
685 | drm_mode_debug_printmodeline(set->mode); | ||
686 | mode_changed = true; | ||
687 | } | ||
688 | |||
689 | /* a) traverse passed in connector list and get encoders for them */ | ||
690 | count = 0; | ||
691 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
692 | struct drm_connector_helper_funcs *connector_funcs = | ||
693 | connector->helper_private; | ||
694 | save_encoders[count++] = connector->encoder; | ||
695 | new_encoder = connector->encoder; | ||
696 | for (ro = 0; ro < set->num_connectors; ro++) { | ||
697 | if (set->connectors[ro] == connector) { | ||
698 | new_encoder = connector_funcs->best_encoder(connector); | ||
699 | /* if we can't get an encoder for a connector | ||
700 | we are setting now - then fail */ | ||
701 | if (new_encoder == NULL) | ||
702 | /* don't break so the fail path works correctly */ | ||
703 | fail = 1; | ||
704 | break; | ||
705 | } | ||
706 | } | ||
707 | |||
708 | if (new_encoder != connector->encoder) { | ||
709 | mode_changed = true; | ||
710 | connector->encoder = new_encoder; | ||
711 | } | ||
712 | } | ||
713 | |||
714 | if (fail) { | ||
715 | ret = -EINVAL; | ||
716 | goto fail_no_encoder; | ||
717 | } | ||
718 | |||
719 | count = 0; | ||
720 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
721 | if (!connector->encoder) | ||
722 | continue; | ||
723 | |||
724 | save_crtcs[count++] = connector->encoder->crtc; | ||
725 | |||
726 | if (connector->encoder->crtc == set->crtc) | ||
727 | new_crtc = NULL; | ||
728 | else | ||
729 | new_crtc = connector->encoder->crtc; | ||
730 | |||
731 | for (ro = 0; ro < set->num_connectors; ro++) { | ||
732 | if (set->connectors[ro] == connector) | ||
733 | new_crtc = set->crtc; | ||
734 | } | ||
735 | if (new_crtc != connector->encoder->crtc) { | ||
736 | mode_changed = true; | ||
737 | connector->encoder->crtc = new_crtc; | ||
738 | } | ||
739 | } | ||
740 | |||
741 | /* mode_set_base is not a required function */ | ||
742 | if (fb_changed && !crtc_funcs->mode_set_base) | ||
743 | mode_changed = true; | ||
744 | |||
745 | if (mode_changed) { | ||
746 | old_fb = set->crtc->fb; | ||
747 | set->crtc->fb = set->fb; | ||
748 | set->crtc->enabled = (set->mode != NULL); | ||
749 | if (set->mode != NULL) { | ||
750 | DRM_DEBUG("attempting to set mode from userspace\n"); | ||
751 | drm_mode_debug_printmodeline(set->mode); | ||
752 | if (!drm_crtc_helper_set_mode(set->crtc, set->mode, | ||
753 | set->x, set->y, | ||
754 | old_fb)) { | ||
755 | ret = -EINVAL; | ||
756 | goto fail_set_mode; | ||
757 | } | ||
758 | /* TODO are these needed? */ | ||
759 | set->crtc->desired_x = set->x; | ||
760 | set->crtc->desired_y = set->y; | ||
761 | set->crtc->desired_mode = set->mode; | ||
762 | } | ||
763 | drm_helper_disable_unused_functions(dev); | ||
764 | } else if (fb_changed) { | ||
765 | old_fb = set->crtc->fb; | ||
766 | if (set->crtc->fb != set->fb) | ||
767 | set->crtc->fb = set->fb; | ||
768 | crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb); | ||
769 | } | ||
770 | |||
771 | kfree(save_encoders); | ||
772 | kfree(save_crtcs); | ||
773 | return 0; | ||
774 | |||
775 | fail_set_mode: | ||
776 | set->crtc->enabled = save_enabled; | ||
777 | count = 0; | ||
778 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
779 | connector->encoder->crtc = save_crtcs[count++]; | ||
780 | fail_no_encoder: | ||
781 | kfree(save_crtcs); | ||
782 | count = 0; | ||
783 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
784 | connector->encoder = save_encoders[count++]; | ||
785 | } | ||
786 | kfree(save_encoders); | ||
787 | return ret; | ||
788 | } | ||
789 | EXPORT_SYMBOL(drm_crtc_helper_set_config); | ||
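
A sketch of a caller building a struct drm_mode_set and handing it to the helper (hypothetical example_* code; the field names match the set-> accesses in the function above): light up one connector on one CRTC from the top-left corner of a framebuffer, the same shape of request the SETCRTC ioctl produces.

	/* hypothetical: enable one connector on one CRTC with a given mode/fb */
	static int example_light_up(struct drm_crtc *crtc,
				    struct drm_connector *connector,
				    struct drm_display_mode *mode,
				    struct drm_framebuffer *fb)
	{
		struct drm_mode_set set = {
			.crtc		= crtc,
			.fb		= fb,
			.mode		= mode,
			.x		= 0,		/* scan out from the fb origin */
			.y		= 0,
			.connectors	= &connector,
			.num_connectors	= 1,
		};

		return drm_crtc_helper_set_config(&set); /* 0 on success, -errno on failure */
	}
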
790 | |||
791 | bool drm_helper_plugged_event(struct drm_device *dev) | ||
792 | { | ||
793 | DRM_DEBUG("\n"); | ||
794 | |||
795 | drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, | ||
796 | dev->mode_config.max_height); | ||
797 | |||
798 | drm_setup_crtcs(dev); | ||
799 | |||
800 | /* alert the driver fb layer */ | ||
801 | dev->mode_config.funcs->fb_changed(dev); | ||
802 | |||
803 | /* FIXME: send hotplug event */ | ||
804 | return true; | ||
805 | } | ||
806 | /** | ||
807 | * drm_initial_config - setup a sane initial connector configuration | ||
808 | * @dev: DRM device | ||
809 | * @can_grow: this configuration is growable | ||
810 | * | ||
811 | * LOCKING: | ||
812 | * Called at init time, must take mode config lock. | ||
813 | * | ||
814 | * Scan the CRTCs and connectors and try to put together an initial setup. | ||
815 | * At the moment, this is a cloned configuration across all heads with | ||
816 | * a new framebuffer object as the backing store. | ||
817 | * | ||
818 | * RETURNS: | ||
819 | * Zero if everything went ok, nonzero otherwise. | ||
820 | */ | ||
821 | bool drm_helper_initial_config(struct drm_device *dev, bool can_grow) | ||
822 | { | ||
823 | struct drm_connector *connector; | ||
824 | int count = 0; | ||
825 | |||
826 | count = drm_helper_probe_connector_modes(dev, | ||
827 | dev->mode_config.max_width, | ||
828 | dev->mode_config.max_height); | ||
829 | |||
830 | /* | ||
831 | * None of the available connectors had any modes, so add some | ||
832 | * and try to light them up anyway | ||
833 | */ | ||
834 | if (!count) { | ||
835 | DRM_ERROR("connectors have no modes, using standard modes\n"); | ||
836 | list_for_each_entry(connector, | ||
837 | &dev->mode_config.connector_list, | ||
838 | head) | ||
839 | drm_helper_add_std_modes(dev, connector); | ||
840 | } | ||
841 | |||
842 | drm_setup_crtcs(dev); | ||
843 | |||
844 | /* alert the driver fb layer */ | ||
845 | dev->mode_config.funcs->fb_changed(dev); | ||
846 | |||
847 | return 0; | ||
848 | } | ||
849 | EXPORT_SYMBOL(drm_helper_initial_config); | ||
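
A load-time sketch (hypothetical example_* code): after the driver has registered its CRTCs, encoders and connectors, drm_helper_initial_config() probes the connectors, falls back to standard modes when nothing was found, and picks an initial CRTC routing.

	/* hypothetical driver load path */
	static int example_modeset_init(struct drm_device *dev)
	{
		/* ... example_create_crtcs(dev), example_create_outputs(dev) ... */

		/* pick and apply a sane starting configuration */
		drm_helper_initial_config(dev, false);

		return 0;
	}
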
850 | |||
851 | /** | ||
852 | * drm_helper_hotplug_stage_two - second stage of connector hotplug handling | ||
853 | * @dev: DRM device | ||
854 | * | ||
855 | * LOCKING: | ||
856 | * Caller must hold the mode config lock; this function may take the | ||
857 | * struct lock. | ||
858 | * | ||
859 | * Stage two of a hotplug. | ||
860 | * | ||
861 | * RETURNS: | ||
862 | * Zero on success, errno on failure. | ||
863 | */ | ||
864 | int drm_helper_hotplug_stage_two(struct drm_device *dev) | ||
865 | { | ||
866 | drm_helper_plugged_event(dev); | ||
867 | |||
868 | return 0; | ||
869 | } | ||
870 | EXPORT_SYMBOL(drm_helper_hotplug_stage_two); | ||
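
A hotplug sketch (hypothetical example_* code; example_work_to_dev() is an assumed container_of-style helper): the interrupt handler schedules work, and the work function runs stage two under the mode config lock as the kerneldoc above requires.

	/* hypothetical: process-context half of a hotplug interrupt */
	static void example_hotplug_work_fn(struct work_struct *work)
	{
		struct drm_device *dev = example_work_to_dev(work); /* assumed helper */

		mutex_lock(&dev->mode_config.mutex);
		drm_helper_hotplug_stage_two(dev);	/* re-probe and re-route outputs */
		mutex_unlock(&dev->mode_config.mutex);
	}
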
871 | |||
872 | int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, | ||
873 | struct drm_mode_fb_cmd *mode_cmd) | ||
874 | { | ||
875 | fb->width = mode_cmd->width; | ||
876 | fb->height = mode_cmd->height; | ||
877 | fb->pitch = mode_cmd->pitch; | ||
878 | fb->bits_per_pixel = mode_cmd->bpp; | ||
879 | fb->depth = mode_cmd->depth; | ||
880 | |||
881 | return 0; | ||
882 | } | ||
883 | EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); | ||
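
A sketch of the intended caller (hypothetical example_* types): a driver embeds struct drm_framebuffer in its own framebuffer object and uses the helper to copy the userspace-supplied geometry into the core structure.

	/* hypothetical driver framebuffer wrapper */
	struct example_framebuffer {
		struct drm_framebuffer base;
		void *bo;			/* driver buffer object, assumed */
	};

	static int example_fb_init(struct example_framebuffer *efb,
				   struct drm_mode_fb_cmd *mode_cmd, void *bo)
	{
		/* copies width, height, pitch, bpp and depth into efb->base */
		drm_helper_mode_fill_fb_struct(&efb->base, mode_cmd);
		efb->bo = bo;
		return 0;
	}
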
884 | |||
885 | int drm_helper_resume_force_mode(struct drm_device *dev) | ||
886 | { | ||
887 | struct drm_crtc *crtc; | ||
888 | int ret; | ||
889 | |||
890 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
891 | |||
892 | if (!crtc->enabled) | ||
893 | continue; | ||
894 | |||
895 | ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
896 | crtc->x, crtc->y, crtc->fb); | ||
897 | |||
898 | if (ret == false) | ||
899 | DRM_ERROR("failed to set mode on crtc %p\n", crtc); | ||
900 | } | ||
901 | return 0; | ||
902 | } | ||
903 | EXPORT_SYMBOL(drm_helper_resume_force_mode); | ||
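
A resume sketch (hypothetical example_* code): once the hardware has been reinitialised, the helper re-applies the mode that each enabled CRTC was running before suspend.

	/* hypothetical power-management resume hook */
	static int example_pm_resume(struct drm_device *dev)
	{
		/* ... reinitialise clocks and hardware state (driver specific) ... */

		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);	/* re-set crtc->mode on enabled CRTCs */
		mutex_unlock(&dev->mode_config.mutex);

		return 0;
	}
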
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 3ab1e9cc4692..14c7a23dc157 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
74 | DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 74 | DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
75 | DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), | 75 | DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), |
76 | 76 | ||
77 | DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), | ||
78 | DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), | ||
79 | |||
77 | DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), | 80 | DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), |
78 | DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 81 | DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
79 | DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 82 | DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
123 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), | 126 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), |
124 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), | 127 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), |
125 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), | 128 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), |
129 | |||
130 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
131 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
132 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
133 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
134 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), | ||
135 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), | ||
136 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
137 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
138 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
139 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
140 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), | ||
141 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
142 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
143 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
144 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
126 | }; | 146 | }; |
127 | 147 | ||
128 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) | 148 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) |
@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
138 | */ | 158 | */ |
139 | int drm_lastclose(struct drm_device * dev) | 159 | int drm_lastclose(struct drm_device * dev) |
140 | { | 160 | { |
141 | struct drm_magic_entry *pt, *next; | ||
142 | struct drm_map_list *r_list, *list_t; | ||
143 | struct drm_vma_entry *vma, *vma_temp; | 161 | struct drm_vma_entry *vma, *vma_temp; |
144 | int i; | 162 | int i; |
145 | 163 | ||
@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev) | |||
149 | dev->driver->lastclose(dev); | 167 | dev->driver->lastclose(dev); |
150 | DRM_DEBUG("driver lastclose completed\n"); | 168 | DRM_DEBUG("driver lastclose completed\n"); |
151 | 169 | ||
152 | if (dev->unique) { | 170 | if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) |
153 | drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); | ||
154 | dev->unique = NULL; | ||
155 | dev->unique_len = 0; | ||
156 | } | ||
157 | |||
158 | if (dev->irq_enabled) | ||
159 | drm_irq_uninstall(dev); | 171 | drm_irq_uninstall(dev); |
160 | 172 | ||
161 | mutex_lock(&dev->struct_mutex); | 173 | mutex_lock(&dev->struct_mutex); |
@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev) | |||
164 | drm_drawable_free_all(dev); | 176 | drm_drawable_free_all(dev); |
165 | del_timer(&dev->timer); | 177 | del_timer(&dev->timer); |
166 | 178 | ||
167 | /* Clear pid list */ | ||
168 | if (dev->magicfree.next) { | ||
169 | list_for_each_entry_safe(pt, next, &dev->magicfree, head) { | ||
170 | list_del(&pt->head); | ||
171 | drm_ht_remove_item(&dev->magiclist, &pt->hash_item); | ||
172 | drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); | ||
173 | } | ||
174 | drm_ht_remove(&dev->magiclist); | ||
175 | } | ||
176 | |||
177 | /* Clear AGP information */ | 179 | /* Clear AGP information */ |
178 | if (drm_core_has_AGP(dev) && dev->agp) { | 180 | if (drm_core_has_AGP(dev) && dev->agp && |
181 | !drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
179 | struct drm_agp_mem *entry, *tempe; | 182 | struct drm_agp_mem *entry, *tempe; |
180 | 183 | ||
181 | /* Remove AGP resources, but leave dev->agp | 184 | /* Remove AGP resources, but leave dev->agp |
@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev) | |||
194 | dev->agp->acquired = 0; | 197 | dev->agp->acquired = 0; |
195 | dev->agp->enabled = 0; | 198 | dev->agp->enabled = 0; |
196 | } | 199 | } |
197 | if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { | 200 | if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && |
201 | !drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
198 | drm_sg_cleanup(dev->sg); | 202 | drm_sg_cleanup(dev->sg); |
199 | dev->sg = NULL; | 203 | dev->sg = NULL; |
200 | } | 204 | } |
@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev) | |||
205 | drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); | 209 | drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); |
206 | } | 210 | } |
207 | 211 | ||
208 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { | ||
209 | if (!(r_list->map->flags & _DRM_DRIVER)) { | ||
210 | drm_rmmap_locked(dev, r_list->map); | ||
211 | r_list = NULL; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { | 212 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { |
216 | for (i = 0; i < dev->queue_count; i++) { | 213 | for (i = 0; i < dev->queue_count; i++) { |
217 | if (dev->queuelist[i]) { | 214 | if (dev->queuelist[i]) { |
@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev) | |||
228 | } | 225 | } |
229 | dev->queue_count = 0; | 226 | dev->queue_count = 0; |
230 | 227 | ||
231 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 228 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && |
229 | !drm_core_check_feature(dev, DRIVER_MODESET)) | ||
232 | drm_dma_takedown(dev); | 230 | drm_dma_takedown(dev); |
233 | 231 | ||
234 | if (dev->lock.hw_lock) { | 232 | dev->dev_mapping = NULL; |
235 | dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ | ||
236 | dev->lock.file_priv = NULL; | ||
237 | wake_up_interruptible(&dev->lock.lock_queue); | ||
238 | } | ||
239 | mutex_unlock(&dev->struct_mutex); | 233 | mutex_unlock(&dev->struct_mutex); |
240 | 234 | ||
241 | DRM_DEBUG("lastclose completed\n"); | 235 | DRM_DEBUG("lastclose completed\n"); |
@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver) | |||
263 | 257 | ||
264 | DRM_DEBUG("\n"); | 258 | DRM_DEBUG("\n"); |
265 | 259 | ||
260 | INIT_LIST_HEAD(&driver->device_list); | ||
261 | |||
266 | for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { | 262 | for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { |
267 | pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; | 263 | pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; |
268 | 264 | ||
@@ -298,6 +294,7 @@ EXPORT_SYMBOL(drm_init); | |||
298 | */ | 294 | */ |
299 | static void drm_cleanup(struct drm_device * dev) | 295 | static void drm_cleanup(struct drm_device * dev) |
300 | { | 296 | { |
297 | struct drm_map_list *r_list, *list_temp; | ||
301 | DRM_DEBUG("\n"); | 298 | DRM_DEBUG("\n"); |
302 | 299 | ||
303 | if (!dev) { | 300 | if (!dev) { |
@@ -305,6 +302,8 @@ static void drm_cleanup(struct drm_device * dev) | |||
305 | return; | 302 | return; |
306 | } | 303 | } |
307 | 304 | ||
305 | drm_vblank_cleanup(dev); | ||
306 | |||
308 | drm_lastclose(dev); | 307 | drm_lastclose(dev); |
309 | 308 | ||
310 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && | 309 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && |
@@ -316,46 +315,38 @@ static void drm_cleanup(struct drm_device * dev) | |||
316 | DRM_DEBUG("mtrr_del=%d\n", retval); | 315 | DRM_DEBUG("mtrr_del=%d\n", retval); |
317 | } | 316 | } |
318 | 317 | ||
318 | if (dev->driver->unload) | ||
319 | dev->driver->unload(dev); | ||
320 | |||
319 | if (drm_core_has_AGP(dev) && dev->agp) { | 321 | if (drm_core_has_AGP(dev) && dev->agp) { |
320 | drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); | 322 | drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); |
321 | dev->agp = NULL; | 323 | dev->agp = NULL; |
322 | } | 324 | } |
323 | 325 | ||
324 | if (dev->driver->unload) | ||
325 | dev->driver->unload(dev); | ||
326 | |||
327 | drm_ht_remove(&dev->map_hash); | 326 | drm_ht_remove(&dev->map_hash); |
328 | drm_ctxbitmap_cleanup(dev); | 327 | drm_ctxbitmap_cleanup(dev); |
329 | 328 | ||
330 | drm_put_minor(&dev->primary); | 329 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) |
331 | if (drm_put_dev(dev)) | 330 | drm_rmmap(dev, r_list->map); |
332 | DRM_ERROR("Cannot unload module\n"); | ||
333 | } | ||
334 | |||
335 | static int drm_minors_cleanup(int id, void *ptr, void *data) | ||
336 | { | ||
337 | struct drm_minor *minor = ptr; | ||
338 | struct drm_device *dev; | ||
339 | struct drm_driver *driver = data; | ||
340 | 331 | ||
341 | dev = minor->dev; | 332 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
342 | if (minor->dev->driver != driver) | 333 | drm_put_minor(&dev->control); |
343 | return 0; | ||
344 | 334 | ||
345 | if (minor->type != DRM_MINOR_LEGACY) | 335 | if (dev->driver->driver_features & DRIVER_GEM) |
346 | return 0; | 336 | drm_gem_destroy(dev); |
347 | 337 | ||
348 | if (dev) | 338 | drm_put_minor(&dev->primary); |
349 | pci_dev_put(dev->pdev); | 339 | if (drm_put_dev(dev)) |
350 | drm_cleanup(dev); | 340 | DRM_ERROR("Cannot unload module\n"); |
351 | return 1; | ||
352 | } | 341 | } |
353 | 342 | ||
354 | void drm_exit(struct drm_driver *driver) | 343 | void drm_exit(struct drm_driver *driver) |
355 | { | 344 | { |
345 | struct drm_device *dev, *tmp; | ||
356 | DRM_DEBUG("\n"); | 346 | DRM_DEBUG("\n"); |
357 | 347 | ||
358 | idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver); | 348 | list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) |
349 | drm_cleanup(dev); | ||
359 | 350 | ||
360 | DRM_INFO("Module unloaded\n"); | 351 | DRM_INFO("Module unloaded\n"); |
361 | } | 352 | } |
@@ -501,7 +492,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, | |||
501 | retcode = -EINVAL; | 492 | retcode = -EINVAL; |
502 | } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || | 493 | } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || |
503 | ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || | 494 | ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || |
504 | ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { | 495 | ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) { |
505 | retcode = -EACCES; | 496 | retcode = -EACCES; |
506 | } else { | 497 | } else { |
507 | if (cmd & (IOC_IN | IOC_OUT)) { | 498 | if (cmd & (IOC_IN | IOC_OUT)) { |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c new file mode 100644 index 000000000000..5a4d3244758a --- /dev/null +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -0,0 +1,732 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Luc Verhaegen (quirks list) | ||
3 | * Copyright (c) 2007-2008 Intel Corporation | ||
4 | * Jesse Barnes <jesse.barnes@intel.com> | ||
5 | * | ||
6 | * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from | ||
7 | * FB layer. | ||
8 | * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com> | ||
9 | * | ||
10 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
11 | * copy of this software and associated documentation files (the "Software"), | ||
12 | * to deal in the Software without restriction, including without limitation | ||
13 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
14 | * and/or sell copies of the Software, and to permit persons to whom the | ||
15 | * Software is furnished to do so, subject to the following conditions: | ||
16 | * | ||
17 | * The above copyright notice and this permission notice (including the | ||
18 | * next paragraph) shall be included in all copies or substantial portions | ||
19 | * of the Software. | ||
20 | * | ||
21 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
22 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
23 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
24 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
25 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
26 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
27 | * DEALINGS IN THE SOFTWARE. | ||
28 | */ | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/i2c.h> | ||
31 | #include <linux/i2c-algo-bit.h> | ||
32 | #include "drmP.h" | ||
33 | #include "drm_edid.h" | ||
34 | |||
35 | /* | ||
36 | * TODO: | ||
37 | * - support EDID 1.4 (incl. CE blocks) | ||
38 | */ | ||
39 | |||
40 | /* | ||
41 | * EDID blocks out in the wild have a variety of bugs, try to collect | ||
42 | * them here (note that userspace may work around broken monitors first, | ||
43 | * but fixes should make their way here so that the kernel "just works" | ||
44 | * on as many displays as possible). | ||
45 | */ | ||
46 | |||
47 | /* First detailed mode wrong, use largest 60Hz mode */ | ||
48 | #define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) | ||
49 | /* Reported 135MHz pixel clock is too high, needs adjustment */ | ||
50 | #define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) | ||
51 | /* Prefer the largest mode at 75 Hz */ | ||
52 | #define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) | ||
53 | /* Detail timing is in cm not mm */ | ||
54 | #define EDID_QUIRK_DETAILED_IN_CM (1 << 3) | ||
55 | /* Detailed timing descriptors have bogus size values, so just take the | ||
56 | * maximum size and use that. | ||
57 | */ | ||
58 | #define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) | ||
59 | /* Monitor forgot to set the "first detailed timing is preferred" bit. */ | ||
60 | #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) | ||
61 | /* use +hsync +vsync for detailed mode */ | ||
62 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) | ||
63 | |||
64 | static struct edid_quirk { | ||
65 | char *vendor; | ||
66 | int product_id; | ||
67 | u32 quirks; | ||
68 | } edid_quirk_list[] = { | ||
69 | /* Acer AL1706 */ | ||
70 | { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, | ||
71 | /* Acer F51 */ | ||
72 | { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, | ||
73 | /* Unknown Acer */ | ||
74 | { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, | ||
75 | |||
76 | /* Belinea 10 15 55 */ | ||
77 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, | ||
78 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, | ||
79 | |||
80 | /* Envision Peripherals, Inc. EN-7100e */ | ||
81 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, | ||
82 | |||
83 | /* Funai Electronics PM36B */ | ||
84 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | | ||
85 | EDID_QUIRK_DETAILED_IN_CM }, | ||
86 | |||
87 | /* LG Philips LCD LP154W01-A5 */ | ||
88 | { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, | ||
89 | { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, | ||
90 | |||
91 | /* Philips 107p5 CRT */ | ||
92 | { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, | ||
93 | |||
94 | /* Proview AY765C */ | ||
95 | { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, | ||
96 | |||
97 | /* Samsung SyncMaster 205BW. Note: irony */ | ||
98 | { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, | ||
99 | /* Samsung SyncMaster 22[5-6]BW */ | ||
100 | { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, | ||
101 | { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, | ||
102 | }; | ||
103 | |||
104 | |||
105 | /* Valid EDID header has these bytes */ | ||
106 | static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; | ||
107 | |||
108 | /** | ||
109 | * edid_is_valid - sanity check EDID data | ||
110 | * @edid: EDID data | ||
111 | * | ||
112 | * Sanity check the EDID block by looking at the header, the version number | ||
113 | * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's | ||
114 | * valid. | ||
115 | */ | ||
116 | static bool edid_is_valid(struct edid *edid) | ||
117 | { | ||
118 | int i; | ||
119 | u8 csum = 0; | ||
120 | u8 *raw_edid = (u8 *)edid; | ||
121 | |||
122 | if (memcmp(edid->header, edid_header, sizeof(edid_header))) | ||
123 | goto bad; | ||
124 | if (edid->version != 1) { | ||
125 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | ||
126 | goto bad; | ||
127 | } | ||
128 | if (edid->revision <= 0 || edid->revision > 3) { | ||
129 | DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); | ||
130 | goto bad; | ||
131 | } | ||
132 | |||
133 | for (i = 0; i < EDID_LENGTH; i++) | ||
134 | csum += raw_edid[i]; | ||
135 | if (csum) { | ||
136 | DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); | ||
137 | goto bad; | ||
138 | } | ||
139 | |||
140 | return 1; | ||
141 | |||
142 | bad: | ||
143 | if (raw_edid) { | ||
144 | DRM_ERROR("Raw EDID:\n"); | ||
145 | print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); | ||
146 | printk("\n"); | ||
147 | } | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * edid_vendor - match a string against EDID's obfuscated vendor field | ||
153 | * @edid: EDID to match | ||
154 | * @vendor: vendor string | ||
155 | * | ||
156 | * Returns true if @vendor is in @edid, false otherwise | ||
157 | */ | ||
158 | static bool edid_vendor(struct edid *edid, char *vendor) | ||
159 | { | ||
160 | char edid_vendor[3]; | ||
161 | |||
162 | edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; | ||
163 | edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | | ||
164 | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; | ||
165 | edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; | ||
166 | |||
167 | return !strncmp(edid_vendor, vendor, 3); | ||
168 | } | ||
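
The vendor ID is three letters packed as 5-bit values ('A' == 1) across the two mfg_id bytes. A small stand-alone check (ordinary userspace C, not part of the patch) shows the unpacking for the example bytes 0x4c 0x2d, which decode to "SAM":

	#include <stdio.h>

	int main(void)
	{
		unsigned char mfg_id[2] = { 0x4c, 0x2d };	/* example EDID bytes */
		char v[4];

		v[0] = ((mfg_id[0] & 0x7c) >> 2) + '@';		/* bits 14-10 */
		v[1] = (((mfg_id[0] & 0x03) << 3) |
			((mfg_id[1] & 0xe0) >> 5)) + '@';	/* bits 9-5 */
		v[2] = (mfg_id[1] & 0x1f) + '@';		/* bits 4-0 */
		v[3] = '\0';

		printf("%s\n", v);	/* prints "SAM" */
		return 0;
	}
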
169 | |||
170 | /** | ||
171 | * edid_get_quirks - return quirk flags for a given EDID | ||
172 | * @edid: EDID to process | ||
173 | * | ||
174 | * This tells subsequent routines what fixes they need to apply. | ||
175 | */ | ||
176 | static u32 edid_get_quirks(struct edid *edid) | ||
177 | { | ||
178 | struct edid_quirk *quirk; | ||
179 | int i; | ||
180 | |||
181 | for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { | ||
182 | quirk = &edid_quirk_list[i]; | ||
183 | |||
184 | if (edid_vendor(edid, quirk->vendor) && | ||
185 | (EDID_PRODUCT_ID(edid) == quirk->product_id)) | ||
186 | return quirk->quirks; | ||
187 | } | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) | ||
193 | #define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r))) | ||
194 | |||
195 | |||
196 | /** | ||
197 | * edid_fixup_preferred - set preferred modes based on quirk list | ||
198 | * @connector: has mode list to fix up | ||
199 | * @quirks: quirks list | ||
200 | * | ||
201 | * Walk the mode list for @connector, clearing the preferred status | ||
202 | * on existing modes and setting it anew for the right mode ala @quirks. | ||
203 | */ | ||
204 | static void edid_fixup_preferred(struct drm_connector *connector, | ||
205 | u32 quirks) | ||
206 | { | ||
207 | struct drm_display_mode *t, *cur_mode, *preferred_mode; | ||
208 | int target_refresh = 0; | ||
209 | |||
210 | if (list_empty(&connector->probed_modes)) | ||
211 | return; | ||
212 | |||
213 | if (quirks & EDID_QUIRK_PREFER_LARGE_60) | ||
214 | target_refresh = 60; | ||
215 | if (quirks & EDID_QUIRK_PREFER_LARGE_75) | ||
216 | target_refresh = 75; | ||
217 | |||
218 | preferred_mode = list_first_entry(&connector->probed_modes, | ||
219 | struct drm_display_mode, head); | ||
220 | |||
221 | list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { | ||
222 | cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; | ||
223 | |||
224 | if (cur_mode == preferred_mode) | ||
225 | continue; | ||
226 | |||
227 | /* Largest mode is preferred */ | ||
228 | if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) | ||
229 | preferred_mode = cur_mode; | ||
230 | |||
231 | /* At a given size, try to get closest to target refresh */ | ||
232 | if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && | ||
233 | MODE_REFRESH_DIFF(cur_mode, target_refresh) < | ||
234 | MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { | ||
235 | preferred_mode = cur_mode; | ||
236 | } | ||
237 | } | ||
238 | |||
239 | preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * drm_mode_std - convert standard mode info (width, height, refresh) into mode | ||
244 | * @t: standard timing params | ||
245 | * | ||
246 | * Take the standard timing params (in this case width, aspect, and refresh) | ||
247 | * and convert them into a real mode using CVT. | ||
248 | * | ||
249 | * Punts for now, but should eventually use the FB layer's CVT based mode | ||
250 | * generation code. | ||
251 | */ | ||
252 | struct drm_display_mode *drm_mode_std(struct drm_device *dev, | ||
253 | struct std_timing *t) | ||
254 | { | ||
255 | struct drm_display_mode *mode; | ||
256 | int hsize = t->hsize * 8 + 248, vsize; | ||
257 | |||
258 | mode = drm_mode_create(dev); | ||
259 | if (!mode) | ||
260 | return NULL; | ||
261 | |||
262 | if (t->aspect_ratio == 0) | ||
263 | vsize = (hsize * 10) / 16; | ||
264 | else if (t->aspect_ratio == 1) | ||
265 | vsize = (hsize * 3) / 4; | ||
266 | else if (t->aspect_ratio == 2) | ||
267 | vsize = (hsize * 4) / 5; | ||
268 | else | ||
269 | vsize = (hsize * 9) / 16; | ||
270 | |||
271 | drm_mode_set_name(mode); | ||
272 | |||
273 | return mode; | ||
274 | } | ||
275 | |||
276 | /** | ||
277 | * drm_mode_detailed - create a new mode from an EDID detailed timing section | ||
278 | * @dev: DRM device (needed to create new mode) | ||
279 | * @edid: EDID block | ||
280 | * @timing: EDID detailed timing info | ||
281 | * @quirks: quirks to apply | ||
282 | * | ||
283 | * An EDID detailed timing block contains enough info for us to create and | ||
284 | * return a new struct drm_display_mode. | ||
285 | */ | ||
286 | static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | ||
287 | struct edid *edid, | ||
288 | struct detailed_timing *timing, | ||
289 | u32 quirks) | ||
290 | { | ||
291 | struct drm_display_mode *mode; | ||
292 | struct detailed_pixel_timing *pt = &timing->data.pixel_data; | ||
293 | |||
294 | if (pt->stereo) { | ||
295 | printk(KERN_WARNING "stereo mode not supported\n"); | ||
296 | return NULL; | ||
297 | } | ||
298 | if (!pt->separate_sync) { | ||
299 | printk(KERN_WARNING "integrated sync not supported\n"); | ||
300 | return NULL; | ||
301 | } | ||
302 | |||
303 | mode = drm_mode_create(dev); | ||
304 | if (!mode) | ||
305 | return NULL; | ||
306 | |||
307 | mode->type = DRM_MODE_TYPE_DRIVER; | ||
308 | |||
309 | if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) | ||
310 | timing->pixel_clock = 1088; | ||
311 | |||
312 | mode->clock = timing->pixel_clock * 10; | ||
313 | |||
314 | mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo; | ||
315 | mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) | | ||
316 | pt->hsync_offset_lo); | ||
317 | mode->hsync_end = mode->hsync_start + | ||
318 | ((pt->hsync_pulse_width_hi << 8) | | ||
319 | pt->hsync_pulse_width_lo); | ||
320 | mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); | ||
321 | |||
322 | mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; | ||
323 | mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) | | ||
324 | pt->vsync_offset_lo); | ||
325 | mode->vsync_end = mode->vsync_start + | ||
326 | ((pt->vsync_pulse_width_hi << 8) | | ||
327 | pt->vsync_pulse_width_lo); | ||
328 | mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); | ||
329 | |||
330 | drm_mode_set_name(mode); | ||
331 | |||
332 | if (pt->interlaced) | ||
333 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
334 | |||
335 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { | ||
336 | pt->hsync_positive = 1; | ||
337 | pt->vsync_positive = 1; | ||
338 | } | ||
339 | |||
340 | mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; | ||
341 | mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; | ||
342 | |||
343 | mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8); | ||
344 | mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8); | ||
345 | |||
346 | if (quirks & EDID_QUIRK_DETAILED_IN_CM) { | ||
347 | mode->width_mm *= 10; | ||
348 | mode->height_mm *= 10; | ||
349 | } | ||
350 | |||
351 | if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) { | ||
352 | mode->width_mm = edid->width_cm * 10; | ||
353 | mode->height_mm = edid->height_cm * 10; | ||
354 | } | ||
355 | |||
356 | return mode; | ||
357 | } | ||
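
The detailed-timing fields assembled above are enough to recover the vertical refresh rate: vrefresh is roughly clock[kHz] * 1000 / (htotal * vtotal), ignoring interlace and doublescan adjustments. A small stand-alone check (ordinary C, not part of the patch, using the 1024x768@60 numbers from the established-modes table that follows):

	#include <stdio.h>

	int main(void)
	{
		int clock = 65000;			/* pixel clock in kHz (65 MHz) */
		int htotal = 1344, vtotal = 806;	/* total scanline / frame sizes */

		/* vertical refresh in Hz, rounded to the nearest integer */
		int vrefresh = (clock * 1000 + (htotal * vtotal) / 2) / (htotal * vtotal);

		printf("%d Hz\n", vrefresh);		/* prints "60 Hz" */
		return 0;
	}
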
358 | |||
359 | /* | ||
360 | * Detailed mode info for the EDID "established modes" data to use. | ||
361 | */ | ||
362 | static struct drm_display_mode edid_est_modes[] = { | ||
363 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, | ||
364 | 968, 1056, 0, 600, 601, 605, 628, 0, | ||
365 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ | ||
366 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, | ||
367 | 896, 1024, 0, 600, 601, 603, 625, 0, | ||
368 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ | ||
369 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, | ||
370 | 720, 840, 0, 480, 481, 484, 500, 0, | ||
371 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ | ||
372 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, | ||
373 | 704, 832, 0, 480, 489, 491, 520, 0, | ||
374 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ | ||
375 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, | ||
376 | 768, 864, 0, 480, 483, 486, 525, 0, | ||
377 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ | ||
378 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, | ||
379 | 752, 800, 0, 480, 490, 492, 525, 0, | ||
380 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ | ||
381 | { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, | ||
382 | 846, 900, 0, 400, 421, 423, 449, 0, | ||
383 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ | ||
384 | { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, | ||
385 | 846, 900, 0, 400, 412, 414, 449, 0, | ||
386 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ | ||
387 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, | ||
388 | 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, | ||
389 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ | ||
390 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, | ||
391 | 1136, 1312, 0, 768, 769, 772, 800, 0, | ||
392 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ | ||
393 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, | ||
394 | 1184, 1328, 0, 768, 771, 777, 806, 0, | ||
395 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ | ||
396 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | ||
397 | 1184, 1344, 0, 768, 771, 777, 806, 0, | ||
398 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ | ||
399 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, | ||
400 | 1208, 1264, 0, 768, 768, 776, 817, 0, | ||
401 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ | ||
402 | { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, | ||
403 | 928, 1152, 0, 624, 625, 628, 667, 0, | ||
404 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ | ||
405 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, | ||
406 | 896, 1056, 0, 600, 601, 604, 625, 0, | ||
407 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ | ||
408 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, | ||
409 | 976, 1040, 0, 600, 637, 643, 666, 0, | ||
410 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ | ||
411 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | ||
412 | 1344, 1600, 0, 864, 865, 868, 900, 0, | ||
413 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ | ||
414 | }; | ||
415 | |||
416 | #define EDID_EST_TIMINGS 16 | ||
417 | #define EDID_STD_TIMINGS 8 | ||
418 | #define EDID_DETAILED_TIMINGS 4 | ||
419 | |||
420 | /** | ||
421 | * add_established_modes - get est. modes from EDID and add them | ||
422 | * @edid: EDID block to scan | ||
423 | * | ||
424 | * Each EDID block contains a bitmap of the supported "established modes" list | ||
425 | * (defined above). Tease them out and add them to the global modes list. | ||
426 | */ | ||
427 | static int add_established_modes(struct drm_connector *connector, struct edid *edid) | ||
428 | { | ||
429 | struct drm_device *dev = connector->dev; | ||
430 | unsigned long est_bits = edid->established_timings.t1 | | ||
431 | (edid->established_timings.t2 << 8) | | ||
432 | ((edid->established_timings.mfg_rsvd & 0x80) << 9); | ||
433 | int i, modes = 0; | ||
434 | |||
435 | for (i = 0; i <= EDID_EST_TIMINGS; i++) | ||
436 | if (est_bits & (1<<i)) { | ||
437 | struct drm_display_mode *newmode; | ||
438 | newmode = drm_mode_duplicate(dev, &edid_est_modes[i]); | ||
439 | if (newmode) { | ||
440 | drm_mode_probed_add(connector, newmode); | ||
441 | modes++; | ||
442 | } | ||
443 | } | ||
444 | |||
445 | return modes; | ||
446 | } | ||
447 | |||
448 | /** | ||
449 | * add_standard_modes - get std. modes from EDID and add them | ||
450 | * @edid: EDID block to scan | ||
451 | * | ||
452 | * Standard modes can be calculated using the CVT standard. Grab them from | ||
453 | * @edid, calculate them, and add them to the list. | ||
454 | */ | ||
455 | static int add_standard_modes(struct drm_connector *connector, struct edid *edid) | ||
456 | { | ||
457 | struct drm_device *dev = connector->dev; | ||
458 | int i, modes = 0; | ||
459 | |||
460 | for (i = 0; i < EDID_STD_TIMINGS; i++) { | ||
461 | struct std_timing *t = &edid->standard_timings[i]; | ||
462 | struct drm_display_mode *newmode; | ||
463 | |||
464 | /* Unused slots are marked with bytes 0x01, 0x01; skip them */ | ||
465 | if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1) | ||
466 | continue; | ||
467 | |||
468 | newmode = drm_mode_std(dev, &edid->standard_timings[i]); | ||
469 | if (newmode) { | ||
470 | drm_mode_probed_add(connector, newmode); | ||
471 | modes++; | ||
472 | } | ||
473 | } | ||
474 | |||
475 | return modes; | ||
476 | } | ||
477 | |||
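drm_mode_std() (not shown in this hunk) is expected to apply the usual EDID standard-timing encoding: horizontal size is (first byte + 31) * 8, refresh is the low six bits of the second byte plus 60 Hz, and the top two bits of the second byte select the aspect ratio (16:10, 4:3, 5:4 or 16:9 under EDID 1.3). A worked example with a hypothetical byte pair:

    /* Hypothetical standard-timing bytes 0x81, 0xc0:
     *   horizontal = (0x81 + 31) * 8    = 1280 pixels
     *   refresh    = (0xc0 & 0x3f) + 60 = 60 Hz
     *   aspect     = 0xc0 >> 6          = 3 -> 16:9 -> 720 lines
     * so this slot describes 1280x720@60Hz.
     */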
478 | /** | ||
479 | * add_detailed_info - get detailed mode info from EDID data | ||
480 | * @connector: attached connector | ||
481 | * @edid: EDID block to scan | ||
482 | * @quirks: quirks to apply | ||
483 | * | ||
484 | * Some of the detailed timing sections may contain mode information. Grab | ||
485 | * it and add it to the list. | ||
486 | */ | ||
487 | static int add_detailed_info(struct drm_connector *connector, | ||
488 | struct edid *edid, u32 quirks) | ||
489 | { | ||
490 | struct drm_device *dev = connector->dev; | ||
491 | int i, j, modes = 0; | ||
492 | |||
493 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { | ||
494 | struct detailed_timing *timing = &edid->detailed_timings[i]; | ||
495 | struct detailed_non_pixel *data = &timing->data.other_data; | ||
496 | struct drm_display_mode *newmode; | ||
497 | |||
498 | /* EDID up to and including 1.2 may put monitor info here */ | ||
499 | if (edid->version == 1 && edid->revision < 3) | ||
500 | continue; | ||
501 | |||
502 | /* Detailed mode timing */ | ||
503 | if (timing->pixel_clock) { | ||
504 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
505 | if (!newmode) | ||
506 | continue; | ||
507 | |||
508 | /* First detailed mode is preferred */ | ||
509 | if (i == 0 && edid->preferred_timing) | ||
510 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | ||
511 | drm_mode_probed_add(connector, newmode); | ||
512 | |||
513 | modes++; | ||
514 | continue; | ||
515 | } | ||
516 | |||
517 | /* Other timing or info */ | ||
518 | switch (data->type) { | ||
519 | case EDID_DETAIL_MONITOR_SERIAL: | ||
520 | break; | ||
521 | case EDID_DETAIL_MONITOR_STRING: | ||
522 | break; | ||
523 | case EDID_DETAIL_MONITOR_RANGE: | ||
524 | /* Get monitor range data */ | ||
525 | break; | ||
526 | case EDID_DETAIL_MONITOR_NAME: | ||
527 | break; | ||
528 | case EDID_DETAIL_MONITOR_CPDATA: | ||
529 | break; | ||
530 | case EDID_DETAIL_STD_MODES: | ||
531 | /* Five modes per detailed section */ | ||
532 | for (j = 0; j < 5; j++) { | ||
533 | struct std_timing *std; | ||
534 | struct drm_display_mode *newmode; | ||
535 | |||
536 | std = &data->data.timings[j]; | ||
537 | newmode = drm_mode_std(dev, std); | ||
538 | if (newmode) { | ||
539 | drm_mode_probed_add(connector, newmode); | ||
540 | modes++; | ||
541 | } | ||
542 | } | ||
543 | break; | ||
544 | default: | ||
545 | break; | ||
546 | } | ||
547 | } | ||
548 | |||
549 | return modes; | ||
550 | } | ||
551 | |||
552 | #define DDC_ADDR 0x50 | ||
553 | |||
554 | unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter) | ||
555 | { | ||
556 | unsigned char start = 0x0; | ||
557 | unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
558 | struct i2c_msg msgs[] = { | ||
559 | { | ||
560 | .addr = DDC_ADDR, | ||
561 | .flags = 0, | ||
562 | .len = 1, | ||
563 | .buf = &start, | ||
564 | }, { | ||
565 | .addr = DDC_ADDR, | ||
566 | .flags = I2C_M_RD, | ||
567 | .len = EDID_LENGTH, | ||
568 | .buf = buf, | ||
569 | } | ||
570 | }; | ||
571 | |||
572 | if (!buf) { | ||
573 | dev_warn(&adapter->dev, "unable to allocate memory for EDID " | ||
574 | "block.\n"); | ||
575 | return NULL; | ||
576 | } | ||
577 | |||
578 | if (i2c_transfer(adapter, msgs, 2) == 2) | ||
579 | return buf; | ||
580 | |||
581 | dev_info(&adapter->dev, "unable to read EDID block.\n"); | ||
582 | kfree(buf); | ||
583 | return NULL; | ||
584 | } | ||
585 | EXPORT_SYMBOL(drm_do_probe_ddc_edid); | ||
586 | |||
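drm_do_probe_ddc_edid() above is the standard DDC fetch: one combined i2c transfer that writes a zero offset byte to the EDID slave at 0x50 and then reads EDID_LENGTH (128) bytes back. A minimal caller sketch, assuming the driver already owns a DDC-capable adapter (the function and variable names below are illustrative, not part of this patch):

    /* Sketch: fetch the raw base block over a driver-supplied adapter and
     * dump its 8-byte header.  The caller owns the returned buffer.
     */
    static void example_dump_edid_header(struct i2c_adapter *adapter)
    {
        unsigned char *raw = drm_do_probe_ddc_edid(adapter);

        if (!raw)
            return;
        print_hex_dump(KERN_DEBUG, "edid hdr: ", DUMP_PREFIX_OFFSET,
                       16, 1, raw, 8, false);
        kfree(raw);
    }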
587 | static unsigned char *drm_ddc_read(struct i2c_adapter *adapter) | ||
588 | { | ||
589 | struct i2c_algo_bit_data *algo_data = adapter->algo_data; | ||
590 | unsigned char *edid = NULL; | ||
591 | int i, j; | ||
592 | |||
593 | algo_data->setscl(algo_data->data, 1); | ||
594 | |||
595 | for (i = 0; i < 1; i++) { | ||
596 | /* For some old monitors we need the | ||
597 | * following process to initialize/stop DDC | ||
598 | */ | ||
599 | algo_data->setsda(algo_data->data, 1); | ||
600 | msleep(13); | ||
601 | |||
602 | algo_data->setscl(algo_data->data, 1); | ||
603 | for (j = 0; j < 5; j++) { | ||
604 | msleep(10); | ||
605 | if (algo_data->getscl(algo_data->data)) | ||
606 | break; | ||
607 | } | ||
608 | if (j == 5) | ||
609 | continue; | ||
610 | |||
611 | algo_data->setsda(algo_data->data, 0); | ||
612 | msleep(15); | ||
613 | algo_data->setscl(algo_data->data, 0); | ||
614 | msleep(15); | ||
615 | algo_data->setsda(algo_data->data, 1); | ||
616 | msleep(15); | ||
617 | |||
618 | /* Do the real work */ | ||
619 | edid = drm_do_probe_ddc_edid(adapter); | ||
620 | algo_data->setsda(algo_data->data, 0); | ||
621 | algo_data->setscl(algo_data->data, 0); | ||
622 | msleep(15); | ||
623 | |||
624 | algo_data->setscl(algo_data->data, 1); | ||
625 | for (j = 0; j < 10; j++) { | ||
626 | msleep(10); | ||
627 | if (algo_data->getscl(algo_data->data)) | ||
628 | break; | ||
629 | } | ||
630 | |||
631 | algo_data->setsda(algo_data->data, 1); | ||
632 | msleep(15); | ||
633 | algo_data->setscl(algo_data->data, 0); | ||
634 | algo_data->setsda(algo_data->data, 0); | ||
635 | if (edid) | ||
636 | break; | ||
637 | } | ||
638 | /* Release the DDC lines when done or the Apple Cinema HD display | ||
639 | * will switch off | ||
640 | */ | ||
641 | algo_data->setsda(algo_data->data, 1); | ||
642 | algo_data->setscl(algo_data->data, 1); | ||
643 | |||
644 | return edid; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * drm_get_edid - get EDID data, if available | ||
649 | * @connector: connector we're probing | ||
650 | * @adapter: i2c adapter to use for DDC | ||
651 | * | ||
652 | * Poke the given connector's i2c channel to grab EDID data if possible. | ||
653 | * | ||
654 | * Return edid data or NULL if we couldn't find any. | ||
655 | */ | ||
656 | struct edid *drm_get_edid(struct drm_connector *connector, | ||
657 | struct i2c_adapter *adapter) | ||
658 | { | ||
659 | struct edid *edid; | ||
660 | |||
661 | edid = (struct edid *)drm_ddc_read(adapter); | ||
662 | if (!edid) { | ||
663 | dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n", | ||
664 | drm_get_connector_name(connector)); | ||
665 | return NULL; | ||
666 | } | ||
667 | if (!edid_is_valid(edid)) { | ||
668 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
669 | drm_get_connector_name(connector)); | ||
670 | kfree(edid); | ||
671 | return NULL; | ||
672 | } | ||
673 | |||
674 | connector->display_info.raw_edid = (char *)edid; | ||
675 | |||
676 | return edid; | ||
677 | } | ||
678 | EXPORT_SYMBOL(drm_get_edid); | ||
679 | |||
680 | /** | ||
681 | * drm_add_edid_modes - add modes from EDID data, if available | ||
682 | * @connector: connector we're probing | ||
683 | * @edid: edid data | ||
684 | * | ||
685 | * Add the specified modes to the connector's mode list. | ||
686 | * | ||
687 | * Return number of modes added or 0 if we couldn't find any. | ||
688 | */ | ||
689 | int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) | ||
690 | { | ||
691 | int num_modes = 0; | ||
692 | u32 quirks; | ||
693 | |||
694 | if (edid == NULL) { | ||
695 | return 0; | ||
696 | } | ||
697 | if (!edid_is_valid(edid)) { | ||
698 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
699 | drm_get_connector_name(connector)); | ||
700 | return 0; | ||
701 | } | ||
702 | |||
703 | quirks = edid_get_quirks(edid); | ||
704 | |||
705 | num_modes += add_established_modes(connector, edid); | ||
706 | num_modes += add_standard_modes(connector, edid); | ||
707 | num_modes += add_detailed_info(connector, edid, quirks); | ||
708 | |||
709 | if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) | ||
710 | edid_fixup_preferred(connector, quirks); | ||
711 | |||
712 | connector->display_info.serration_vsync = edid->serration_vsync; | ||
713 | connector->display_info.sync_on_green = edid->sync_on_green; | ||
714 | connector->display_info.composite_sync = edid->composite_sync; | ||
715 | connector->display_info.separate_syncs = edid->separate_syncs; | ||
716 | connector->display_info.blank_to_black = edid->blank_to_black; | ||
717 | connector->display_info.video_level = edid->video_level; | ||
718 | connector->display_info.digital = edid->digital; | ||
719 | connector->display_info.width_mm = edid->width_cm * 10; | ||
720 | connector->display_info.height_mm = edid->height_cm * 10; | ||
721 | connector->display_info.gamma = edid->gamma; | ||
722 | connector->display_info.gtf_supported = edid->default_gtf; | ||
723 | connector->display_info.standard_color = edid->standard_color; | ||
724 | connector->display_info.display_type = edid->display_type; | ||
725 | connector->display_info.active_off_supported = edid->pm_active_off; | ||
726 | connector->display_info.suspend_supported = edid->pm_suspend; | ||
727 | connector->display_info.standby_supported = edid->pm_standby; | ||
729 | |||
730 | return num_modes; | ||
731 | } | ||
732 | EXPORT_SYMBOL(drm_add_edid_modes); | ||
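Taken together, drm_get_edid() and drm_add_edid_modes() are the pieces a connector's mode-probing path is expected to chain: read the block over DDC, feed it to the EDID parser, then free it. A hedged sketch of such a helper; the ddc_bus argument is a stand-in for whatever i2c adapter the driver associates with the connector:

    /* Sketch only: probe EDID and populate the connector's mode list.
     * Returns the number of modes added, 0 if nothing usable was found.
     */
    static int example_ddc_get_modes(struct drm_connector *connector,
                                     struct i2c_adapter *ddc_bus)
    {
        struct edid *edid;
        int count = 0;

        edid = drm_get_edid(connector, ddc_bus);
        if (edid) {
            count = drm_add_edid_modes(connector, edid);
            kfree(edid); /* drivers of this era free the block themselves */
        }
        return count;
    }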
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 78eeed5caaff..b06a53715853 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -35,7 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include "drmP.h" | 37 | #include "drmP.h" |
38 | #include "drm_sarea.h" | ||
39 | #include <linux/poll.h> | 38 | #include <linux/poll.h> |
40 | #include <linux/smp_lock.h> | 39 | #include <linux/smp_lock.h> |
41 | 40 | ||
@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
44 | 43 | ||
45 | static int drm_setup(struct drm_device * dev) | 44 | static int drm_setup(struct drm_device * dev) |
46 | { | 45 | { |
47 | drm_local_map_t *map; | ||
48 | int i; | 46 | int i; |
49 | int ret; | 47 | int ret; |
50 | u32 sareapage; | ||
51 | 48 | ||
52 | if (dev->driver->firstopen) { | 49 | if (dev->driver->firstopen) { |
53 | ret = dev->driver->firstopen(dev); | 50 | ret = dev->driver->firstopen(dev); |
@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev) | |||
55 | return ret; | 52 | return ret; |
56 | } | 53 | } |
57 | 54 | ||
58 | dev->magicfree.next = NULL; | ||
59 | |||
60 | /* prebuild the SAREA */ | ||
61 | sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE); | ||
62 | i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); | ||
63 | if (i != 0) | ||
64 | return i; | ||
65 | |||
66 | atomic_set(&dev->ioctl_count, 0); | 55 | atomic_set(&dev->ioctl_count, 0); |
67 | atomic_set(&dev->vma_count, 0); | 56 | atomic_set(&dev->vma_count, 0); |
68 | dev->buf_use = 0; | ||
69 | atomic_set(&dev->buf_alloc, 0); | ||
70 | 57 | ||
71 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { | 58 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && |
59 | !drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
60 | dev->buf_use = 0; | ||
61 | atomic_set(&dev->buf_alloc, 0); | ||
62 | |||
72 | i = drm_dma_setup(dev); | 63 | i = drm_dma_setup(dev); |
73 | if (i < 0) | 64 | if (i < 0) |
74 | return i; | 65 | return i; |
@@ -77,16 +68,12 @@ static int drm_setup(struct drm_device * dev) | |||
77 | for (i = 0; i < ARRAY_SIZE(dev->counts); i++) | 68 | for (i = 0; i < ARRAY_SIZE(dev->counts); i++) |
78 | atomic_set(&dev->counts[i], 0); | 69 | atomic_set(&dev->counts[i], 0); |
79 | 70 | ||
80 | drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER); | ||
81 | INIT_LIST_HEAD(&dev->magicfree); | ||
82 | |||
83 | dev->sigdata.lock = NULL; | 71 | dev->sigdata.lock = NULL; |
84 | init_waitqueue_head(&dev->lock.lock_queue); | 72 | |
85 | dev->queue_count = 0; | 73 | dev->queue_count = 0; |
86 | dev->queue_reserved = 0; | 74 | dev->queue_reserved = 0; |
87 | dev->queue_slots = 0; | 75 | dev->queue_slots = 0; |
88 | dev->queuelist = NULL; | 76 | dev->queuelist = NULL; |
89 | dev->irq_enabled = 0; | ||
90 | dev->context_flag = 0; | 77 | dev->context_flag = 0; |
91 | dev->interrupt_flag = 0; | 78 | dev->interrupt_flag = 0; |
92 | dev->dma_flag = 0; | 79 | dev->dma_flag = 0; |
@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp) | |||
147 | spin_lock(&dev->count_lock); | 134 | spin_lock(&dev->count_lock); |
148 | if (!dev->open_count++) { | 135 | if (!dev->open_count++) { |
149 | spin_unlock(&dev->count_lock); | 136 | spin_unlock(&dev->count_lock); |
150 | return drm_setup(dev); | 137 | retcode = drm_setup(dev); |
138 | goto out; | ||
151 | } | 139 | } |
152 | spin_unlock(&dev->count_lock); | 140 | spin_unlock(&dev->count_lock); |
153 | } | 141 | } |
142 | out: | ||
143 | mutex_lock(&dev->struct_mutex); | ||
144 | if (minor->type == DRM_MINOR_LEGACY) { | ||
145 | BUG_ON((dev->dev_mapping != NULL) && | ||
146 | (dev->dev_mapping != inode->i_mapping)); | ||
147 | if (dev->dev_mapping == NULL) | ||
148 | dev->dev_mapping = inode->i_mapping; | ||
149 | } | ||
150 | mutex_unlock(&dev->struct_mutex); | ||
154 | 151 | ||
155 | return retcode; | 152 | return retcode; |
156 | } | 153 | } |
@@ -186,6 +183,10 @@ int drm_stub_open(struct inode *inode, struct file *filp) | |||
186 | 183 | ||
187 | old_fops = filp->f_op; | 184 | old_fops = filp->f_op; |
188 | filp->f_op = fops_get(&dev->driver->fops); | 185 | filp->f_op = fops_get(&dev->driver->fops); |
186 | if (filp->f_op == NULL) { | ||
187 | filp->f_op = old_fops; | ||
188 | goto out; | ||
189 | } | ||
189 | if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) { | 190 | if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) { |
190 | fops_put(filp->f_op); | 191 | fops_put(filp->f_op); |
191 | filp->f_op = fops_get(old_fops); | 192 | filp->f_op = fops_get(old_fops); |
@@ -255,6 +256,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
255 | priv->lock_count = 0; | 256 | priv->lock_count = 0; |
256 | 257 | ||
257 | INIT_LIST_HEAD(&priv->lhead); | 258 | INIT_LIST_HEAD(&priv->lhead); |
259 | INIT_LIST_HEAD(&priv->fbs); | ||
258 | 260 | ||
259 | if (dev->driver->driver_features & DRIVER_GEM) | 261 | if (dev->driver->driver_features & DRIVER_GEM) |
260 | drm_gem_open(dev, priv); | 262 | drm_gem_open(dev, priv); |
@@ -265,10 +267,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
265 | goto out_free; | 267 | goto out_free; |
266 | } | 268 | } |
267 | 269 | ||
270 | |||
271 | /* if there is no current master make this fd it */ | ||
268 | mutex_lock(&dev->struct_mutex); | 272 | mutex_lock(&dev->struct_mutex); |
269 | if (list_empty(&dev->filelist)) | 273 | if (!priv->minor->master) { |
270 | priv->master = 1; | 274 | /* create a new master */ |
275 | priv->minor->master = drm_master_create(priv->minor); | ||
276 | if (!priv->minor->master) { | ||
277 | ret = -ENOMEM; | ||
278 | goto out_free; | ||
279 | } | ||
280 | |||
281 | priv->is_master = 1; | ||
282 | /* take another reference for the copy in the local file priv */ | ||
283 | priv->master = drm_master_get(priv->minor->master); | ||
284 | |||
285 | priv->authenticated = 1; | ||
286 | |||
287 | mutex_unlock(&dev->struct_mutex); | ||
288 | if (dev->driver->master_create) { | ||
289 | ret = dev->driver->master_create(dev, priv->master); | ||
290 | if (ret) { | ||
291 | mutex_lock(&dev->struct_mutex); | ||
292 | /* drop both references if this fails */ | ||
293 | drm_master_put(&priv->minor->master); | ||
294 | drm_master_put(&priv->master); | ||
295 | mutex_unlock(&dev->struct_mutex); | ||
296 | goto out_free; | ||
297 | } | ||
298 | } | ||
299 | } else { | ||
300 | /* get a reference to the master */ | ||
301 | priv->master = drm_master_get(priv->minor->master); | ||
302 | mutex_unlock(&dev->struct_mutex); | ||
303 | } | ||
271 | 304 | ||
305 | mutex_lock(&dev->struct_mutex); | ||
272 | list_add(&priv->lhead, &dev->filelist); | 306 | list_add(&priv->lhead, &dev->filelist); |
273 | mutex_unlock(&dev->struct_mutex); | 307 | mutex_unlock(&dev->struct_mutex); |
274 | 308 | ||
@@ -314,6 +348,74 @@ int drm_fasync(int fd, struct file *filp, int on) | |||
314 | } | 348 | } |
315 | EXPORT_SYMBOL(drm_fasync); | 349 | EXPORT_SYMBOL(drm_fasync); |
316 | 350 | ||
351 | /* | ||
352 | * Reclaim locked buffers; note that this may be a bad idea if the current | ||
353 | * context doesn't have the hw lock... | ||
354 | */ | ||
355 | static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f) | ||
356 | { | ||
357 | struct drm_file *file_priv = f->private_data; | ||
358 | |||
359 | if (drm_i_have_hw_lock(dev, file_priv)) { | ||
360 | dev->driver->reclaim_buffers_locked(dev, file_priv); | ||
361 | } else { | ||
362 | unsigned long _end = jiffies + 3 * DRM_HZ; | ||
363 | int locked = 0; | ||
364 | |||
365 | drm_idlelock_take(&file_priv->master->lock); | ||
366 | |||
367 | /* | ||
368 | * Wait for a while. | ||
369 | */ | ||
370 | do { | ||
371 | spin_lock_bh(&file_priv->master->lock.spinlock); | ||
372 | locked = file_priv->master->lock.idle_has_lock; | ||
373 | spin_unlock_bh(&file_priv->master->lock.spinlock); | ||
374 | if (locked) | ||
375 | break; | ||
376 | schedule(); | ||
377 | } while (!time_after_eq(jiffies, _end)); | ||
378 | |||
379 | if (!locked) { | ||
380 | DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" | ||
381 | "\tdriver to use reclaim_buffers_idlelocked() instead.\n" | ||
382 | "\tI will go on reclaiming the buffers anyway.\n"); | ||
383 | } | ||
384 | |||
385 | dev->driver->reclaim_buffers_locked(dev, file_priv); | ||
386 | drm_idlelock_release(&file_priv->master->lock); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static void drm_master_release(struct drm_device *dev, struct file *filp) | ||
391 | { | ||
392 | struct drm_file *file_priv = filp->private_data; | ||
393 | |||
394 | if (dev->driver->reclaim_buffers_locked && | ||
395 | file_priv->master->lock.hw_lock) | ||
396 | drm_reclaim_locked_buffers(dev, filp); | ||
397 | |||
398 | if (dev->driver->reclaim_buffers_idlelocked && | ||
399 | file_priv->master->lock.hw_lock) { | ||
400 | drm_idlelock_take(&file_priv->master->lock); | ||
401 | dev->driver->reclaim_buffers_idlelocked(dev, file_priv); | ||
402 | drm_idlelock_release(&file_priv->master->lock); | ||
403 | } | ||
404 | |||
405 | |||
406 | if (drm_i_have_hw_lock(dev, file_priv)) { | ||
407 | DRM_DEBUG("File %p released, freeing lock for context %d\n", | ||
408 | filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); | ||
409 | drm_lock_free(&file_priv->master->lock, | ||
410 | _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); | ||
411 | } | ||
412 | |||
413 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && | ||
414 | !dev->driver->reclaim_buffers_locked) { | ||
415 | dev->driver->reclaim_buffers(dev, file_priv); | ||
416 | } | ||
417 | } | ||
418 | |||
317 | /** | 419 | /** |
318 | * Release file. | 420 | * Release file. |
319 | * | 421 | * |
@@ -348,60 +450,9 @@ int drm_release(struct inode *inode, struct file *filp) | |||
348 | (long)old_encode_dev(file_priv->minor->device), | 450 | (long)old_encode_dev(file_priv->minor->device), |
349 | dev->open_count); | 451 | dev->open_count); |
350 | 452 | ||
351 | if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { | 453 | /* if the master has gone away we can't do anything with the lock */ |
352 | if (drm_i_have_hw_lock(dev, file_priv)) { | 454 | if (file_priv->minor->master) |
353 | dev->driver->reclaim_buffers_locked(dev, file_priv); | 455 | drm_master_release(dev, filp); |
354 | } else { | ||
355 | unsigned long endtime = jiffies + 3 * DRM_HZ; | ||
356 | int locked = 0; | ||
357 | |||
358 | drm_idlelock_take(&dev->lock); | ||
359 | |||
360 | /* | ||
361 | * Wait for a while. | ||
362 | */ | ||
363 | |||
364 | do{ | ||
365 | spin_lock_bh(&dev->lock.spinlock); | ||
366 | locked = dev->lock.idle_has_lock; | ||
367 | spin_unlock_bh(&dev->lock.spinlock); | ||
368 | if (locked) | ||
369 | break; | ||
370 | schedule(); | ||
371 | } while (!time_after_eq(jiffies, endtime)); | ||
372 | |||
373 | if (!locked) { | ||
374 | DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" | ||
375 | "\tdriver to use reclaim_buffers_idlelocked() instead.\n" | ||
376 | "\tI will go on reclaiming the buffers anyway.\n"); | ||
377 | } | ||
378 | |||
379 | dev->driver->reclaim_buffers_locked(dev, file_priv); | ||
380 | drm_idlelock_release(&dev->lock); | ||
381 | } | ||
382 | } | ||
383 | |||
384 | if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { | ||
385 | |||
386 | drm_idlelock_take(&dev->lock); | ||
387 | dev->driver->reclaim_buffers_idlelocked(dev, file_priv); | ||
388 | drm_idlelock_release(&dev->lock); | ||
389 | |||
390 | } | ||
391 | |||
392 | if (drm_i_have_hw_lock(dev, file_priv)) { | ||
393 | DRM_DEBUG("File %p released, freeing lock for context %d\n", | ||
394 | filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); | ||
395 | |||
396 | drm_lock_free(&dev->lock, | ||
397 | _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); | ||
398 | } | ||
399 | |||
400 | |||
401 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && | ||
402 | !dev->driver->reclaim_buffers_locked) { | ||
403 | dev->driver->reclaim_buffers(dev, file_priv); | ||
404 | } | ||
405 | 456 | ||
406 | if (dev->driver->driver_features & DRIVER_GEM) | 457 | if (dev->driver->driver_features & DRIVER_GEM) |
407 | drm_gem_release(dev, file_priv); | 458 | drm_gem_release(dev, file_priv); |
@@ -428,12 +479,24 @@ int drm_release(struct inode *inode, struct file *filp) | |||
428 | mutex_unlock(&dev->ctxlist_mutex); | 479 | mutex_unlock(&dev->ctxlist_mutex); |
429 | 480 | ||
430 | mutex_lock(&dev->struct_mutex); | 481 | mutex_lock(&dev->struct_mutex); |
431 | if (file_priv->remove_auth_on_close == 1) { | 482 | |
483 | if (file_priv->is_master) { | ||
432 | struct drm_file *temp; | 484 | struct drm_file *temp; |
485 | list_for_each_entry(temp, &dev->filelist, lhead) { | ||
486 | if ((temp->master == file_priv->master) && | ||
487 | (temp != file_priv)) | ||
488 | temp->authenticated = 0; | ||
489 | } | ||
433 | 490 | ||
434 | list_for_each_entry(temp, &dev->filelist, lhead) | 491 | if (file_priv->minor->master == file_priv->master) { |
435 | temp->authenticated = 0; | 492 | /* drop the reference held by the minor */ |
493 | drm_master_put(&file_priv->minor->master); | ||
494 | } | ||
436 | } | 495 | } |
496 | |||
497 | /* drop the reference held by the file priv */ | ||
498 | drm_master_put(&file_priv->master); | ||
499 | file_priv->is_master = 0; | ||
437 | list_del(&file_priv->lhead); | 500 | list_del(&file_priv->lhead); |
438 | mutex_unlock(&dev->struct_mutex); | 501 | mutex_unlock(&dev->struct_mutex); |
439 | 502 | ||
@@ -448,9 +511,9 @@ int drm_release(struct inode *inode, struct file *filp) | |||
448 | atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); | 511 | atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); |
449 | spin_lock(&dev->count_lock); | 512 | spin_lock(&dev->count_lock); |
450 | if (!--dev->open_count) { | 513 | if (!--dev->open_count) { |
451 | if (atomic_read(&dev->ioctl_count) || dev->blocked) { | 514 | if (atomic_read(&dev->ioctl_count)) { |
452 | DRM_ERROR("Device busy: %d %d\n", | 515 | DRM_ERROR("Device busy: %d\n", |
453 | atomic_read(&dev->ioctl_count), dev->blocked); | 516 | atomic_read(&dev->ioctl_count)); |
454 | spin_unlock(&dev->count_lock); | 517 | spin_unlock(&dev->count_lock); |
455 | unlock_kernel(); | 518 | unlock_kernel(); |
456 | return -EBUSY; | 519 | return -EBUSY; |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ccd1afdede02..6915fb82d0b0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -64,6 +64,13 @@ | |||
64 | * up at a later date, and as our interface with shmfs for memory allocation. | 64 | * up at a later date, and as our interface with shmfs for memory allocation. |
65 | */ | 65 | */ |
66 | 66 | ||
67 | /* | ||
68 | * We make up offsets for buffer objects so we can recognize them at | ||
69 | * mmap time. | ||
70 | */ | ||
71 | #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) | ||
72 | #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) | ||
73 | |||
67 | /** | 74 | /** |
68 | * Initialize the GEM device fields | 75 | * Initialize the GEM device fields |
69 | */ | 76 | */ |
@@ -71,6 +78,8 @@ | |||
71 | int | 78 | int |
72 | drm_gem_init(struct drm_device *dev) | 79 | drm_gem_init(struct drm_device *dev) |
73 | { | 80 | { |
81 | struct drm_gem_mm *mm; | ||
82 | |||
74 | spin_lock_init(&dev->object_name_lock); | 83 | spin_lock_init(&dev->object_name_lock); |
75 | idr_init(&dev->object_name_idr); | 84 | idr_init(&dev->object_name_idr); |
76 | atomic_set(&dev->object_count, 0); | 85 | atomic_set(&dev->object_count, 0); |
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev) | |||
79 | atomic_set(&dev->pin_memory, 0); | 88 | atomic_set(&dev->pin_memory, 0); |
80 | atomic_set(&dev->gtt_count, 0); | 89 | atomic_set(&dev->gtt_count, 0); |
81 | atomic_set(&dev->gtt_memory, 0); | 90 | atomic_set(&dev->gtt_memory, 0); |
91 | |||
92 | mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM); | ||
93 | if (!mm) { | ||
94 | DRM_ERROR("out of memory\n"); | ||
95 | return -ENOMEM; | ||
96 | } | ||
97 | |||
98 | dev->mm_private = mm; | ||
99 | |||
100 | if (drm_ht_create(&mm->offset_hash, 19)) { | ||
101 | drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); | ||
102 | return -ENOMEM; | ||
103 | } | ||
104 | |||
105 | if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, | ||
106 | DRM_FILE_PAGE_OFFSET_SIZE)) { | ||
107 | drm_ht_remove(&mm->offset_hash); | ||
108 | drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); | ||
109 | return -ENOMEM; | ||
110 | } | ||
111 | |||
82 | return 0; | 112 | return 0; |
83 | } | 113 | } |
84 | 114 | ||
115 | void | ||
116 | drm_gem_destroy(struct drm_device *dev) | ||
117 | { | ||
118 | struct drm_gem_mm *mm = dev->mm_private; | ||
119 | |||
120 | drm_mm_takedown(&mm->offset_manager); | ||
121 | drm_ht_remove(&mm->offset_hash); | ||
122 | drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); | ||
123 | dev->mm_private = NULL; | ||
124 | } | ||
125 | |||
85 | /** | 126 | /** |
86 | * Allocate a GEM object of the specified size with shmfs backing store | 127 | * Allocate a GEM object of the specified size with shmfs backing store |
87 | */ | 128 | */ |
@@ -95,7 +136,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) | |||
95 | obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); | 136 | obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); |
96 | 137 | ||
97 | obj->dev = dev; | 138 | obj->dev = dev; |
98 | obj->filp = shmem_file_setup("drm mm object", size, 0); | 139 | obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
99 | if (IS_ERR(obj->filp)) { | 140 | if (IS_ERR(obj->filp)) { |
100 | kfree(obj); | 141 | kfree(obj); |
101 | return NULL; | 142 | return NULL; |
@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref) | |||
419 | } | 460 | } |
420 | EXPORT_SYMBOL(drm_gem_object_handle_free); | 461 | EXPORT_SYMBOL(drm_gem_object_handle_free); |
421 | 462 | ||
463 | /** | ||
464 | * drm_gem_mmap - memory map routine for GEM objects | ||
465 | * @filp: DRM file pointer | ||
466 | * @vma: VMA for the area to be mapped | ||
467 | * | ||
468 | * If a driver supports GEM object mapping, mmap calls on the DRM file | ||
469 | * descriptor will end up here. | ||
470 | * | ||
471 | * If we find the object based on the offset passed in (vma->vm_pgoff will | ||
472 | * contain the fake offset we created when the GTT map ioctl was called on | ||
473 | * the object), we set up the driver fault handler so that any accesses | ||
474 | * to the object can be trapped, to perform migration, GTT binding, surface | ||
475 | * register allocation, or performance monitoring. | ||
476 | */ | ||
477 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | ||
478 | { | ||
479 | struct drm_file *priv = filp->private_data; | ||
480 | struct drm_device *dev = priv->minor->dev; | ||
481 | struct drm_gem_mm *mm = dev->mm_private; | ||
482 | struct drm_map *map = NULL; | ||
483 | struct drm_gem_object *obj; | ||
484 | struct drm_hash_item *hash; | ||
485 | unsigned long prot; | ||
486 | int ret = 0; | ||
487 | |||
488 | mutex_lock(&dev->struct_mutex); | ||
489 | |||
490 | if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { | ||
491 | mutex_unlock(&dev->struct_mutex); | ||
492 | return drm_mmap(filp, vma); | ||
493 | } | ||
494 | |||
495 | map = drm_hash_entry(hash, struct drm_map_list, hash)->map; | ||
496 | if (!map || | ||
497 | ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) { | ||
498 | ret = -EPERM; | ||
499 | goto out_unlock; | ||
500 | } | ||
501 | |||
502 | /* Check for valid size. */ | ||
503 | if (map->size < vma->vm_end - vma->vm_start) { | ||
504 | ret = -EINVAL; | ||
505 | goto out_unlock; | ||
506 | } | ||
507 | |||
508 | obj = map->handle; | ||
509 | if (!obj->dev->driver->gem_vm_ops) { | ||
510 | ret = -EINVAL; | ||
511 | goto out_unlock; | ||
512 | } | ||
513 | |||
514 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; | ||
515 | vma->vm_ops = obj->dev->driver->gem_vm_ops; | ||
516 | vma->vm_private_data = map->handle; | ||
517 | /* FIXME: use pgprot_writecombine when available */ | ||
518 | prot = pgprot_val(vma->vm_page_prot); | ||
519 | #ifdef CONFIG_X86 | ||
520 | prot |= _PAGE_CACHE_WC; | ||
521 | #endif | ||
522 | vma->vm_page_prot = __pgprot(prot); | ||
523 | |||
524 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | ||
525 | drm_vm_open_locked(vma); | ||
526 | |||
527 | out_unlock: | ||
528 | mutex_unlock(&dev->struct_mutex); | ||
529 | |||
530 | return ret; | ||
531 | } | ||
532 | EXPORT_SYMBOL(drm_gem_mmap); | ||
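Offsets below DRM_FILE_PAGE_OFFSET_START keep falling through to drm_mmap() for legacy maps, while GEM objects live in the fake-offset window above it. Worked out for the common case of 4 KiB pages (PAGE_SHIFT == 12):

    /* With PAGE_SHIFT == 12:
     *   DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages
     *                              = 4 GiB once scaled by the page size,
     *   DRM_FILE_PAGE_OFFSET_SIZE  = (0xFFFFFFFF >> 12) * 16 ~= 16M pages
     *                              ~= 64 GiB of offset space for objects.
     * A vm_pgoff below 0x100000 therefore cannot be in the offset hash and
     * is handled as an ordinary legacy mapping.
     */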
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index 33160673a7b7..af539f7d87dd 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c | |||
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) | |||
127 | } | 127 | } |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | EXPORT_SYMBOL(drm_ht_insert_item); | ||
130 | 131 | ||
131 | /* | 132 | /* |
132 | * Just insert an item and return any "bits" bit key that hasn't been | 133 | * Just insert an item and return any "bits" bit key that hasn't been |
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) | |||
188 | ht->fill--; | 189 | ht->fill--; |
189 | return 0; | 190 | return 0; |
190 | } | 191 | } |
192 | EXPORT_SYMBOL(drm_ht_remove_item); | ||
191 | 193 | ||
192 | void drm_ht_remove(struct drm_open_hash *ht) | 194 | void drm_ht_remove(struct drm_open_hash *ht) |
193 | { | 195 | { |
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 16829fb3089d..1fad76289e66 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
@@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data, | |||
53 | struct drm_file *file_priv) | 53 | struct drm_file *file_priv) |
54 | { | 54 | { |
55 | struct drm_unique *u = data; | 55 | struct drm_unique *u = data; |
56 | struct drm_master *master = file_priv->master; | ||
56 | 57 | ||
57 | if (u->unique_len >= dev->unique_len) { | 58 | if (u->unique_len >= master->unique_len) { |
58 | if (copy_to_user(u->unique, dev->unique, dev->unique_len)) | 59 | if (copy_to_user(u->unique, master->unique, master->unique_len)) |
59 | return -EFAULT; | 60 | return -EFAULT; |
60 | } | 61 | } |
61 | u->unique_len = dev->unique_len; | 62 | u->unique_len = master->unique_len; |
62 | 63 | ||
63 | return 0; | 64 | return 0; |
64 | } | 65 | } |
@@ -81,36 +82,38 @@ int drm_setunique(struct drm_device *dev, void *data, | |||
81 | struct drm_file *file_priv) | 82 | struct drm_file *file_priv) |
82 | { | 83 | { |
83 | struct drm_unique *u = data; | 84 | struct drm_unique *u = data; |
85 | struct drm_master *master = file_priv->master; | ||
84 | int domain, bus, slot, func, ret; | 86 | int domain, bus, slot, func, ret; |
85 | 87 | ||
86 | if (dev->unique_len || dev->unique) | 88 | if (master->unique_len || master->unique) |
87 | return -EBUSY; | 89 | return -EBUSY; |
88 | 90 | ||
89 | if (!u->unique_len || u->unique_len > 1024) | 91 | if (!u->unique_len || u->unique_len > 1024) |
90 | return -EINVAL; | 92 | return -EINVAL; |
91 | 93 | ||
92 | dev->unique_len = u->unique_len; | 94 | master->unique_len = u->unique_len; |
93 | dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); | 95 | master->unique_size = u->unique_len + 1; |
94 | if (!dev->unique) | 96 | master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); |
97 | if (!master->unique) | ||
95 | return -ENOMEM; | 98 | return -ENOMEM; |
96 | if (copy_from_user(dev->unique, u->unique, dev->unique_len)) | 99 | if (copy_from_user(master->unique, u->unique, master->unique_len)) |
97 | return -EFAULT; | 100 | return -EFAULT; |
98 | 101 | ||
99 | dev->unique[dev->unique_len] = '\0'; | 102 | master->unique[master->unique_len] = '\0'; |
100 | 103 | ||
101 | dev->devname = | 104 | dev->devname = |
102 | drm_alloc(strlen(dev->driver->pci_driver.name) + | 105 | drm_alloc(strlen(dev->driver->pci_driver.name) + |
103 | strlen(dev->unique) + 2, DRM_MEM_DRIVER); | 106 | strlen(master->unique) + 2, DRM_MEM_DRIVER); |
104 | if (!dev->devname) | 107 | if (!dev->devname) |
105 | return -ENOMEM; | 108 | return -ENOMEM; |
106 | 109 | ||
107 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, | 110 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, |
108 | dev->unique); | 111 | master->unique); |
109 | 112 | ||
110 | /* Return error if the busid submitted doesn't match the device's actual | 113 | /* Return error if the busid submitted doesn't match the device's actual |
111 | * busid. | 114 | * busid. |
112 | */ | 115 | */ |
113 | ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); | 116 | ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); |
114 | if (ret != 3) | 117 | if (ret != 3) |
115 | return -EINVAL; | 118 | return -EINVAL; |
116 | domain = bus >> 8; | 119 | domain = bus >> 8; |
@@ -125,34 +128,38 @@ int drm_setunique(struct drm_device *dev, void *data, | |||
125 | return 0; | 128 | return 0; |
126 | } | 129 | } |
127 | 130 | ||
128 | static int drm_set_busid(struct drm_device * dev) | 131 | static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) |
129 | { | 132 | { |
133 | struct drm_master *master = file_priv->master; | ||
130 | int len; | 134 | int len; |
131 | 135 | ||
132 | if (dev->unique != NULL) | 136 | if (master->unique != NULL) |
133 | return 0; | 137 | return -EBUSY; |
134 | 138 | ||
135 | dev->unique_len = 40; | 139 | master->unique_len = 40; |
136 | dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); | 140 | master->unique_size = master->unique_len; |
137 | if (dev->unique == NULL) | 141 | master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); |
142 | if (master->unique == NULL) | ||
138 | return -ENOMEM; | 143 | return -ENOMEM; |
139 | 144 | ||
140 | len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", | 145 | len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d", |
141 | drm_get_pci_domain(dev), dev->pdev->bus->number, | 146 | drm_get_pci_domain(dev), |
147 | dev->pdev->bus->number, | ||
142 | PCI_SLOT(dev->pdev->devfn), | 148 | PCI_SLOT(dev->pdev->devfn), |
143 | PCI_FUNC(dev->pdev->devfn)); | 149 | PCI_FUNC(dev->pdev->devfn)); |
144 | 150 | if (len >= master->unique_len) | |
145 | if (len > dev->unique_len) | 151 | DRM_ERROR("buffer overflow\n"); |
146 | DRM_ERROR("Unique buffer overflowed\n"); | 152 | else |
153 | master->unique_len = len; | ||
147 | 154 | ||
148 | dev->devname = | 155 | dev->devname = |
149 | drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + | 156 | drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len + |
150 | 2, DRM_MEM_DRIVER); | 157 | 2, DRM_MEM_DRIVER); |
151 | if (dev->devname == NULL) | 158 | if (dev->devname == NULL) |
152 | return -ENOMEM; | 159 | return -ENOMEM; |
153 | 160 | ||
154 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, | 161 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, |
155 | dev->unique); | 162 | master->unique); |
156 | 163 | ||
157 | return 0; | 164 | return 0; |
158 | } | 165 | } |
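For a concrete idea of what the snprintf above produces, take a card at PCI domain 0, bus 2, slot 0, function 0 (values chosen for illustration only):

    /* master->unique becomes "pci:0000:02:00.0" (16 characters), and
     * dev->devname then reads e.g. "i915@pci:0000:02:00.0" for the i915
     * driver.  Both fit comfortably inside the 40-byte unique buffer.
     */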
@@ -276,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data, | |||
276 | for (i = 0; i < dev->counters; i++) { | 283 | for (i = 0; i < dev->counters; i++) { |
277 | if (dev->types[i] == _DRM_STAT_LOCK) | 284 | if (dev->types[i] == _DRM_STAT_LOCK) |
278 | stats->data[i].value = | 285 | stats->data[i].value = |
279 | (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); | 286 | (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); |
280 | else | 287 | else |
281 | stats->data[i].value = atomic_read(&dev->counts[i]); | 288 | stats->data[i].value = atomic_read(&dev->counts[i]); |
282 | stats->data[i].type = dev->types[i]; | 289 | stats->data[i].type = dev->types[i]; |
@@ -318,7 +325,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri | |||
318 | /* | 325 | /* |
319 | * Version 1.1 includes tying of DRM to specific device | 326 | * Version 1.1 includes tying of DRM to specific device |
320 | */ | 327 | */ |
321 | drm_set_busid(dev); | 328 | drm_set_busid(dev, file_priv); |
322 | } | 329 | } |
323 | } | 330 | } |
324 | 331 | ||
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 15c8dabc3e97..3795dbc0f50c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -94,7 +94,7 @@ static void vblank_disable_fn(unsigned long arg) | |||
94 | } | 94 | } |
95 | } | 95 | } |
96 | 96 | ||
97 | static void drm_vblank_cleanup(struct drm_device *dev) | 97 | void drm_vblank_cleanup(struct drm_device *dev) |
98 | { | 98 | { |
99 | /* Bail if the driver didn't call drm_vblank_init() */ | 99 | /* Bail if the driver didn't call drm_vblank_init() */ |
100 | if (dev->num_crtcs == 0) | 100 | if (dev->num_crtcs == 0) |
@@ -106,8 +106,6 @@ static void drm_vblank_cleanup(struct drm_device *dev) | |||
106 | 106 | ||
107 | drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, | 107 | drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, |
108 | DRM_MEM_DRIVER); | 108 | DRM_MEM_DRIVER); |
109 | drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, | ||
110 | DRM_MEM_DRIVER); | ||
111 | drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * | 109 | drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * |
112 | dev->num_crtcs, DRM_MEM_DRIVER); | 110 | dev->num_crtcs, DRM_MEM_DRIVER); |
113 | drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * | 111 | drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * |
@@ -116,6 +114,9 @@ static void drm_vblank_cleanup(struct drm_device *dev) | |||
116 | dev->num_crtcs, DRM_MEM_DRIVER); | 114 | dev->num_crtcs, DRM_MEM_DRIVER); |
117 | drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, | 115 | drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, |
118 | DRM_MEM_DRIVER); | 116 | DRM_MEM_DRIVER); |
117 | drm_free(dev->last_vblank_wait, | ||
118 | sizeof(*dev->last_vblank_wait) * dev->num_crtcs, | ||
119 | DRM_MEM_DRIVER); | ||
119 | drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * | 120 | drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * |
120 | dev->num_crtcs, DRM_MEM_DRIVER); | 121 | dev->num_crtcs, DRM_MEM_DRIVER); |
121 | 122 | ||
@@ -129,7 +130,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
129 | setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, | 130 | setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, |
130 | (unsigned long)dev); | 131 | (unsigned long)dev); |
131 | spin_lock_init(&dev->vbl_lock); | 132 | spin_lock_init(&dev->vbl_lock); |
132 | atomic_set(&dev->vbl_signal_pending, 0); | ||
133 | dev->num_crtcs = num_crtcs; | 133 | dev->num_crtcs = num_crtcs; |
134 | 134 | ||
135 | dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, | 135 | dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, |
@@ -137,11 +137,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
137 | if (!dev->vbl_queue) | 137 | if (!dev->vbl_queue) |
138 | goto err; | 138 | goto err; |
139 | 139 | ||
140 | dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, | ||
141 | DRM_MEM_DRIVER); | ||
142 | if (!dev->vbl_sigs) | ||
143 | goto err; | ||
144 | |||
145 | dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, | 140 | dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, |
146 | DRM_MEM_DRIVER); | 141 | DRM_MEM_DRIVER); |
147 | if (!dev->_vblank_count) | 142 | if (!dev->_vblank_count) |
@@ -161,6 +156,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
161 | if (!dev->last_vblank) | 156 | if (!dev->last_vblank) |
162 | goto err; | 157 | goto err; |
163 | 158 | ||
159 | dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32), | ||
160 | DRM_MEM_DRIVER); | ||
161 | if (!dev->last_vblank_wait) | ||
162 | goto err; | ||
163 | |||
164 | dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), | 164 | dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), |
165 | DRM_MEM_DRIVER); | 165 | DRM_MEM_DRIVER); |
166 | if (!dev->vblank_inmodeset) | 166 | if (!dev->vblank_inmodeset) |
@@ -169,7 +169,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
169 | /* Zero per-crtc vblank stuff */ | 169 | /* Zero per-crtc vblank stuff */ |
170 | for (i = 0; i < num_crtcs; i++) { | 170 | for (i = 0; i < num_crtcs; i++) { |
171 | init_waitqueue_head(&dev->vbl_queue[i]); | 171 | init_waitqueue_head(&dev->vbl_queue[i]); |
172 | INIT_LIST_HEAD(&dev->vbl_sigs[i]); | ||
173 | atomic_set(&dev->_vblank_count[i], 0); | 172 | atomic_set(&dev->_vblank_count[i], 0); |
174 | atomic_set(&dev->vblank_refcount[i], 0); | 173 | atomic_set(&dev->vblank_refcount[i], 0); |
175 | } | 174 | } |
@@ -259,7 +258,8 @@ EXPORT_SYMBOL(drm_irq_install); | |||
259 | */ | 258 | */ |
260 | int drm_irq_uninstall(struct drm_device * dev) | 259 | int drm_irq_uninstall(struct drm_device * dev) |
261 | { | 260 | { |
262 | int irq_enabled; | 261 | unsigned long irqflags; |
262 | int irq_enabled, i; | ||
263 | 263 | ||
264 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | 264 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) |
265 | return -EINVAL; | 265 | return -EINVAL; |
@@ -269,6 +269,17 @@ int drm_irq_uninstall(struct drm_device * dev) | |||
269 | dev->irq_enabled = 0; | 269 | dev->irq_enabled = 0; |
270 | mutex_unlock(&dev->struct_mutex); | 270 | mutex_unlock(&dev->struct_mutex); |
271 | 271 | ||
272 | /* | ||
273 | * Wake up any waiters so they don't hang. | ||
274 | */ | ||
275 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
276 | for (i = 0; i < dev->num_crtcs; i++) { | ||
277 | DRM_WAKEUP(&dev->vbl_queue[i]); | ||
278 | dev->vblank_enabled[i] = 0; | ||
279 | dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); | ||
280 | } | ||
281 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
282 | |||
272 | if (!irq_enabled) | 283 | if (!irq_enabled) |
273 | return -EINVAL; | 284 | return -EINVAL; |
274 | 285 | ||
@@ -278,8 +289,6 @@ int drm_irq_uninstall(struct drm_device * dev) | |||
278 | 289 | ||
279 | free_irq(dev->pdev->irq, dev); | 290 | free_irq(dev->pdev->irq, dev); |
280 | 291 | ||
281 | drm_vblank_cleanup(dev); | ||
282 | |||
283 | return 0; | 292 | return 0; |
284 | } | 293 | } |
285 | EXPORT_SYMBOL(drm_irq_uninstall); | 294 | EXPORT_SYMBOL(drm_irq_uninstall); |
@@ -307,6 +316,8 @@ int drm_control(struct drm_device *dev, void *data, | |||
307 | case DRM_INST_HANDLER: | 316 | case DRM_INST_HANDLER: |
308 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | 317 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) |
309 | return 0; | 318 | return 0; |
319 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
320 | return 0; | ||
310 | if (dev->if_version < DRM_IF_VERSION(1, 2) && | 321 | if (dev->if_version < DRM_IF_VERSION(1, 2) && |
311 | ctl->irq != dev->pdev->irq) | 322 | ctl->irq != dev->pdev->irq) |
312 | return -EINVAL; | 323 | return -EINVAL; |
@@ -314,6 +325,8 @@ int drm_control(struct drm_device *dev, void *data, | |||
314 | case DRM_UNINST_HANDLER: | 325 | case DRM_UNINST_HANDLER: |
315 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | 326 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) |
316 | return 0; | 327 | return 0; |
328 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
329 | return 0; | ||
317 | return drm_irq_uninstall(dev); | 330 | return drm_irq_uninstall(dev); |
318 | default: | 331 | default: |
319 | return -EINVAL; | 332 | return -EINVAL; |
@@ -429,6 +442,45 @@ void drm_vblank_put(struct drm_device *dev, int crtc) | |||
429 | EXPORT_SYMBOL(drm_vblank_put); | 442 | EXPORT_SYMBOL(drm_vblank_put); |
430 | 443 | ||
431 | /** | 444 | /** |
445 | * drm_vblank_pre_modeset - account for vblanks across mode sets | ||
446 | * @dev: DRM device | ||
447 | * @crtc: CRTC in question | ||
449 | * | ||
450 | * Account for vblank events across mode setting events, which will likely | ||
451 | * reset the hardware frame counter. | ||
452 | */ | ||
453 | void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) | ||
454 | { | ||
455 | /* | ||
456 | * To avoid all the problems that might happen if interrupts | ||
457 | * were enabled/disabled around or between these calls, we just | ||
458 | * have the kernel take a reference on the CRTC (just once though | ||
460 | * to avoid corrupting the count if multiple, mismatched calls occur), | ||
460 | * so that interrupts remain enabled in the interim. | ||
461 | */ | ||
462 | if (!dev->vblank_inmodeset[crtc]) { | ||
463 | dev->vblank_inmodeset[crtc] = 1; | ||
464 | drm_vblank_get(dev, crtc); | ||
465 | } | ||
466 | } | ||
467 | EXPORT_SYMBOL(drm_vblank_pre_modeset); | ||
468 | |||
469 | void drm_vblank_post_modeset(struct drm_device *dev, int crtc) | ||
470 | { | ||
471 | unsigned long irqflags; | ||
472 | |||
473 | if (dev->vblank_inmodeset[crtc]) { | ||
474 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
475 | dev->vblank_disable_allowed = 1; | ||
476 | dev->vblank_inmodeset[crtc] = 0; | ||
477 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
478 | drm_vblank_put(dev, crtc); | ||
479 | } | ||
480 | } | ||
481 | EXPORT_SYMBOL(drm_vblank_post_modeset); | ||
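The intended call pattern is for a driver to bracket its CRTC programming with these two helpers, so a vblank reference is held while the hardware frame counter may be reset. A sketch, where program_crtc_timings() is a placeholder for the driver's own register work, not a function from this patch:

    /* Placeholder for the driver's own mode programming. */
    extern void program_crtc_timings(struct drm_device *dev, int pipe);

    static void example_set_mode(struct drm_device *dev, int pipe)
    {
        drm_vblank_pre_modeset(dev, pipe);  /* take a vblank reference   */
        program_crtc_timings(dev, pipe);    /* may reset the hw counter  */
        drm_vblank_post_modeset(dev, pipe); /* drop it, re-allow disable */
    }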
482 | |||
483 | /** | ||
432 | * drm_modeset_ctl - handle vblank event counter changes across mode switch | 484 | * drm_modeset_ctl - handle vblank event counter changes across mode switch |
433 | * @DRM_IOCTL_ARGS: standard ioctl arguments | 485 | * @DRM_IOCTL_ARGS: standard ioctl arguments |
434 | * | 486 | * |
@@ -443,7 +495,6 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, | |||
443 | struct drm_file *file_priv) | 495 | struct drm_file *file_priv) |
444 | { | 496 | { |
445 | struct drm_modeset_ctl *modeset = data; | 497 | struct drm_modeset_ctl *modeset = data; |
446 | unsigned long irqflags; | ||
447 | int crtc, ret = 0; | 498 | int crtc, ret = 0; |
448 | 499 | ||
449 | /* If drm_vblank_init() hasn't been called yet, just no-op */ | 500 | /* If drm_vblank_init() hasn't been called yet, just no-op */ |
@@ -456,28 +507,12 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, | |||
456 | goto out; | 507 | goto out; |
457 | } | 508 | } |
458 | 509 | ||
459 | /* | ||
460 | * To avoid all the problems that might happen if interrupts | ||
461 | * were enabled/disabled around or between these calls, we just | ||
462 | * have the kernel take a reference on the CRTC (just once though | ||
463 | * to avoid corrupting the count if multiple, mismatch calls occur), | ||
464 | * so that interrupts remain enabled in the interim. | ||
465 | */ | ||
466 | switch (modeset->cmd) { | 510 | switch (modeset->cmd) { |
467 | case _DRM_PRE_MODESET: | 511 | case _DRM_PRE_MODESET: |
468 | if (!dev->vblank_inmodeset[crtc]) { | 512 | drm_vblank_pre_modeset(dev, crtc); |
469 | dev->vblank_inmodeset[crtc] = 1; | ||
470 | drm_vblank_get(dev, crtc); | ||
471 | } | ||
472 | break; | 513 | break; |
473 | case _DRM_POST_MODESET: | 514 | case _DRM_POST_MODESET: |
474 | if (dev->vblank_inmodeset[crtc]) { | 515 | drm_vblank_post_modeset(dev, crtc); |
475 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
476 | dev->vblank_disable_allowed = 1; | ||
477 | dev->vblank_inmodeset[crtc] = 0; | ||
478 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
479 | drm_vblank_put(dev, crtc); | ||
480 | } | ||
481 | break; | 516 | break; |
482 | default: | 517 | default: |
483 | ret = -EINVAL; | 518 | ret = -EINVAL; |
@@ -497,15 +532,10 @@ out: | |||
497 | * \param data user argument, pointing to a drm_wait_vblank structure. | 532 | * \param data user argument, pointing to a drm_wait_vblank structure. |
498 | * \return zero on success or a negative number on failure. | 533 | * \return zero on success or a negative number on failure. |
499 | * | 534 | * |
500 | * Verifies the IRQ is installed. | 535 | * This function enables the vblank interrupt on the pipe requested, then |
501 | * | 536 | * sleeps waiting for the requested sequence number to occur, and drops |
502 | * If a signal is requested checks if this task has already scheduled the same signal | 537 | * the vblank interrupt refcount afterwards. (vblank irq disable follows that |
503 | * for the same vblank sequence number - nothing to be done in | 538 | * after a timeout with no further vblank waits scheduled). |
504 | * that case. If the number of tasks waiting for the interrupt exceeds 100 the | ||
505 | * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this | ||
506 | * task. | ||
507 | * | ||
508 | * If a signal is not requested, then calls vblank_wait(). | ||
509 | */ | 539 | */ |
510 | int drm_wait_vblank(struct drm_device *dev, void *data, | 540 | int drm_wait_vblank(struct drm_device *dev, void *data, |
511 | struct drm_file *file_priv) | 541 | struct drm_file *file_priv) |
@@ -517,6 +547,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
517 | if ((!dev->pdev->irq) || (!dev->irq_enabled)) | 547 | if ((!dev->pdev->irq) || (!dev->irq_enabled)) |
518 | return -EINVAL; | 548 | return -EINVAL; |
519 | 549 | ||
550 | if (vblwait->request.type & _DRM_VBLANK_SIGNAL) | ||
551 | return -EINVAL; | ||
552 | |||
520 | if (vblwait->request.type & | 553 | if (vblwait->request.type & |
521 | ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { | 554 | ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { |
522 | DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", | 555 | DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", |
@@ -554,87 +587,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
554 | vblwait->request.sequence = seq + 1; | 587 | vblwait->request.sequence = seq + 1; |
555 | } | 588 | } |
556 | 589 | ||
557 | if (flags & _DRM_VBLANK_SIGNAL) { | 590 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", |
558 | unsigned long irqflags; | 591 | vblwait->request.sequence, crtc); |
559 | struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; | 592 | dev->last_vblank_wait[crtc] = vblwait->request.sequence; |
560 | struct drm_vbl_sig *vbl_sig; | 593 | DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, |
594 | (((drm_vblank_count(dev, crtc) - | ||
595 | vblwait->request.sequence) <= (1 << 23)) || | ||
596 | !dev->irq_enabled)); | ||
561 | 597 | ||
562 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 598 | if (ret != -EINTR) { |
599 | struct timeval now; | ||
563 | 600 | ||
564 | /* Check if this task has already scheduled the same signal | 601 | do_gettimeofday(&now); |
565 | * for the same vblank sequence number; nothing to be done in | ||
566 | * that case | ||
567 | */ | ||
568 | list_for_each_entry(vbl_sig, vbl_sigs, head) { | ||
569 | if (vbl_sig->sequence == vblwait->request.sequence | ||
570 | && vbl_sig->info.si_signo == | ||
571 | vblwait->request.signal | ||
572 | && vbl_sig->task == current) { | ||
573 | spin_unlock_irqrestore(&dev->vbl_lock, | ||
574 | irqflags); | ||
575 | vblwait->reply.sequence = seq; | ||
576 | goto done; | ||
577 | } | ||
578 | } | ||
579 | 602 | ||
580 | if (atomic_read(&dev->vbl_signal_pending) >= 100) { | 603 | vblwait->reply.tval_sec = now.tv_sec; |
581 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | 604 | vblwait->reply.tval_usec = now.tv_usec; |
582 | ret = -EBUSY; | 605 | vblwait->reply.sequence = drm_vblank_count(dev, crtc); |
583 | goto done; | 606 | DRM_DEBUG("returning %d to client\n", |
584 | } | 607 | vblwait->reply.sequence); |
585 | |||
586 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
587 | |||
588 | vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), | ||
589 | DRM_MEM_DRIVER); | ||
590 | if (!vbl_sig) { | ||
591 | ret = -ENOMEM; | ||
592 | goto done; | ||
593 | } | ||
594 | |||
595 | /* Get a refcount on the vblank, which will be released by | ||
596 | * drm_vbl_send_signals(). | ||
597 | */ | ||
598 | ret = drm_vblank_get(dev, crtc); | ||
599 | if (ret) { | ||
600 | drm_free(vbl_sig, sizeof(struct drm_vbl_sig), | ||
601 | DRM_MEM_DRIVER); | ||
602 | goto done; | ||
603 | } | ||
604 | |||
605 | atomic_inc(&dev->vbl_signal_pending); | ||
606 | |||
607 | vbl_sig->sequence = vblwait->request.sequence; | ||
608 | vbl_sig->info.si_signo = vblwait->request.signal; | ||
609 | vbl_sig->task = current; | ||
610 | |||
611 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
612 | |||
613 | list_add_tail(&vbl_sig->head, vbl_sigs); | ||
614 | |||
615 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
616 | |||
617 | vblwait->reply.sequence = seq; | ||
618 | } else { | 608 | } else { |
619 | DRM_DEBUG("waiting on vblank count %d, crtc %d\n", | 609 | DRM_DEBUG("vblank wait interrupted by signal\n"); |
620 | vblwait->request.sequence, crtc); | ||
621 | DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, | ||
622 | ((drm_vblank_count(dev, crtc) | ||
623 | - vblwait->request.sequence) <= (1 << 23))); | ||
624 | |||
625 | if (ret != -EINTR) { | ||
626 | struct timeval now; | ||
627 | |||
628 | do_gettimeofday(&now); | ||
629 | |||
630 | vblwait->reply.tval_sec = now.tv_sec; | ||
631 | vblwait->reply.tval_usec = now.tv_usec; | ||
632 | vblwait->reply.sequence = drm_vblank_count(dev, crtc); | ||
633 | DRM_DEBUG("returning %d to client\n", | ||
634 | vblwait->reply.sequence); | ||
635 | } else { | ||
636 | DRM_DEBUG("vblank wait interrupted by signal\n"); | ||
637 | } | ||
638 | } | 610 | } |
639 | 611 | ||
640 | done: | 612 | done: |
@@ -643,46 +615,6 @@ done: | |||
643 | } | 615 | } |
644 | 616 | ||
645 | /** | 617 | /** |
646 | * Send the VBLANK signals. | ||
647 | * | ||
648 | * \param dev DRM device. | ||
649 | * \param crtc CRTC where the vblank event occurred | ||
650 | * | ||
651 | * Sends a signal for each task in drm_device::vbl_sigs and empties the list. | ||
652 | * | ||
653 | * If a signal is not requested, then calls vblank_wait(). | ||
654 | */ | ||
655 | static void drm_vbl_send_signals(struct drm_device *dev, int crtc) | ||
656 | { | ||
657 | struct drm_vbl_sig *vbl_sig, *tmp; | ||
658 | struct list_head *vbl_sigs; | ||
659 | unsigned int vbl_seq; | ||
660 | unsigned long flags; | ||
661 | |||
662 | spin_lock_irqsave(&dev->vbl_lock, flags); | ||
663 | |||
664 | vbl_sigs = &dev->vbl_sigs[crtc]; | ||
665 | vbl_seq = drm_vblank_count(dev, crtc); | ||
666 | |||
667 | list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { | ||
668 | if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { | ||
669 | vbl_sig->info.si_code = vbl_seq; | ||
670 | send_sig_info(vbl_sig->info.si_signo, | ||
671 | &vbl_sig->info, vbl_sig->task); | ||
672 | |||
673 | list_del(&vbl_sig->head); | ||
674 | |||
675 | drm_free(vbl_sig, sizeof(*vbl_sig), | ||
676 | DRM_MEM_DRIVER); | ||
677 | atomic_dec(&dev->vbl_signal_pending); | ||
678 | drm_vblank_put(dev, crtc); | ||
679 | } | ||
680 | } | ||
681 | |||
682 | spin_unlock_irqrestore(&dev->vbl_lock, flags); | ||
683 | } | ||
684 | |||
685 | /** | ||
686 | * drm_handle_vblank - handle a vblank event | 618 | * drm_handle_vblank - handle a vblank event |
687 | * @dev: DRM device | 619 | * @dev: DRM device |
688 | * @crtc: where this event occurred | 620 | * @crtc: where this event occurred |
@@ -694,6 +626,5 @@ void drm_handle_vblank(struct drm_device *dev, int crtc) | |||
694 | { | 626 | { |
695 | atomic_inc(&dev->_vblank_count[crtc]); | 627 | atomic_inc(&dev->_vblank_count[crtc]); |
696 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 628 | DRM_WAKEUP(&dev->vbl_queue[crtc]); |
697 | drm_vbl_send_signals(dev, crtc); | ||
698 | } | 629 | } |
699 | EXPORT_SYMBOL(drm_handle_vblank); | 630 | EXPORT_SYMBOL(drm_handle_vblank); |
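[Editor's aside, not part of the patch] With the signal list gone, drm_handle_vblank() reduces to a counter bump plus a wake-up on dev->vbl_queue. A minimal sketch of how a driver interrupt handler would feed it is shown below; the register read helper and the status bits are hypothetical placeholders.

static irqreturn_t example_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        u32 stat = example_read_vblank_status(dev); /* hypothetical helper */

        if (stat & EXAMPLE_PIPE_A_VBLANK)           /* hypothetical bit */
                drm_handle_vblank(dev, 0);
        if (stat & EXAMPLE_PIPE_B_VBLANK)           /* hypothetical bit */
                drm_handle_vblank(dev, 1);

        return IRQ_HANDLED;
}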
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 1cfa72031f8f..46e7b28f0707 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c | |||
@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
52 | { | 52 | { |
53 | DECLARE_WAITQUEUE(entry, current); | 53 | DECLARE_WAITQUEUE(entry, current); |
54 | struct drm_lock *lock = data; | 54 | struct drm_lock *lock = data; |
55 | struct drm_master *master = file_priv->master; | ||
55 | int ret = 0; | 56 | int ret = 0; |
56 | 57 | ||
57 | ++file_priv->lock_count; | 58 | ++file_priv->lock_count; |
@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
64 | 65 | ||
65 | DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", | 66 | DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", |
66 | lock->context, task_pid_nr(current), | 67 | lock->context, task_pid_nr(current), |
67 | dev->lock.hw_lock->lock, lock->flags); | 68 | master->lock.hw_lock->lock, lock->flags); |
68 | 69 | ||
69 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) | 70 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) |
70 | if (lock->context < 0) | 71 | if (lock->context < 0) |
71 | return -EINVAL; | 72 | return -EINVAL; |
72 | 73 | ||
73 | add_wait_queue(&dev->lock.lock_queue, &entry); | 74 | add_wait_queue(&master->lock.lock_queue, &entry); |
74 | spin_lock_bh(&dev->lock.spinlock); | 75 | spin_lock_bh(&master->lock.spinlock); |
75 | dev->lock.user_waiters++; | 76 | master->lock.user_waiters++; |
76 | spin_unlock_bh(&dev->lock.spinlock); | 77 | spin_unlock_bh(&master->lock.spinlock); |
78 | |||
77 | for (;;) { | 79 | for (;;) { |
78 | __set_current_state(TASK_INTERRUPTIBLE); | 80 | __set_current_state(TASK_INTERRUPTIBLE); |
79 | if (!dev->lock.hw_lock) { | 81 | if (!master->lock.hw_lock) { |
80 | /* Device has been unregistered */ | 82 | /* Device has been unregistered */ |
81 | ret = -EINTR; | 83 | ret = -EINTR; |
82 | break; | 84 | break; |
83 | } | 85 | } |
84 | if (drm_lock_take(&dev->lock, lock->context)) { | 86 | if (drm_lock_take(&master->lock, lock->context)) { |
85 | dev->lock.file_priv = file_priv; | 87 | master->lock.file_priv = file_priv; |
86 | dev->lock.lock_time = jiffies; | 88 | master->lock.lock_time = jiffies; |
87 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | 89 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); |
88 | break; /* Got lock */ | 90 | break; /* Got lock */ |
89 | } | 91 | } |
@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
95 | break; | 97 | break; |
96 | } | 98 | } |
97 | } | 99 | } |
98 | spin_lock_bh(&dev->lock.spinlock); | 100 | spin_lock_bh(&master->lock.spinlock); |
99 | dev->lock.user_waiters--; | 101 | master->lock.user_waiters--; |
100 | spin_unlock_bh(&dev->lock.spinlock); | 102 | spin_unlock_bh(&master->lock.spinlock); |
101 | __set_current_state(TASK_RUNNING); | 103 | __set_current_state(TASK_RUNNING); |
102 | remove_wait_queue(&dev->lock.lock_queue, &entry); | 104 | remove_wait_queue(&master->lock.lock_queue, &entry); |
103 | 105 | ||
104 | DRM_DEBUG("%d %s\n", lock->context, | 106 | DRM_DEBUG("%d %s\n", lock->context, |
105 | ret ? "interrupted" : "has lock"); | 107 | ret ? "interrupted" : "has lock"); |
@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
108 | /* don't set the block all signals on the master process for now | 110 | /* don't set the block all signals on the master process for now |
109 | * really probably not the correct answer but lets us debug xkb | 111 | * really probably not the correct answer but lets us debug xkb |
110 | * xserver for now */ | 112 | * xserver for now */ |
111 | if (!file_priv->master) { | 113 | if (!file_priv->is_master) { |
112 | sigemptyset(&dev->sigmask); | 114 | sigemptyset(&dev->sigmask); |
113 | sigaddset(&dev->sigmask, SIGSTOP); | 115 | sigaddset(&dev->sigmask, SIGSTOP); |
114 | sigaddset(&dev->sigmask, SIGTSTP); | 116 | sigaddset(&dev->sigmask, SIGTSTP); |
115 | sigaddset(&dev->sigmask, SIGTTIN); | 117 | sigaddset(&dev->sigmask, SIGTTIN); |
116 | sigaddset(&dev->sigmask, SIGTTOU); | 118 | sigaddset(&dev->sigmask, SIGTTOU); |
117 | dev->sigdata.context = lock->context; | 119 | dev->sigdata.context = lock->context; |
118 | dev->sigdata.lock = dev->lock.hw_lock; | 120 | dev->sigdata.lock = master->lock.hw_lock; |
119 | block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); | 121 | block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); |
120 | } | 122 | } |
121 | 123 | ||
@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
154 | int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | 156 | int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) |
155 | { | 157 | { |
156 | struct drm_lock *lock = data; | 158 | struct drm_lock *lock = data; |
159 | struct drm_master *master = file_priv->master; | ||
157 | 160 | ||
158 | if (lock->context == DRM_KERNEL_CONTEXT) { | 161 | if (lock->context == DRM_KERNEL_CONTEXT) { |
159 | DRM_ERROR("Process %d using kernel context %d\n", | 162 | DRM_ERROR("Process %d using kernel context %d\n", |
@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
169 | if (dev->driver->kernel_context_switch_unlock) | 172 | if (dev->driver->kernel_context_switch_unlock) |
170 | dev->driver->kernel_context_switch_unlock(dev); | 173 | dev->driver->kernel_context_switch_unlock(dev); |
171 | else { | 174 | else { |
172 | if (drm_lock_free(&dev->lock,lock->context)) { | 175 | if (drm_lock_free(&master->lock, lock->context)) { |
173 | /* FIXME: Should really bail out here. */ | 176 | /* FIXME: Should really bail out here. */ |
174 | } | 177 | } |
175 | } | 178 | } |
@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release); | |||
379 | 382 | ||
380 | int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) | 383 | int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) |
381 | { | 384 | { |
382 | return (file_priv->lock_count && dev->lock.hw_lock && | 385 | struct drm_master *master = file_priv->master; |
383 | _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && | 386 | return (file_priv->lock_count && master->lock.hw_lock && |
384 | dev->lock.file_priv == file_priv); | 387 | _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && |
388 | master->lock.file_priv == file_priv); | ||
385 | } | 389 | } |
386 | 390 | ||
387 | EXPORT_SYMBOL(drm_i_have_hw_lock); | 391 | EXPORT_SYMBOL(drm_i_have_hw_lock); |
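[Editor's aside, not part of the patch] Because the heavyweight lock now lives in file_priv->master rather than in the device, a driver ioctl that requires the HW lock can keep using drm_i_have_hw_lock() unchanged. A hedged sketch of such a check (the ioctl itself is illustrative):

static int example_locked_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        if (!drm_i_have_hw_lock(dev, file_priv)) {
                DRM_ERROR("ioctl called without holding the HW lock\n");
                return -EINVAL;
        }

        /* ... safe to touch the hardware here ... */
        return 0;
}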
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 803bc9e7ce3c..bcc869bc4092 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c | |||
@@ -171,9 +171,14 @@ EXPORT_SYMBOL(drm_core_ioremap); | |||
171 | 171 | ||
172 | void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) | 172 | void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) |
173 | { | 173 | { |
174 | map->handle = ioremap_wc(map->offset, map->size); | 174 | if (drm_core_has_AGP(dev) && |
175 | dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) | ||
176 | map->handle = agp_remap(map->offset, map->size, dev); | ||
177 | else | ||
178 | map->handle = ioremap_wc(map->offset, map->size); | ||
175 | } | 179 | } |
176 | EXPORT_SYMBOL(drm_core_ioremap_wc); | 180 | EXPORT_SYMBOL(drm_core_ioremap_wc); |
181 | |||
177 | void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) | 182 | void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) |
178 | { | 183 | { |
179 | if (!map->handle || !map->size) | 184 | if (!map->handle || !map->size) |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 217ad7dc7076..367c590ffbba 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm) | |||
296 | 296 | ||
297 | drm_free(entry, sizeof(*entry), DRM_MEM_MM); | 297 | drm_free(entry, sizeof(*entry), DRM_MEM_MM); |
298 | } | 298 | } |
299 | EXPORT_SYMBOL(drm_mm_takedown); | ||
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c new file mode 100644 index 000000000000..c9b80fdd4630 --- /dev/null +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -0,0 +1,576 @@ | |||
1 | /* | ||
2 | * The list_sort function is (presumably) licensed under the GPL (see the | ||
3 | * top level "COPYING" file for details). | ||
4 | * | ||
5 | * The remainder of this file is: | ||
6 | * | ||
7 | * Copyright © 1997-2003 by The XFree86 Project, Inc. | ||
8 | * Copyright © 2007 Dave Airlie | ||
9 | * Copyright © 2007-2008 Intel Corporation | ||
10 | * Jesse Barnes <jesse.barnes@intel.com> | ||
11 | * | ||
12 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
13 | * copy of this software and associated documentation files (the "Software"), | ||
14 | * to deal in the Software without restriction, including without limitation | ||
15 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
16 | * and/or sell copies of the Software, and to permit persons to whom the | ||
17 | * Software is furnished to do so, subject to the following conditions: | ||
18 | * | ||
19 | * The above copyright notice and this permission notice shall be included in | ||
20 | * all copies or substantial portions of the Software. | ||
21 | * | ||
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
25 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
26 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
27 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
28 | * OTHER DEALINGS IN THE SOFTWARE. | ||
29 | * | ||
30 | * Except as contained in this notice, the name of the copyright holder(s) | ||
31 | * and author(s) shall not be used in advertising or otherwise to promote | ||
32 | * the sale, use or other dealings in this Software without prior written | ||
33 | * authorization from the copyright holder(s) and author(s). | ||
34 | */ | ||
35 | |||
36 | #include <linux/list.h> | ||
37 | #include "drmP.h" | ||
38 | #include "drm.h" | ||
39 | #include "drm_crtc.h" | ||
40 | |||
41 | /** | ||
42 | * drm_mode_debug_printmodeline - debug print a mode | ||
43 | * @mode: mode to print | ||
44 | * | ||
45 | * | ||
46 | * LOCKING: | ||
47 | * None. | ||
48 | * | ||
49 | * Describe @mode using DRM_DEBUG. | ||
50 | */ | ||
51 | void drm_mode_debug_printmodeline(struct drm_display_mode *mode) | ||
52 | { | ||
53 | DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", | ||
54 | mode->base.id, mode->name, mode->vrefresh, mode->clock, | ||
55 | mode->hdisplay, mode->hsync_start, | ||
56 | mode->hsync_end, mode->htotal, | ||
57 | mode->vdisplay, mode->vsync_start, | ||
58 | mode->vsync_end, mode->vtotal, mode->type, mode->flags); | ||
59 | } | ||
60 | EXPORT_SYMBOL(drm_mode_debug_printmodeline); | ||
61 | |||
62 | /** | ||
63 | * drm_mode_set_name - set the name on a mode | ||
64 | * @mode: name will be set in this mode | ||
65 | * | ||
66 | * LOCKING: | ||
67 | * None. | ||
68 | * | ||
69 | * Set the name of @mode to a standard format. | ||
70 | */ | ||
71 | void drm_mode_set_name(struct drm_display_mode *mode) | ||
72 | { | ||
73 | snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, | ||
74 | mode->vdisplay); | ||
75 | } | ||
76 | EXPORT_SYMBOL(drm_mode_set_name); | ||
77 | |||
78 | /** | ||
79 | * drm_mode_list_concat - move modes from one list to another | ||
80 | * @head: source list | ||
81 | * @new: dst list | ||
82 | * | ||
83 | * LOCKING: | ||
84 | * Caller must ensure both lists are locked. | ||
85 | * | ||
86 | * Move all the modes from @head to @new. | ||
87 | */ | ||
88 | void drm_mode_list_concat(struct list_head *head, struct list_head *new) | ||
89 | { | ||
90 | |||
91 | struct list_head *entry, *tmp; | ||
92 | |||
93 | list_for_each_safe(entry, tmp, head) { | ||
94 | list_move_tail(entry, new); | ||
95 | } | ||
96 | } | ||
97 | EXPORT_SYMBOL(drm_mode_list_concat); | ||
98 | |||
99 | /** | ||
100 | * drm_mode_width - get the width of a mode | ||
101 | * @mode: mode | ||
102 | * | ||
103 | * LOCKING: | ||
104 | * None. | ||
105 | * | ||
106 | * Return @mode's width (hdisplay) value. | ||
107 | * | ||
108 | * FIXME: is this needed? | ||
109 | * | ||
110 | * RETURNS: | ||
111 | * @mode->hdisplay | ||
112 | */ | ||
113 | int drm_mode_width(struct drm_display_mode *mode) | ||
114 | { | ||
115 | return mode->hdisplay; | ||
116 | |||
117 | } | ||
118 | EXPORT_SYMBOL(drm_mode_width); | ||
119 | |||
120 | /** | ||
121 | * drm_mode_height - get the height of a mode | ||
122 | * @mode: mode | ||
123 | * | ||
124 | * LOCKING: | ||
125 | * None. | ||
126 | * | ||
127 | * Return @mode's height (vdisplay) value. | ||
128 | * | ||
129 | * FIXME: is this needed? | ||
130 | * | ||
131 | * RETURNS: | ||
132 | * @mode->vdisplay | ||
133 | */ | ||
134 | int drm_mode_height(struct drm_display_mode *mode) | ||
135 | { | ||
136 | return mode->vdisplay; | ||
137 | } | ||
138 | EXPORT_SYMBOL(drm_mode_height); | ||
139 | |||
140 | /** | ||
141 | * drm_mode_vrefresh - get the vrefresh of a mode | ||
142 | * @mode: mode | ||
143 | * | ||
144 | * LOCKING: | ||
145 | * None. | ||
146 | * | ||
147 | * Return @mode's vrefresh rate or calculate it if necessary. | ||
148 | * | ||
149 | * FIXME: why is this needed? shouldn't vrefresh be set already? | ||
150 | * | ||
151 | * RETURNS: | ||
152 | * Vertical refresh rate of @mode, multiplied by 1000 for extra precision. | ||
153 | */ | ||
154 | int drm_mode_vrefresh(struct drm_display_mode *mode) | ||
155 | { | ||
156 | int refresh = 0; | ||
157 | unsigned int calc_val; | ||
158 | |||
159 | if (mode->vrefresh > 0) | ||
160 | refresh = mode->vrefresh; | ||
161 | else if (mode->htotal > 0 && mode->vtotal > 0) { | ||
162 | /* work out the vrefresh; the value will be x1000 */ | ||
163 | calc_val = (mode->clock * 1000); | ||
164 | |||
165 | calc_val /= mode->htotal; | ||
166 | calc_val *= 1000; | ||
167 | calc_val /= mode->vtotal; | ||
168 | |||
169 | refresh = calc_val; | ||
170 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
171 | refresh *= 2; | ||
172 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
173 | refresh /= 2; | ||
174 | if (mode->vscan > 1) | ||
175 | refresh /= mode->vscan; | ||
176 | } | ||
177 | return refresh; | ||
178 | } | ||
179 | EXPORT_SYMBOL(drm_mode_vrefresh); | ||
180 | |||
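[Editor's aside, not part of the patch] A quick sanity check of the x1000 scaling, using illustrative 640x480@60 timings (25.175 MHz pixel clock); the numbers are assumptions for the example, not values from this patch.

static int example_check_vrefresh(void)
{
        struct drm_display_mode m = {
                .clock  = 25175,        /* kHz, illustrative 640x480@60 timing */
                .htotal = 800,
                .vtotal = 525,
        };

        /* 25175000/800 = 31468; 31468*1000/525 = 59939, i.e. ~59.94 Hz x 1000 */
        return drm_mode_vrefresh(&m);
}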
181 | /** | ||
182 | * drm_mode_set_crtcinfo - set CRTC modesetting parameters | ||
183 | * @p: mode | ||
184 | * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V) | ||
185 | * | ||
186 | * LOCKING: | ||
187 | * None. | ||
188 | * | ||
189 | * Setup the CRTC modesetting parameters for @p, adjusting if necessary. | ||
190 | */ | ||
191 | void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) | ||
192 | { | ||
193 | if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) | ||
194 | return; | ||
195 | |||
196 | p->crtc_hdisplay = p->hdisplay; | ||
197 | p->crtc_hsync_start = p->hsync_start; | ||
198 | p->crtc_hsync_end = p->hsync_end; | ||
199 | p->crtc_htotal = p->htotal; | ||
200 | p->crtc_hskew = p->hskew; | ||
201 | p->crtc_vdisplay = p->vdisplay; | ||
202 | p->crtc_vsync_start = p->vsync_start; | ||
203 | p->crtc_vsync_end = p->vsync_end; | ||
204 | p->crtc_vtotal = p->vtotal; | ||
205 | |||
206 | if (p->flags & DRM_MODE_FLAG_INTERLACE) { | ||
207 | if (adjust_flags & CRTC_INTERLACE_HALVE_V) { | ||
208 | p->crtc_vdisplay /= 2; | ||
209 | p->crtc_vsync_start /= 2; | ||
210 | p->crtc_vsync_end /= 2; | ||
211 | p->crtc_vtotal /= 2; | ||
212 | } | ||
213 | |||
214 | p->crtc_vtotal |= 1; | ||
215 | } | ||
216 | |||
217 | if (p->flags & DRM_MODE_FLAG_DBLSCAN) { | ||
218 | p->crtc_vdisplay *= 2; | ||
219 | p->crtc_vsync_start *= 2; | ||
220 | p->crtc_vsync_end *= 2; | ||
221 | p->crtc_vtotal *= 2; | ||
222 | } | ||
223 | |||
224 | if (p->vscan > 1) { | ||
225 | p->crtc_vdisplay *= p->vscan; | ||
226 | p->crtc_vsync_start *= p->vscan; | ||
227 | p->crtc_vsync_end *= p->vscan; | ||
228 | p->crtc_vtotal *= p->vscan; | ||
229 | } | ||
230 | |||
231 | p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); | ||
232 | p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); | ||
233 | p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); | ||
234 | p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); | ||
235 | |||
236 | p->crtc_hadjusted = false; | ||
237 | p->crtc_vadjusted = false; | ||
238 | } | ||
239 | EXPORT_SYMBOL(drm_mode_set_crtcinfo); | ||
240 | |||
241 | |||
242 | /** | ||
243 | * drm_mode_duplicate - allocate and duplicate an existing mode | ||
244 | * @mode: mode to duplicate | ||
245 | * | ||
246 | * LOCKING: | ||
247 | * None. | ||
248 | * | ||
249 | * Just allocate a new mode, copy the existing mode into it, and return | ||
250 | * a pointer to it. Used to create new instances of established modes. | ||
251 | */ | ||
252 | struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | ||
253 | struct drm_display_mode *mode) | ||
254 | { | ||
255 | struct drm_display_mode *nmode; | ||
256 | int new_id; | ||
257 | |||
258 | nmode = drm_mode_create(dev); | ||
259 | if (!nmode) | ||
260 | return NULL; | ||
261 | |||
262 | new_id = nmode->base.id; | ||
263 | *nmode = *mode; | ||
264 | nmode->base.id = new_id; | ||
265 | INIT_LIST_HEAD(&nmode->head); | ||
266 | return nmode; | ||
267 | } | ||
268 | EXPORT_SYMBOL(drm_mode_duplicate); | ||
269 | |||
270 | /** | ||
271 | * drm_mode_equal - test modes for equality | ||
272 | * @mode1: first mode | ||
273 | * @mode2: second mode | ||
274 | * | ||
275 | * LOCKING: | ||
276 | * None. | ||
277 | * | ||
278 | * Check to see if @mode1 and @mode2 are equivalent. | ||
279 | * | ||
280 | * RETURNS: | ||
281 | * True if the modes are equal, false otherwise. | ||
282 | */ | ||
283 | bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) | ||
284 | { | ||
285 | /* do the clock check in picoseconds so fb-derived modes get | ||
286 | * matched consistently */ | ||
287 | if (mode1->clock && mode2->clock) { | ||
288 | if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock)) | ||
289 | return false; | ||
290 | } else if (mode1->clock != mode2->clock) | ||
291 | return false; | ||
292 | |||
293 | if (mode1->hdisplay == mode2->hdisplay && | ||
294 | mode1->hsync_start == mode2->hsync_start && | ||
295 | mode1->hsync_end == mode2->hsync_end && | ||
296 | mode1->htotal == mode2->htotal && | ||
297 | mode1->hskew == mode2->hskew && | ||
298 | mode1->vdisplay == mode2->vdisplay && | ||
299 | mode1->vsync_start == mode2->vsync_start && | ||
300 | mode1->vsync_end == mode2->vsync_end && | ||
301 | mode1->vtotal == mode2->vtotal && | ||
302 | mode1->vscan == mode2->vscan && | ||
303 | mode1->flags == mode2->flags) | ||
304 | return true; | ||
305 | |||
306 | return false; | ||
307 | } | ||
308 | EXPORT_SYMBOL(drm_mode_equal); | ||
309 | |||
310 | /** | ||
311 | * drm_mode_validate_size - make sure modes adhere to size constraints | ||
312 | * @dev: DRM device | ||
313 | * @mode_list: list of modes to check | ||
314 | * @maxX: maximum width | ||
315 | * @maxY: maximum height | ||
316 | * @maxPitch: max pitch | ||
317 | * | ||
318 | * LOCKING: | ||
319 | * Caller must hold a lock protecting @mode_list. | ||
320 | * | ||
321 | * The DRM device (@dev) has size and pitch limits. Here we validate the | ||
322 | * modes we probed for @dev against those limits and set their status as | ||
323 | * necessary. | ||
324 | */ | ||
325 | void drm_mode_validate_size(struct drm_device *dev, | ||
326 | struct list_head *mode_list, | ||
327 | int maxX, int maxY, int maxPitch) | ||
328 | { | ||
329 | struct drm_display_mode *mode; | ||
330 | |||
331 | list_for_each_entry(mode, mode_list, head) { | ||
332 | if (maxPitch > 0 && mode->hdisplay > maxPitch) | ||
333 | mode->status = MODE_BAD_WIDTH; | ||
334 | |||
335 | if (maxX > 0 && mode->hdisplay > maxX) | ||
336 | mode->status = MODE_VIRTUAL_X; | ||
337 | |||
338 | if (maxY > 0 && mode->vdisplay > maxY) | ||
339 | mode->status = MODE_VIRTUAL_Y; | ||
340 | } | ||
341 | } | ||
342 | EXPORT_SYMBOL(drm_mode_validate_size); | ||
343 | |||
344 | /** | ||
345 | * drm_mode_validate_clocks - validate modes against clock limits | ||
346 | * @dev: DRM device | ||
347 | * @mode_list: list of modes to check | ||
348 | * @min: minimum clock rate array | ||
349 | * @max: maximum clock rate array | ||
350 | * @n_ranges: number of clock ranges (size of arrays) | ||
351 | * | ||
352 | * LOCKING: | ||
353 | * Caller must hold a lock protecting @mode_list. | ||
354 | * | ||
355 | * Some code may need to check a mode list against the clock limits of the | ||
356 | * device in question. This function walks the mode list, testing to make | ||
357 | * sure each mode falls within a given range (defined by @min and @max | ||
358 | * arrays) and sets @mode->status as needed. | ||
359 | */ | ||
360 | void drm_mode_validate_clocks(struct drm_device *dev, | ||
361 | struct list_head *mode_list, | ||
362 | int *min, int *max, int n_ranges) | ||
363 | { | ||
364 | struct drm_display_mode *mode; | ||
365 | int i; | ||
366 | |||
367 | list_for_each_entry(mode, mode_list, head) { | ||
368 | bool good = false; | ||
369 | for (i = 0; i < n_ranges; i++) { | ||
370 | if (mode->clock >= min[i] && mode->clock <= max[i]) { | ||
371 | good = true; | ||
372 | break; | ||
373 | } | ||
374 | } | ||
375 | if (!good) | ||
376 | mode->status = MODE_CLOCK_RANGE; | ||
377 | } | ||
378 | } | ||
379 | EXPORT_SYMBOL(drm_mode_validate_clocks); | ||
380 | |||
381 | /** | ||
382 | * drm_mode_prune_invalid - remove invalid modes from mode list | ||
383 | * @dev: DRM device | ||
384 | * @mode_list: list of modes to check | ||
385 | * @verbose: be verbose about it | ||
386 | * | ||
387 | * LOCKING: | ||
388 | * Caller must hold a lock protecting @mode_list. | ||
389 | * | ||
390 | * Once mode list generation is complete, a caller can use this routine to | ||
391 | * remove invalid modes from a mode list. If any of the modes have a | ||
392 | * status other than %MODE_OK, they are removed from @mode_list and freed. | ||
393 | */ | ||
394 | void drm_mode_prune_invalid(struct drm_device *dev, | ||
395 | struct list_head *mode_list, bool verbose) | ||
396 | { | ||
397 | struct drm_display_mode *mode, *t; | ||
398 | |||
399 | list_for_each_entry_safe(mode, t, mode_list, head) { | ||
400 | if (mode->status != MODE_OK) { | ||
401 | list_del(&mode->head); | ||
402 | if (verbose) { | ||
403 | drm_mode_debug_printmodeline(mode); | ||
404 | DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status); | ||
405 | } | ||
406 | drm_mode_destroy(dev, mode); | ||
407 | } | ||
408 | } | ||
409 | } | ||
410 | EXPORT_SYMBOL(drm_mode_prune_invalid); | ||
411 | |||
412 | /** | ||
413 | * drm_mode_compare - compare modes for favorability | ||
414 | * @lh_a: list_head for first mode | ||
415 | * @lh_b: list_head for second mode | ||
416 | * | ||
417 | * LOCKING: | ||
418 | * None. | ||
419 | * | ||
420 | * Compare two modes, given by @lh_a and @lh_b, returning a value indicating | ||
421 | * which is better. | ||
422 | * | ||
423 | * RETURNS: | ||
424 | * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or | ||
425 | * positive if @lh_b is better than @lh_a. | ||
426 | */ | ||
427 | static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b) | ||
428 | { | ||
429 | struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); | ||
430 | struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head); | ||
431 | int diff; | ||
432 | |||
433 | diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) - | ||
434 | ((a->type & DRM_MODE_TYPE_PREFERRED) != 0); | ||
435 | if (diff) | ||
436 | return diff; | ||
437 | diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay; | ||
438 | if (diff) | ||
439 | return diff; | ||
440 | diff = b->clock - a->clock; | ||
441 | return diff; | ||
442 | } | ||
443 | |||
444 | /* FIXME: why don't we have a generic list sort function? */ | ||
445 | /* list sort from Mark J Roberts (mjr@znex.org) */ | ||
446 | void list_sort(struct list_head *head, | ||
447 | int (*cmp)(struct list_head *a, struct list_head *b)) | ||
448 | { | ||
449 | struct list_head *p, *q, *e, *list, *tail, *oldhead; | ||
450 | int insize, nmerges, psize, qsize, i; | ||
451 | |||
452 | list = head->next; | ||
453 | list_del(head); | ||
454 | insize = 1; | ||
455 | for (;;) { | ||
456 | p = oldhead = list; | ||
457 | list = tail = NULL; | ||
458 | nmerges = 0; | ||
459 | |||
460 | while (p) { | ||
461 | nmerges++; | ||
462 | q = p; | ||
463 | psize = 0; | ||
464 | for (i = 0; i < insize; i++) { | ||
465 | psize++; | ||
466 | q = q->next == oldhead ? NULL : q->next; | ||
467 | if (!q) | ||
468 | break; | ||
469 | } | ||
470 | |||
471 | qsize = insize; | ||
472 | while (psize > 0 || (qsize > 0 && q)) { | ||
473 | if (!psize) { | ||
474 | e = q; | ||
475 | q = q->next; | ||
476 | qsize--; | ||
477 | if (q == oldhead) | ||
478 | q = NULL; | ||
479 | } else if (!qsize || !q) { | ||
480 | e = p; | ||
481 | p = p->next; | ||
482 | psize--; | ||
483 | if (p == oldhead) | ||
484 | p = NULL; | ||
485 | } else if (cmp(p, q) <= 0) { | ||
486 | e = p; | ||
487 | p = p->next; | ||
488 | psize--; | ||
489 | if (p == oldhead) | ||
490 | p = NULL; | ||
491 | } else { | ||
492 | e = q; | ||
493 | q = q->next; | ||
494 | qsize--; | ||
495 | if (q == oldhead) | ||
496 | q = NULL; | ||
497 | } | ||
498 | if (tail) | ||
499 | tail->next = e; | ||
500 | else | ||
501 | list = e; | ||
502 | e->prev = tail; | ||
503 | tail = e; | ||
504 | } | ||
505 | p = q; | ||
506 | } | ||
507 | |||
508 | tail->next = list; | ||
509 | list->prev = tail; | ||
510 | |||
511 | if (nmerges <= 1) | ||
512 | break; | ||
513 | |||
514 | insize *= 2; | ||
515 | } | ||
516 | |||
517 | head->next = list; | ||
518 | head->prev = list->prev; | ||
519 | list->prev->next = head; | ||
520 | list->prev = head; | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * drm_mode_sort - sort mode list | ||
525 | * @mode_list: list to sort | ||
526 | * | ||
527 | * LOCKING: | ||
528 | * Caller must hold a lock protecting @mode_list. | ||
529 | * | ||
530 | * Sort @mode_list by favorability, putting good modes first. | ||
531 | */ | ||
532 | void drm_mode_sort(struct list_head *mode_list) | ||
533 | { | ||
534 | list_sort(mode_list, drm_mode_compare); | ||
535 | } | ||
536 | EXPORT_SYMBOL(drm_mode_sort); | ||
537 | |||
538 | /** | ||
539 | * drm_mode_connector_list_update - update the mode list for the connector | ||
540 | * @connector: the connector to update | ||
541 | * | ||
542 | * LOCKING: | ||
543 | * Caller must hold a lock protecting the mode lists of @connector. | ||
544 | * | ||
545 | * This moves the modes from @connector's probed_modes list to the | ||
546 | * actual mode list. Each probed mode is compared against the current | ||
547 | * list and only new modes are added. Any mode left unverified after | ||
548 | * this point will be removed by drm_mode_prune_invalid(). | ||
549 | */ | ||
550 | void drm_mode_connector_list_update(struct drm_connector *connector) | ||
551 | { | ||
552 | struct drm_display_mode *mode; | ||
553 | struct drm_display_mode *pmode, *pt; | ||
554 | int found_it; | ||
555 | |||
556 | list_for_each_entry_safe(pmode, pt, &connector->probed_modes, | ||
557 | head) { | ||
558 | found_it = 0; | ||
559 | /* go through current modes checking for the new probed mode */ | ||
560 | list_for_each_entry(mode, &connector->modes, head) { | ||
561 | if (drm_mode_equal(pmode, mode)) { | ||
562 | found_it = 1; | ||
563 | /* if equal delete the probed mode */ | ||
564 | mode->status = pmode->status; | ||
565 | list_del(&pmode->head); | ||
566 | drm_mode_destroy(connector->dev, pmode); | ||
567 | break; | ||
568 | } | ||
569 | } | ||
570 | |||
571 | if (!found_it) { | ||
572 | list_move_tail(&pmode->head, &connector->modes); | ||
573 | } | ||
574 | } | ||
575 | } | ||
576 | EXPORT_SYMBOL(drm_mode_connector_list_update); | ||
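[Editor's aside, not part of the patch] Taken together, these helpers form the tail end of a connector probe: the driver fills connector->probed_modes (typically from EDID), then folds, validates, prunes and sorts the result. A hedged sketch of that sequence, with illustrative size limits and no driver specifics:

static void example_finish_probe(struct drm_connector *connector,
                                 int max_w, int max_h)
{
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode;

        /* connector->probed_modes was filled by the driver (not shown) */
        drm_mode_connector_list_update(connector);
        drm_mode_validate_size(dev, &connector->modes, max_w, max_h, 0);
        drm_mode_prune_invalid(dev, &connector->modes, true);
        drm_mode_sort(&connector->modes);

        list_for_each_entry(mode, &connector->modes, head)
                drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
}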
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index ae73b7f7249a..b756f043a5f4 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c | |||
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset, | |||
49 | int request, int *eof, void *data); | 49 | int request, int *eof, void *data); |
50 | static int drm_bufs_info(char *buf, char **start, off_t offset, | 50 | static int drm_bufs_info(char *buf, char **start, off_t offset, |
51 | int request, int *eof, void *data); | 51 | int request, int *eof, void *data); |
52 | static int drm_vblank_info(char *buf, char **start, off_t offset, | ||
53 | int request, int *eof, void *data); | ||
52 | static int drm_gem_name_info(char *buf, char **start, off_t offset, | 54 | static int drm_gem_name_info(char *buf, char **start, off_t offset, |
53 | int request, int *eof, void *data); | 55 | int request, int *eof, void *data); |
54 | static int drm_gem_object_info(char *buf, char **start, off_t offset, | 56 | static int drm_gem_object_info(char *buf, char **start, off_t offset, |
@@ -72,6 +74,7 @@ static struct drm_proc_list { | |||
72 | {"clients", drm_clients_info, 0}, | 74 | {"clients", drm_clients_info, 0}, |
73 | {"queues", drm_queues_info, 0}, | 75 | {"queues", drm_queues_info, 0}, |
74 | {"bufs", drm_bufs_info, 0}, | 76 | {"bufs", drm_bufs_info, 0}, |
77 | {"vblank", drm_vblank_info, 0}, | ||
75 | {"gem_names", drm_gem_name_info, DRIVER_GEM}, | 78 | {"gem_names", drm_gem_name_info, DRIVER_GEM}, |
76 | {"gem_objects", drm_gem_object_info, DRIVER_GEM}, | 79 | {"gem_objects", drm_gem_object_info, DRIVER_GEM}, |
77 | #if DRM_DEBUG_CODE | 80 | #if DRM_DEBUG_CODE |
@@ -195,6 +198,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, | |||
195 | int *eof, void *data) | 198 | int *eof, void *data) |
196 | { | 199 | { |
197 | struct drm_minor *minor = (struct drm_minor *) data; | 200 | struct drm_minor *minor = (struct drm_minor *) data; |
201 | struct drm_master *master = minor->master; | ||
198 | struct drm_device *dev = minor->dev; | 202 | struct drm_device *dev = minor->dev; |
199 | int len = 0; | 203 | int len = 0; |
200 | 204 | ||
@@ -203,13 +207,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, | |||
203 | return 0; | 207 | return 0; |
204 | } | 208 | } |
205 | 209 | ||
210 | if (!master) | ||
211 | return 0; | ||
212 | |||
206 | *start = &buf[offset]; | 213 | *start = &buf[offset]; |
207 | *eof = 0; | 214 | *eof = 0; |
208 | 215 | ||
209 | if (dev->unique) { | 216 | if (master->unique) { |
210 | DRM_PROC_PRINT("%s %s %s\n", | 217 | DRM_PROC_PRINT("%s %s %s\n", |
211 | dev->driver->pci_driver.name, | 218 | dev->driver->pci_driver.name, |
212 | pci_name(dev->pdev), dev->unique); | 219 | pci_name(dev->pdev), master->unique); |
213 | } else { | 220 | } else { |
214 | DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, | 221 | DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, |
215 | pci_name(dev->pdev)); | 222 | pci_name(dev->pdev)); |
@@ -454,6 +461,66 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request, | |||
454 | } | 461 | } |
455 | 462 | ||
456 | /** | 463 | /** |
464 | * Called when "/proc/dri/.../vblank" is read. | ||
465 | * | ||
466 | * \param buf output buffer. | ||
467 | * \param start start of output data. | ||
468 | * \param offset requested start offset. | ||
469 | * \param request requested number of bytes. | ||
470 | * \param eof whether there is no more data to return. | ||
471 | * \param data private data. | ||
472 | * \return number of written bytes. | ||
473 | */ | ||
474 | static int drm__vblank_info(char *buf, char **start, off_t offset, int request, | ||
475 | int *eof, void *data) | ||
476 | { | ||
477 | struct drm_minor *minor = (struct drm_minor *) data; | ||
478 | struct drm_device *dev = minor->dev; | ||
479 | int len = 0; | ||
480 | int crtc; | ||
481 | |||
482 | if (offset > DRM_PROC_LIMIT) { | ||
483 | *eof = 1; | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | *start = &buf[offset]; | ||
488 | *eof = 0; | ||
489 | |||
490 | for (crtc = 0; crtc < dev->num_crtcs; crtc++) { | ||
491 | DRM_PROC_PRINT("CRTC %d enable: %d\n", | ||
492 | crtc, atomic_read(&dev->vblank_refcount[crtc])); | ||
493 | DRM_PROC_PRINT("CRTC %d counter: %d\n", | ||
494 | crtc, drm_vblank_count(dev, crtc)); | ||
495 | DRM_PROC_PRINT("CRTC %d last wait: %d\n", | ||
496 | crtc, dev->last_vblank_wait[crtc]); | ||
497 | DRM_PROC_PRINT("CRTC %d in modeset: %d\n", | ||
498 | crtc, dev->vblank_inmodeset[crtc]); | ||
499 | } | ||
500 | |||
501 | if (len > request + offset) | ||
502 | return request; | ||
503 | *eof = 1; | ||
504 | return len - offset; | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock. | ||
509 | */ | ||
510 | static int drm_vblank_info(char *buf, char **start, off_t offset, int request, | ||
511 | int *eof, void *data) | ||
512 | { | ||
513 | struct drm_minor *minor = (struct drm_minor *) data; | ||
514 | struct drm_device *dev = minor->dev; | ||
515 | int ret; | ||
516 | |||
517 | mutex_lock(&dev->struct_mutex); | ||
518 | ret = drm__vblank_info(buf, start, offset, request, eof, data); | ||
519 | mutex_unlock(&dev->struct_mutex); | ||
520 | return ret; | ||
521 | } | ||
522 | |||
523 | /** | ||
457 | * Called when "/proc/dri/.../clients" is read. | 524 | * Called when "/proc/dri/.../clients" is read. |
458 | * | 525 | * |
459 | * \param buf output buffer. | 526 | * \param buf output buffer. |
@@ -611,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, | |||
611 | *start = &buf[offset]; | 678 | *start = &buf[offset]; |
612 | *eof = 0; | 679 | *eof = 0; |
613 | 680 | ||
614 | DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", | 681 | DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n", |
615 | atomic_read(&dev->vma_count), | 682 | atomic_read(&dev->vma_count), |
616 | high_memory, virt_to_phys(high_memory)); | 683 | high_memory, (u64)virt_to_phys(high_memory)); |
617 | list_for_each_entry(pt, &dev->vmalist, head) { | 684 | list_for_each_entry(pt, &dev->vmalist, head) { |
618 | if (!(vma = pt->vma)) | 685 | if (!(vma = pt->vma)) |
619 | continue; | 686 | continue; |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 66c96ec66672..46bb923b097c 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -57,6 +57,14 @@ static int drm_minor_get_id(struct drm_device *dev, int type) | |||
57 | int ret; | 57 | int ret; |
58 | int base = 0, limit = 63; | 58 | int base = 0, limit = 63; |
59 | 59 | ||
60 | if (type == DRM_MINOR_CONTROL) { | ||
61 | base += 64; | ||
62 | limit = base + 127; | ||
63 | } else if (type == DRM_MINOR_RENDER) { | ||
64 | base += 128; | ||
65 | limit = base + 255; | ||
66 | } | ||
67 | |||
60 | again: | 68 | again: |
61 | if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) { | 69 | if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) { |
62 | DRM_ERROR("Out of memory expanding drawable idr\n"); | 70 | DRM_ERROR("Out of memory expanding drawable idr\n"); |
@@ -79,6 +87,112 @@ again: | |||
79 | return new_id; | 87 | return new_id; |
80 | } | 88 | } |
81 | 89 | ||
90 | struct drm_master *drm_master_create(struct drm_minor *minor) | ||
91 | { | ||
92 | struct drm_master *master; | ||
93 | |||
94 | master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER); | ||
95 | if (!master) | ||
96 | return NULL; | ||
97 | |||
98 | kref_init(&master->refcount); | ||
99 | spin_lock_init(&master->lock.spinlock); | ||
100 | init_waitqueue_head(&master->lock.lock_queue); | ||
101 | drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); | ||
102 | INIT_LIST_HEAD(&master->magicfree); | ||
103 | master->minor = minor; | ||
104 | |||
105 | list_add_tail(&master->head, &minor->master_list); | ||
106 | |||
107 | return master; | ||
108 | } | ||
109 | |||
110 | struct drm_master *drm_master_get(struct drm_master *master) | ||
111 | { | ||
112 | kref_get(&master->refcount); | ||
113 | return master; | ||
114 | } | ||
115 | |||
116 | static void drm_master_destroy(struct kref *kref) | ||
117 | { | ||
118 | struct drm_master *master = container_of(kref, struct drm_master, refcount); | ||
119 | struct drm_magic_entry *pt, *next; | ||
120 | struct drm_device *dev = master->minor->dev; | ||
121 | struct drm_map_list *r_list, *list_temp; | ||
122 | |||
123 | list_del(&master->head); | ||
124 | |||
125 | if (dev->driver->master_destroy) | ||
126 | dev->driver->master_destroy(dev, master); | ||
127 | |||
128 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { | ||
129 | if (r_list->master == master) { | ||
130 | drm_rmmap_locked(dev, r_list->map); | ||
131 | r_list = NULL; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | if (master->unique) { | ||
136 | drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); | ||
137 | master->unique = NULL; | ||
138 | master->unique_len = 0; | ||
139 | } | ||
140 | |||
141 | list_for_each_entry_safe(pt, next, &master->magicfree, head) { | ||
142 | list_del(&pt->head); | ||
143 | drm_ht_remove_item(&master->magiclist, &pt->hash_item); | ||
144 | drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); | ||
145 | } | ||
146 | |||
147 | drm_ht_remove(&master->magiclist); | ||
148 | |||
149 | if (master->lock.hw_lock) { | ||
150 | if (dev->sigdata.lock == master->lock.hw_lock) | ||
151 | dev->sigdata.lock = NULL; | ||
152 | master->lock.hw_lock = NULL; | ||
153 | master->lock.file_priv = NULL; | ||
154 | wake_up_interruptible(&master->lock.lock_queue); | ||
155 | } | ||
156 | |||
157 | drm_free(master, sizeof(*master), DRM_MEM_DRIVER); | ||
158 | } | ||
159 | |||
160 | void drm_master_put(struct drm_master **master) | ||
161 | { | ||
162 | kref_put(&(*master)->refcount, drm_master_destroy); | ||
163 | *master = NULL; | ||
164 | } | ||
165 | |||
166 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, | ||
167 | struct drm_file *file_priv) | ||
168 | { | ||
169 | if (file_priv->minor->master && file_priv->minor->master != file_priv->master) | ||
170 | return -EINVAL; | ||
171 | |||
172 | if (!file_priv->master) | ||
173 | return -EINVAL; | ||
174 | |||
175 | if (!file_priv->minor->master && | ||
176 | file_priv->minor->master != file_priv->master) { | ||
177 | mutex_lock(&dev->struct_mutex); | ||
178 | file_priv->minor->master = drm_master_get(file_priv->master); | ||
179 | mutex_unlock(&dev->struct_mutex); | ||
180 | } | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | ||
186 | struct drm_file *file_priv) | ||
187 | { | ||
188 | if (!file_priv->master) | ||
189 | return -EINVAL; | ||
190 | mutex_lock(&dev->struct_mutex); | ||
191 | drm_master_put(&file_priv->minor->master); | ||
192 | mutex_unlock(&dev->struct_mutex); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
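[Editor's aside, not part of the patch] For context, the refcounted master is meant to be attached in the file-open path roughly as sketched below; this is a hedged approximation of how the open helper would use drm_master_create()/drm_master_get(), not code from this hunk.

static int example_open_master(struct drm_device *dev,
                               struct drm_file *file_priv)
{
        struct drm_minor *minor = file_priv->minor;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        if (!minor->master) {
                /* the first opener of this minor becomes master */
                minor->master = drm_master_create(minor);
                if (!minor->master) {
                        ret = -ENOMEM;
                        goto out;
                }
                file_priv->is_master = 1;
        }
        /* every file holds its own reference for its lifetime */
        file_priv->master = drm_master_get(minor->master);
out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}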
82 | static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | 196 | static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, |
83 | const struct pci_device_id *ent, | 197 | const struct pci_device_id *ent, |
84 | struct drm_driver *driver) | 198 | struct drm_driver *driver) |
@@ -92,7 +206,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | |||
92 | 206 | ||
93 | spin_lock_init(&dev->count_lock); | 207 | spin_lock_init(&dev->count_lock); |
94 | spin_lock_init(&dev->drw_lock); | 208 | spin_lock_init(&dev->drw_lock); |
95 | spin_lock_init(&dev->lock.spinlock); | ||
96 | init_timer(&dev->timer); | 209 | init_timer(&dev->timer); |
97 | mutex_init(&dev->struct_mutex); | 210 | mutex_init(&dev->struct_mutex); |
98 | mutex_init(&dev->ctxlist_mutex); | 211 | mutex_init(&dev->ctxlist_mutex); |
@@ -140,9 +253,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | |||
140 | } | 253 | } |
141 | } | 254 | } |
142 | 255 | ||
143 | if (dev->driver->load) | ||
144 | if ((retcode = dev->driver->load(dev, ent->driver_data))) | ||
145 | goto error_out_unreg; | ||
146 | 256 | ||
147 | retcode = drm_ctxbitmap_init(dev); | 257 | retcode = drm_ctxbitmap_init(dev); |
148 | if (retcode) { | 258 | if (retcode) { |
@@ -200,6 +310,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t | |||
200 | new_minor->device = MKDEV(DRM_MAJOR, minor_id); | 310 | new_minor->device = MKDEV(DRM_MAJOR, minor_id); |
201 | new_minor->dev = dev; | 311 | new_minor->dev = dev; |
202 | new_minor->index = minor_id; | 312 | new_minor->index = minor_id; |
313 | INIT_LIST_HEAD(&new_minor->master_list); | ||
203 | 314 | ||
204 | idr_replace(&drm_minors_idr, new_minor, minor_id); | 315 | idr_replace(&drm_minors_idr, new_minor, minor_id); |
205 | 316 | ||
@@ -267,8 +378,30 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
267 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); | 378 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); |
268 | goto err_g2; | 379 | goto err_g2; |
269 | } | 380 | } |
381 | |||
382 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
383 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | ||
384 | if (ret) | ||
385 | goto err_g2; | ||
386 | } | ||
387 | |||
270 | if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) | 388 | if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) |
271 | goto err_g2; | 389 | goto err_g3; |
390 | |||
391 | if (dev->driver->load) { | ||
392 | ret = dev->driver->load(dev, ent->driver_data); | ||
393 | if (ret) | ||
394 | goto err_g3; | ||
395 | } | ||
396 | |||
397 | /* setup the grouping for the legacy output */ | ||
398 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
399 | ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group); | ||
400 | if (ret) | ||
401 | goto err_g3; | ||
402 | } | ||
403 | |||
404 | list_add_tail(&dev->driver_item, &driver->device_list); | ||
272 | 405 | ||
273 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", | 406 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", |
274 | driver->name, driver->major, driver->minor, driver->patchlevel, | 407 | driver->name, driver->major, driver->minor, driver->patchlevel, |
@@ -276,6 +409,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
276 | 409 | ||
277 | return 0; | 410 | return 0; |
278 | 411 | ||
412 | err_g3: | ||
413 | drm_put_minor(&dev->primary); | ||
279 | err_g2: | 414 | err_g2: |
280 | pci_disable_device(pdev); | 415 | pci_disable_device(pdev); |
281 | err_g1: | 416 | err_g1: |
@@ -297,11 +432,6 @@ int drm_put_dev(struct drm_device * dev) | |||
297 | { | 432 | { |
298 | DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); | 433 | DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); |
299 | 434 | ||
300 | if (dev->unique) { | ||
301 | drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); | ||
302 | dev->unique = NULL; | ||
303 | dev->unique_len = 0; | ||
304 | } | ||
305 | if (dev->devname) { | 435 | if (dev->devname) { |
306 | drm_free(dev->devname, strlen(dev->devname) + 1, | 436 | drm_free(dev->devname, strlen(dev->devname) + 1, |
307 | DRM_MEM_DRIVER); | 437 | DRM_MEM_DRIVER); |
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 1611b9bcbe7f..5aa6780652aa 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "drmP.h" | 20 | #include "drmP.h" |
21 | 21 | ||
22 | #define to_drm_minor(d) container_of(d, struct drm_minor, kdev) | 22 | #define to_drm_minor(d) container_of(d, struct drm_minor, kdev) |
23 | #define to_drm_connector(d) container_of(d, struct drm_connector, kdev) | ||
23 | 24 | ||
24 | /** | 25 | /** |
25 | * drm_sysfs_suspend - DRM class suspend hook | 26 | * drm_sysfs_suspend - DRM class suspend hook |
@@ -34,7 +35,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state) | |||
34 | struct drm_minor *drm_minor = to_drm_minor(dev); | 35 | struct drm_minor *drm_minor = to_drm_minor(dev); |
35 | struct drm_device *drm_dev = drm_minor->dev; | 36 | struct drm_device *drm_dev = drm_minor->dev; |
36 | 37 | ||
37 | if (drm_dev->driver->suspend) | 38 | if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->suspend) |
38 | return drm_dev->driver->suspend(drm_dev, state); | 39 | return drm_dev->driver->suspend(drm_dev, state); |
39 | 40 | ||
40 | return 0; | 41 | return 0; |
@@ -52,7 +53,7 @@ static int drm_sysfs_resume(struct device *dev) | |||
52 | struct drm_minor *drm_minor = to_drm_minor(dev); | 53 | struct drm_minor *drm_minor = to_drm_minor(dev); |
53 | struct drm_device *drm_dev = drm_minor->dev; | 54 | struct drm_device *drm_dev = drm_minor->dev; |
54 | 55 | ||
55 | if (drm_dev->driver->resume) | 56 | if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->resume) |
56 | return drm_dev->driver->resume(drm_dev); | 57 | return drm_dev->driver->resume(drm_dev); |
57 | 58 | ||
58 | return 0; | 59 | return 0; |
@@ -144,6 +145,323 @@ static void drm_sysfs_device_release(struct device *dev) | |||
144 | return; | 145 | return; |
145 | } | 146 | } |
146 | 147 | ||
148 | /* | ||
149 | * Connector properties | ||
150 | */ | ||
151 | static ssize_t status_show(struct device *device, | ||
152 | struct device_attribute *attr, | ||
153 | char *buf) | ||
154 | { | ||
155 | struct drm_connector *connector = to_drm_connector(device); | ||
156 | enum drm_connector_status status; | ||
157 | |||
158 | status = connector->funcs->detect(connector); | ||
159 | return snprintf(buf, PAGE_SIZE, "%s", | ||
160 | drm_get_connector_status_name(status)); | ||
161 | } | ||
162 | |||
163 | static ssize_t dpms_show(struct device *device, | ||
164 | struct device_attribute *attr, | ||
165 | char *buf) | ||
166 | { | ||
167 | struct drm_connector *connector = to_drm_connector(device); | ||
168 | struct drm_device *dev = connector->dev; | ||
169 | uint64_t dpms_status; | ||
170 | int ret; | ||
171 | |||
172 | ret = drm_connector_property_get_value(connector, | ||
173 | dev->mode_config.dpms_property, | ||
174 | &dpms_status); | ||
175 | if (ret) | ||
176 | return 0; | ||
177 | |||
178 | return snprintf(buf, PAGE_SIZE, "%s", | ||
179 | drm_get_dpms_name((int)dpms_status)); | ||
180 | } | ||
181 | |||
182 | static ssize_t enabled_show(struct device *device, | ||
183 | struct device_attribute *attr, | ||
184 | char *buf) | ||
185 | { | ||
186 | struct drm_connector *connector = to_drm_connector(device); | ||
187 | |||
188 | return snprintf(buf, PAGE_SIZE, "%s", connector->encoder ? | ||
189 | "enabled" : "disabled"); | ||
190 | } | ||
191 | |||
192 | static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr, | ||
193 | char *buf, loff_t off, size_t count) | ||
194 | { | ||
195 | struct device *connector_dev = container_of(kobj, struct device, kobj); | ||
196 | struct drm_connector *connector = to_drm_connector(connector_dev); | ||
197 | unsigned char *edid; | ||
198 | size_t size; | ||
199 | |||
200 | if (!connector->edid_blob_ptr) | ||
201 | return 0; | ||
202 | |||
203 | edid = connector->edid_blob_ptr->data; | ||
204 | size = connector->edid_blob_ptr->length; | ||
205 | if (!edid) | ||
206 | return 0; | ||
207 | |||
208 | if (off >= size) | ||
209 | return 0; | ||
210 | |||
211 | if (off + count > size) | ||
212 | count = size - off; | ||
213 | memcpy(buf, edid + off, count); | ||
214 | |||
215 | return count; | ||
216 | } | ||
217 | |||
218 | static ssize_t modes_show(struct device *device, | ||
219 | struct device_attribute *attr, | ||
220 | char *buf) | ||
221 | { | ||
222 | struct drm_connector *connector = to_drm_connector(device); | ||
223 | struct drm_display_mode *mode; | ||
224 | int written = 0; | ||
225 | |||
226 | list_for_each_entry(mode, &connector->modes, head) { | ||
227 | written += snprintf(buf + written, PAGE_SIZE - written, "%s\n", | ||
228 | mode->name); | ||
229 | } | ||
230 | |||
231 | return written; | ||
232 | } | ||
233 | |||
234 | static ssize_t subconnector_show(struct device *device, | ||
235 | struct device_attribute *attr, | ||
236 | char *buf) | ||
237 | { | ||
238 | struct drm_connector *connector = to_drm_connector(device); | ||
239 | struct drm_device *dev = connector->dev; | ||
240 | struct drm_property *prop = NULL; | ||
241 | uint64_t subconnector; | ||
242 | int is_tv = 0; | ||
243 | int ret; | ||
244 | |||
245 | switch (connector->connector_type) { | ||
246 | case DRM_MODE_CONNECTOR_DVII: | ||
247 | prop = dev->mode_config.dvi_i_subconnector_property; | ||
248 | break; | ||
249 | case DRM_MODE_CONNECTOR_Composite: | ||
250 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
251 | case DRM_MODE_CONNECTOR_Component: | ||
252 | prop = dev->mode_config.tv_subconnector_property; | ||
253 | is_tv = 1; | ||
254 | break; | ||
255 | default: | ||
256 | DRM_ERROR("Wrong connector type for this property\n"); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | if (!prop) { | ||
261 | DRM_ERROR("Unable to find subconnector property\n"); | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | ret = drm_connector_property_get_value(connector, prop, &subconnector); | ||
266 | if (ret) | ||
267 | return 0; | ||
268 | |||
269 | return snprintf(buf, PAGE_SIZE, "%s", is_tv ? | ||
270 | drm_get_tv_subconnector_name((int)subconnector) : | ||
271 | drm_get_dvi_i_subconnector_name((int)subconnector)); | ||
272 | } | ||
273 | |||
274 | static ssize_t select_subconnector_show(struct device *device, | ||
275 | struct device_attribute *attr, | ||
276 | char *buf) | ||
277 | { | ||
278 | struct drm_connector *connector = to_drm_connector(device); | ||
279 | struct drm_device *dev = connector->dev; | ||
280 | struct drm_property *prop = NULL; | ||
281 | uint64_t subconnector; | ||
282 | int is_tv = 0; | ||
283 | int ret; | ||
284 | |||
285 | switch (connector->connector_type) { | ||
286 | case DRM_MODE_CONNECTOR_DVII: | ||
287 | prop = dev->mode_config.dvi_i_select_subconnector_property; | ||
288 | break; | ||
289 | case DRM_MODE_CONNECTOR_Composite: | ||
290 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
291 | case DRM_MODE_CONNECTOR_Component: | ||
292 | prop = dev->mode_config.tv_select_subconnector_property; | ||
293 | is_tv = 1; | ||
294 | break; | ||
295 | default: | ||
296 | DRM_ERROR("Wrong connector type for this property\n"); | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | if (!prop) { | ||
301 | DRM_ERROR("Unable to find select subconnector property\n"); | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | ret = drm_connector_property_get_value(connector, prop, &subconnector); | ||
306 | if (ret) | ||
307 | return 0; | ||
308 | |||
309 | return snprintf(buf, PAGE_SIZE, "%s", is_tv ? | ||
310 | drm_get_tv_select_name((int)subconnector) : | ||
311 | drm_get_dvi_i_select_name((int)subconnector)); | ||
312 | } | ||
313 | |||
314 | static struct device_attribute connector_attrs[] = { | ||
315 | __ATTR_RO(status), | ||
316 | __ATTR_RO(enabled), | ||
317 | __ATTR_RO(dpms), | ||
318 | __ATTR_RO(modes), | ||
319 | }; | ||
320 | |||
321 | /* These attributes are for both DVI-I connectors and all types of tv-out. */ | ||
322 | static struct device_attribute connector_attrs_opt1[] = { | ||
323 | __ATTR_RO(subconnector), | ||
324 | __ATTR_RO(select_subconnector), | ||
325 | }; | ||
326 | |||
327 | static struct bin_attribute edid_attr = { | ||
328 | .attr.name = "edid", | ||
329 | .size = 128, | ||
330 | .read = edid_show, | ||
331 | }; | ||
332 | |||
333 | /** | ||
334 | * drm_sysfs_connector_add - add a connector to sysfs | ||
335 | * @connector: connector to add | ||
336 | * | ||
337 | * Create a connector device in sysfs, along with its associated connector | ||
338 | * properties (so far, connection status, dpms, mode list & edid) and | ||
339 | * generate a hotplug event so userspace knows there's a new connector | ||
340 | * available. | ||
341 | * | ||
342 | * Note: | ||
343 | * This routine should only be called *once* for each connector registered. | ||
344 | * A second call for an already registered device will trigger the BUG_ON | ||
345 | * below. | ||
346 | */ | ||
347 | int drm_sysfs_connector_add(struct drm_connector *connector) | ||
348 | { | ||
349 | struct drm_device *dev = connector->dev; | ||
350 | int ret = 0, i, j; | ||
351 | |||
352 | /* We shouldn't get called more than once for the same connector */ | ||
353 | BUG_ON(device_is_registered(&connector->kdev)); | ||
354 | |||
355 | connector->kdev.parent = &dev->primary->kdev; | ||
356 | connector->kdev.class = drm_class; | ||
357 | connector->kdev.release = drm_sysfs_device_release; | ||
358 | |||
359 | DRM_DEBUG("adding \"%s\" to sysfs\n", | ||
360 | drm_get_connector_name(connector)); | ||
361 | |||
362 | snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s", | ||
363 | dev->primary->index, drm_get_connector_name(connector)); | ||
364 | ret = device_register(&connector->kdev); | ||
365 | |||
366 | if (ret) { | ||
367 | DRM_ERROR("failed to register connector device: %d\n", ret); | ||
368 | goto out; | ||
369 | } | ||
370 | |||
371 | /* Standard attributes */ | ||
372 | |||
373 | for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) { | ||
374 | ret = device_create_file(&connector->kdev, &connector_attrs[i]); | ||
375 | if (ret) | ||
376 | goto err_out_files; | ||
377 | } | ||
378 | |||
379 | /* Optional attributes */ | ||
380 | /* | ||
381 | * In the long run it maybe a good idea to make one set of | ||
382 | * optionals per connector type. | ||
383 | */ | ||
384 | switch (connector->connector_type) { | ||
385 | case DRM_MODE_CONNECTOR_DVII: | ||
386 | case DRM_MODE_CONNECTOR_Composite: | ||
387 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
388 | case DRM_MODE_CONNECTOR_Component: | ||
389 | for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) { | ||
390 | ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]); | ||
391 | if (ret) | ||
392 | goto err_out_files; | ||
393 | } | ||
394 | break; | ||
395 | default: | ||
396 | break; | ||
397 | } | ||
398 | |||
399 | ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr); | ||
400 | if (ret) | ||
401 | goto err_out_files; | ||
402 | |||
403 | /* Let userspace know we have a new connector */ | ||
404 | drm_sysfs_hotplug_event(dev); | ||
405 | |||
406 | return 0; | ||
407 | |||
408 | err_out_files: | ||
409 | if (i > 0) | ||
410 | for (j = 0; j < i; j++) | ||
411 | device_remove_file(&connector->kdev, | ||
412 | &connector_attrs[j]); | ||
413 | device_unregister(&connector->kdev); | ||
414 | |||
415 | out: | ||
416 | return ret; | ||
417 | } | ||
418 | EXPORT_SYMBOL(drm_sysfs_connector_add); | ||
419 | |||
420 | /** | ||
421 | * drm_sysfs_connector_remove - remove a connector device from sysfs | ||
422 | * @connector: connector to remove | ||
423 | * | ||
424 | * Remove @connector and its associated attributes from sysfs. Note that | ||
425 | * the device model core will take care of sending the "remove" uevent | ||
426 | * at this time, so we don't need to do it. | ||
427 | * | ||
428 | * Note: | ||
429 | * This routine should only be called if the connector was previously | ||
430 | * successfully registered. If @connector hasn't been registered yet, | ||
431 | * you'll likely see a panic somewhere deep in sysfs code when called. | ||
432 | */ | ||
433 | void drm_sysfs_connector_remove(struct drm_connector *connector) | ||
434 | { | ||
435 | int i; | ||
436 | |||
437 | DRM_DEBUG("removing \"%s\" from sysfs\n", | ||
438 | drm_get_connector_name(connector)); | ||
439 | |||
440 | for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) | ||
441 | device_remove_file(&connector->kdev, &connector_attrs[i]); | ||
442 | sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr); | ||
443 | device_unregister(&connector->kdev); | ||
444 | } | ||
445 | EXPORT_SYMBOL(drm_sysfs_connector_remove); | ||
446 | |||
447 | /** | ||
448 | * drm_sysfs_hotplug_event - generate a DRM uevent | ||
449 | * @dev: DRM device | ||
450 | * | ||
451 | * Send a uevent for the DRM device specified by @dev. Currently we only | ||
452 | * set HOTPLUG=1 in the uevent environment, but this could be expanded to | ||
453 | * deal with other types of events. | ||
454 | */ | ||
455 | void drm_sysfs_hotplug_event(struct drm_device *dev) | ||
456 | { | ||
457 | char *event_string = "HOTPLUG=1"; | ||
458 | char *envp[] = { event_string, NULL }; | ||
459 | |||
460 | DRM_DEBUG("generating hotplug event\n"); | ||
461 | |||
462 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); | ||
463 | } | ||
464 | |||
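Because the uevent only carries HOTPLUG=1, userspace is expected to rescan the connector state exposed in sysfs when it sees one. On the kernel side the helper is simply called once per device after the driver has refreshed its outputs; a hedged sketch (the surrounding handler is hypothetical, only the final call exists in this patch):

/* Sketch: hypothetical hotplug handler. */
static void intel_output_poll_sketch(struct drm_device *dev)
{
	/* ... re-run connector ->detect(), update mode lists ... */
	drm_sysfs_hotplug_event(dev);	/* tell userspace to rescan the card's connectors */
}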
147 | /** | 465 | /** |
148 | * drm_sysfs_device_add - adds a class device to sysfs for a character driver | 466 | * drm_sysfs_device_add - adds a class device to sysfs for a character driver |
149 | * @dev: DRM device to be added | 467 | * @dev: DRM device to be added |
@@ -163,9 +481,14 @@ int drm_sysfs_device_add(struct drm_minor *minor) | |||
163 | minor->kdev.class = drm_class; | 481 | minor->kdev.class = drm_class; |
164 | minor->kdev.release = drm_sysfs_device_release; | 482 | minor->kdev.release = drm_sysfs_device_release; |
165 | minor->kdev.devt = minor->device; | 483 | minor->kdev.devt = minor->device; |
166 | minor_str = "card%d"; | 484 | if (minor->type == DRM_MINOR_CONTROL) |
167 | 485 | minor_str = "controlD%d"; | |
168 | snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index); | 486 | else if (minor->type == DRM_MINOR_RENDER) |
487 | minor_str = "renderD%d"; | ||
488 | else | ||
489 | minor_str = "card%d"; | ||
490 | |||
491 | dev_set_name(&minor->kdev, minor_str, minor->index); | ||
169 | 492 | ||
170 | err = device_register(&minor->kdev); | 493 | err = device_register(&minor->kdev); |
171 | if (err) { | 494 | if (err) { |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index c234c6f24a8d..3ffae021d280 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) | |||
267 | dmah.size = map->size; | 267 | dmah.size = map->size; |
268 | __drm_pci_free(dev, &dmah); | 268 | __drm_pci_free(dev, &dmah); |
269 | break; | 269 | break; |
270 | case _DRM_GEM: | ||
271 | DRM_ERROR("tried to rmmap GEM object\n"); | ||
272 | break; | ||
270 | } | 273 | } |
271 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | 274 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
272 | } | 275 | } |
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = { | |||
399 | * Create a new drm_vma_entry structure as the \p vma private data entry and | 402 | * Create a new drm_vma_entry structure as the \p vma private data entry and |
400 | * add it to drm_device::vmalist. | 403 | * add it to drm_device::vmalist. |
401 | */ | 404 | */ |
402 | static void drm_vm_open_locked(struct vm_area_struct *vma) | 405 | void drm_vm_open_locked(struct vm_area_struct *vma) |
403 | { | 406 | { |
404 | struct drm_file *priv = vma->vm_file->private_data; | 407 | struct drm_file *priv = vma->vm_file->private_data; |
405 | struct drm_device *dev = priv->minor->dev; | 408 | struct drm_device *dev = priv->minor->dev; |
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); | |||
540 | * according to the mapping type and remaps the pages. Finally sets the file | 543 | * according to the mapping type and remaps the pages. Finally sets the file |
541 | * pointer and calls vm_open(). | 544 | * pointer and calls vm_open(). |
542 | */ | 545 | */ |
543 | static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) | 546 | int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) |
544 | { | 547 | { |
545 | struct drm_file *priv = filp->private_data; | 548 | struct drm_file *priv = filp->private_data; |
546 | struct drm_device *dev = priv->minor->dev; | 549 | struct drm_device *dev = priv->minor->dev; |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index d8fb5d8ee7ea..793cba39d832 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -8,7 +8,23 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
8 | i915_gem.o \ | 8 | i915_gem.o \ |
9 | i915_gem_debug.o \ | 9 | i915_gem_debug.o \ |
10 | i915_gem_proc.o \ | 10 | i915_gem_proc.o \ |
11 | i915_gem_tiling.o | 11 | i915_gem_tiling.o \ |
12 | intel_display.o \ | ||
13 | intel_crt.o \ | ||
14 | intel_lvds.o \ | ||
15 | intel_bios.o \ | ||
16 | intel_hdmi.o \ | ||
17 | intel_sdvo.o \ | ||
18 | intel_modes.o \ | ||
19 | intel_i2c.o \ | ||
20 | intel_fb.o \ | ||
21 | intel_tv.o \ | ||
22 | intel_dvo.o \ | ||
23 | dvo_ch7xxx.o \ | ||
24 | dvo_ch7017.o \ | ||
25 | dvo_ivch.o \ | ||
26 | dvo_tfp410.o \ | ||
27 | dvo_sil164.o | ||
12 | 28 | ||
13 | i915-$(CONFIG_ACPI) += i915_opregion.o | 29 | i915-$(CONFIG_ACPI) += i915_opregion.o |
14 | i915-$(CONFIG_COMPAT) += i915_ioc32.o | 30 | i915-$(CONFIG_COMPAT) += i915_ioc32.o |
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h new file mode 100644 index 000000000000..e747ac42fe3a --- /dev/null +++ b/drivers/gpu/drm/i915/dvo.h | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Eric Anholt | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef _INTEL_DVO_H | ||
24 | #define _INTEL_DVO_H | ||
25 | |||
26 | #include <linux/i2c.h> | ||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "drm_crtc.h" | ||
30 | #include "intel_drv.h" | ||
31 | |||
32 | struct intel_dvo_device { | ||
33 | char *name; | ||
34 | int type; | ||
35 | /* DVOA/B/C output register */ | ||
36 | u32 dvo_reg; | ||
37 | /* GPIO register used for i2c bus to control this device */ | ||
38 | u32 gpio; | ||
39 | int slave_addr; | ||
40 | struct intel_i2c_chan *i2c_bus; | ||
41 | |||
42 | const struct intel_dvo_dev_ops *dev_ops; | ||
43 | void *dev_priv; | ||
44 | |||
45 | struct drm_display_mode *panel_fixed_mode; | ||
46 | bool panel_wants_dither; | ||
47 | }; | ||
48 | |||
49 | struct intel_dvo_dev_ops { | ||
50 | /* | ||
51 | * Initialize the device at startup time. | ||
52 | * Returns false if the device does not exist. | ||
53 | */ | ||
54 | bool (*init)(struct intel_dvo_device *dvo, | ||
55 | struct intel_i2c_chan *i2cbus); | ||
56 | |||
57 | /* | ||
58 | * Called to allow the output a chance to create properties after the | ||
59 | * RandR objects have been created. | ||
60 | */ | ||
61 | void (*create_resources)(struct intel_dvo_device *dvo); | ||
62 | |||
63 | /* | ||
64 | * Turn on/off output or set intermediate power levels if available. | ||
65 | * | ||
66 | * Unsupported intermediate modes drop to the lower power setting. | ||
67 | * If the mode is DPMSModeOff, the output must be disabled, | ||
68 | * as the DPLL may be disabled afterwards. | ||
69 | */ | ||
70 | void (*dpms)(struct intel_dvo_device *dvo, int mode); | ||
71 | |||
72 | /* | ||
73 | * Saves the output's state for restoration on VT switch. | ||
74 | */ | ||
75 | void (*save)(struct intel_dvo_device *dvo); | ||
76 | |||
77 | /* | ||
79 | * Restores the output's state at VT switch. | ||
79 | */ | ||
80 | void (*restore)(struct intel_dvo_device *dvo); | ||
81 | |||
82 | /* | ||
83 | * Callback for testing a video mode for a given output. | ||
84 | * | ||
85 | * This function should only check for cases where a mode can't | ||
86 | * be supported on the output specifically, and not represent | ||
87 | * generic CRTC limitations. | ||
88 | * | ||
89 | * \return MODE_OK if the mode is valid, or another MODE_* otherwise. | ||
90 | */ | ||
91 | int (*mode_valid)(struct intel_dvo_device *dvo, | ||
92 | struct drm_display_mode *mode); | ||
93 | |||
94 | /* | ||
95 | * Callback to adjust the mode to be set in the CRTC. | ||
96 | * | ||
97 | * This allows an output to adjust the clock or even the entire set of | ||
98 | * timings, which is used for panels with fixed timings or for | ||
99 | * buses with clock limitations. | ||
100 | */ | ||
101 | bool (*mode_fixup)(struct intel_dvo_device *dvo, | ||
102 | struct drm_display_mode *mode, | ||
103 | struct drm_display_mode *adjusted_mode); | ||
104 | |||
105 | /* | ||
106 | * Callback for preparing mode changes on an output | ||
107 | */ | ||
108 | void (*prepare)(struct intel_dvo_device *dvo); | ||
109 | |||
110 | /* | ||
111 | * Callback for committing mode changes on an output | ||
112 | */ | ||
113 | void (*commit)(struct intel_dvo_device *dvo); | ||
114 | |||
115 | /* | ||
116 | * Callback for setting up a video mode after fixups have been made. | ||
117 | * | ||
118 | * This is only called while the output is disabled. The dpms callback | ||
119 | * must be all that's necessary to turn the output on | ||
120 | * after this function is called. | ||
121 | */ | ||
122 | void (*mode_set)(struct intel_dvo_device *dvo, | ||
123 | struct drm_display_mode *mode, | ||
124 | struct drm_display_mode *adjusted_mode); | ||
125 | |||
126 | /* | ||
127 | * Probe for a connected output, and return detect_status. | ||
128 | */ | ||
129 | enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); | ||
130 | |||
131 | /** | ||
132 | * Query the device for the modes it provides. | ||
133 | * | ||
134 | * This function may also update MonInfo, mm_width, and mm_height. | ||
135 | * | ||
136 | * \return singly-linked list of modes or NULL if no modes found. | ||
137 | */ | ||
138 | struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); | ||
139 | |||
140 | /** | ||
141 | * Clean up driver-specific bits of the output | ||
142 | */ | ||
143 | void (*destroy) (struct intel_dvo_device *dvo); | ||
144 | |||
145 | /** | ||
146 | * Debugging hook to dump device registers to log file | ||
147 | */ | ||
148 | void (*dump_regs)(struct intel_dvo_device *dvo); | ||
149 | }; | ||
150 | |||
151 | extern struct intel_dvo_dev_ops sil164_ops; | ||
152 | extern struct intel_dvo_dev_ops ch7xxx_ops; | ||
153 | extern struct intel_dvo_dev_ops ivch_ops; | ||
154 | extern struct intel_dvo_dev_ops tfp410_ops; | ||
155 | extern struct intel_dvo_dev_ops ch7017_ops; | ||
156 | |||
157 | #endif /* _INTEL_DVO_H */ | ||
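The ops table above is the whole contract between the generic DVO code and a transmitter driver, and the comments imply a fixed ordering during a mode set: validate, fix up, program the chip while the output is off, then let dpms turn it on. A sketch of that sequence from the caller's side; this is illustrative, assumes optional hooks are NULL-checked, and is not the actual intel_dvo.c code:

/* Illustrative caller; assumes ->init() already matched a chip and filled dev_ops. */
static void intel_dvo_mode_set_sketch(struct intel_dvo_device *dvo,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	if (dvo->dev_ops->mode_valid(dvo, mode) != MODE_OK)
		return;

	/* give the chip a chance to rewrite timings (fixed panels, bus limits) */
	if (dvo->dev_ops->mode_fixup &&
	    !dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode))
		return;

	dvo->dev_ops->dpms(dvo, DRM_MODE_DPMS_OFF);	/* mode_set runs with the output disabled */
	if (dvo->dev_ops->prepare)
		dvo->dev_ops->prepare(dvo);
	dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
	if (dvo->dev_ops->commit)
		dvo->dev_ops->commit(dvo);
	dvo->dev_ops->dpms(dvo, DRM_MODE_DPMS_ON);	/* dpms alone must be enough to light it up */
}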
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c new file mode 100644 index 000000000000..03d4b4973b02 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ch7017.c | |||
@@ -0,0 +1,454 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include "dvo.h" | ||
29 | |||
30 | #define CH7017_TV_DISPLAY_MODE 0x00 | ||
31 | #define CH7017_FLICKER_FILTER 0x01 | ||
32 | #define CH7017_VIDEO_BANDWIDTH 0x02 | ||
33 | #define CH7017_TEXT_ENHANCEMENT 0x03 | ||
34 | #define CH7017_START_ACTIVE_VIDEO 0x04 | ||
35 | #define CH7017_HORIZONTAL_POSITION 0x05 | ||
36 | #define CH7017_VERTICAL_POSITION 0x06 | ||
37 | #define CH7017_BLACK_LEVEL 0x07 | ||
38 | #define CH7017_CONTRAST_ENHANCEMENT 0x08 | ||
39 | #define CH7017_TV_PLL 0x09 | ||
40 | #define CH7017_TV_PLL_M 0x0a | ||
41 | #define CH7017_TV_PLL_N 0x0b | ||
42 | #define CH7017_SUB_CARRIER_0 0x0c | ||
43 | #define CH7017_CIV_CONTROL 0x10 | ||
44 | #define CH7017_CIV_0 0x11 | ||
45 | #define CH7017_CHROMA_BOOST 0x14 | ||
46 | #define CH7017_CLOCK_MODE 0x1c | ||
47 | #define CH7017_INPUT_CLOCK 0x1d | ||
48 | #define CH7017_GPIO_CONTROL 0x1e | ||
49 | #define CH7017_INPUT_DATA_FORMAT 0x1f | ||
50 | #define CH7017_CONNECTION_DETECT 0x20 | ||
51 | #define CH7017_DAC_CONTROL 0x21 | ||
52 | #define CH7017_BUFFERED_CLOCK_OUTPUT 0x22 | ||
53 | #define CH7017_DEFEAT_VSYNC 0x47 | ||
54 | #define CH7017_TEST_PATTERN 0x48 | ||
55 | |||
56 | #define CH7017_POWER_MANAGEMENT 0x49 | ||
57 | /** Enables the TV output path. */ | ||
58 | #define CH7017_TV_EN (1 << 0) | ||
59 | #define CH7017_DAC0_POWER_DOWN (1 << 1) | ||
60 | #define CH7017_DAC1_POWER_DOWN (1 << 2) | ||
61 | #define CH7017_DAC2_POWER_DOWN (1 << 3) | ||
62 | #define CH7017_DAC3_POWER_DOWN (1 << 4) | ||
63 | /** Powers down the TV out block, and DAC0-3 */ | ||
64 | #define CH7017_TV_POWER_DOWN_EN (1 << 5) | ||
65 | |||
66 | #define CH7017_VERSION_ID 0x4a | ||
67 | |||
68 | #define CH7017_DEVICE_ID 0x4b | ||
69 | #define CH7017_DEVICE_ID_VALUE 0x1b | ||
70 | #define CH7018_DEVICE_ID_VALUE 0x1a | ||
71 | #define CH7019_DEVICE_ID_VALUE 0x19 | ||
72 | |||
73 | #define CH7017_XCLK_D2_ADJUST 0x53 | ||
74 | #define CH7017_UP_SCALER_COEFF_0 0x55 | ||
75 | #define CH7017_UP_SCALER_COEFF_1 0x56 | ||
76 | #define CH7017_UP_SCALER_COEFF_2 0x57 | ||
77 | #define CH7017_UP_SCALER_COEFF_3 0x58 | ||
78 | #define CH7017_UP_SCALER_COEFF_4 0x59 | ||
79 | #define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a | ||
80 | #define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b | ||
81 | #define CH7017_GPIO_INVERT 0x5c | ||
82 | #define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d | ||
83 | #define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e | ||
84 | |||
85 | #define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f | ||
86 | /**< Low bits of horizontal active pixel input */ | ||
87 | |||
88 | #define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60 | ||
89 | /** High bits of horizontal active pixel input */ | ||
90 | #define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0) | ||
91 | /** High bits of vertical active line output */ | ||
92 | #define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3) | ||
93 | |||
94 | #define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61 | ||
95 | /**< Low bits of vertical active line output */ | ||
96 | |||
97 | #define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62 | ||
98 | /**< Low bits of horizontal active pixel output */ | ||
99 | |||
100 | #define CH7017_LVDS_POWER_DOWN 0x63 | ||
101 | /** High bits of horizontal active pixel output */ | ||
102 | #define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0) | ||
103 | /** Enables the LVDS power down state transition */ | ||
104 | #define CH7017_LVDS_POWER_DOWN_EN (1 << 6) | ||
105 | /** Enables the LVDS upscaler */ | ||
106 | #define CH7017_LVDS_UPSCALER_EN (1 << 7) | ||
107 | #define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08 | ||
108 | |||
109 | #define CH7017_LVDS_ENCODING 0x64 | ||
110 | #define CH7017_LVDS_DITHER_2D (1 << 2) | ||
111 | #define CH7017_LVDS_DITHER_DIS (1 << 3) | ||
112 | #define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4) | ||
113 | #define CH7017_LVDS_24_BIT (1 << 5) | ||
114 | |||
115 | #define CH7017_LVDS_ENCODING_2 0x65 | ||
116 | |||
117 | #define CH7017_LVDS_PLL_CONTROL 0x66 | ||
118 | /** Enables the LVDS panel output path */ | ||
119 | #define CH7017_LVDS_PANEN (1 << 0) | ||
120 | /** Enables the LVDS panel backlight */ | ||
121 | #define CH7017_LVDS_BKLEN (1 << 3) | ||
122 | |||
123 | #define CH7017_POWER_SEQUENCING_T1 0x67 | ||
124 | #define CH7017_POWER_SEQUENCING_T2 0x68 | ||
125 | #define CH7017_POWER_SEQUENCING_T3 0x69 | ||
126 | #define CH7017_POWER_SEQUENCING_T4 0x6a | ||
127 | #define CH7017_POWER_SEQUENCING_T5 0x6b | ||
128 | #define CH7017_GPIO_DRIVER_TYPE 0x6c | ||
129 | #define CH7017_GPIO_DATA 0x6d | ||
130 | #define CH7017_GPIO_DIRECTION_CONTROL 0x6e | ||
131 | |||
132 | #define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71 | ||
133 | # define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4 | ||
134 | # define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0 | ||
135 | # define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80 | ||
136 | |||
137 | #define CH7017_LVDS_PLL_VCO_CONTROL 0x72 | ||
138 | # define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80 | ||
139 | # define CH7017_LVDS_PLL_VCO_SHIFT 4 | ||
140 | # define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0 | ||
141 | |||
142 | #define CH7017_OUTPUTS_ENABLE 0x73 | ||
143 | # define CH7017_CHARGE_PUMP_LOW 0x0 | ||
144 | # define CH7017_CHARGE_PUMP_HIGH 0x3 | ||
145 | # define CH7017_LVDS_CHANNEL_A (1 << 3) | ||
146 | # define CH7017_LVDS_CHANNEL_B (1 << 4) | ||
147 | # define CH7017_TV_DAC_A (1 << 5) | ||
148 | # define CH7017_TV_DAC_B (1 << 6) | ||
149 | # define CH7017_DDC_SELECT_DC2 (1 << 7) | ||
150 | |||
151 | #define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74 | ||
152 | #define CH7017_LVDS_PLL_EMI_REDUCTION 0x75 | ||
153 | #define CH7017_LVDS_POWER_DOWN_FLICKER 0x76 | ||
154 | |||
155 | #define CH7017_LVDS_CONTROL_2 0x78 | ||
156 | # define CH7017_LOOP_FILTER_SHIFT 5 | ||
157 | # define CH7017_PHASE_DETECTOR_SHIFT 0 | ||
158 | |||
159 | #define CH7017_BANG_LIMIT_CONTROL 0x7f | ||
160 | |||
161 | struct ch7017_priv { | ||
162 | uint8_t save_hapi; | ||
163 | uint8_t save_vali; | ||
164 | uint8_t save_valo; | ||
165 | uint8_t save_ailo; | ||
166 | uint8_t save_lvds_pll_vco; | ||
167 | uint8_t save_feedback_div; | ||
168 | uint8_t save_lvds_control_2; | ||
169 | uint8_t save_outputs_enable; | ||
170 | uint8_t save_lvds_power_down; | ||
171 | uint8_t save_power_management; | ||
172 | }; | ||
173 | |||
174 | static void ch7017_dump_regs(struct intel_dvo_device *dvo); | ||
175 | static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); | ||
176 | |||
177 | static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) | ||
178 | { | ||
179 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
180 | u8 out_buf[2]; | ||
181 | u8 in_buf[2]; | ||
182 | |||
183 | struct i2c_msg msgs[] = { | ||
184 | { | ||
185 | .addr = i2cbus->slave_addr, | ||
186 | .flags = 0, | ||
187 | .len = 1, | ||
188 | .buf = out_buf, | ||
189 | }, | ||
190 | { | ||
191 | .addr = i2cbus->slave_addr, | ||
192 | .flags = I2C_M_RD, | ||
193 | .len = 1, | ||
194 | .buf = in_buf, | ||
195 | } | ||
196 | }; | ||
197 | |||
198 | out_buf[0] = addr; | ||
199 | out_buf[1] = 0; | ||
200 | |||
201 | if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { | ||
202 | *val = in_buf[0]; | ||
203 | return true; | ||
204 | } | ||
205 | |||
206 | return false; | ||
207 | } | ||
208 | |||
209 | static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) | ||
210 | { | ||
211 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
212 | uint8_t out_buf[2]; | ||
213 | struct i2c_msg msg = { | ||
214 | .addr = i2cbus->slave_addr, | ||
215 | .flags = 0, | ||
216 | .len = 2, | ||
217 | .buf = out_buf, | ||
218 | }; | ||
219 | |||
220 | out_buf[0] = addr; | ||
221 | out_buf[1] = val; | ||
222 | |||
223 | if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) | ||
224 | return true; | ||
225 | |||
226 | return false; | ||
227 | } | ||
228 | |||
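ch7017_read() and ch7017_write() wrap the only two transaction shapes the chip needs: a one-byte register-address write followed by a one-byte read, and a two-byte address-plus-value write. Everything else in the file is built from them as read-modify-write sequences (see ch7017_dpms below toggling CH7017_LVDS_POWER_DOWN_EN); a small hypothetical helper, not part of the patch, makes the idiom explicit:

/* Sketch only: the read-modify-write idiom used throughout this driver. */
static bool ch7017_update_bits(struct intel_dvo_device *dvo, int addr,
			       uint8_t mask, uint8_t bits)
{
	uint8_t val;

	if (!ch7017_read(dvo, addr, &val))
		return false;
	val = (val & ~mask) | (bits & mask);
	return ch7017_write(dvo, addr, val);
}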
229 | /** Probes for a CH7017 on the given bus and slave address. */ | ||
230 | static bool ch7017_init(struct intel_dvo_device *dvo, | ||
231 | struct intel_i2c_chan *i2cbus) | ||
232 | { | ||
233 | struct ch7017_priv *priv; | ||
234 | uint8_t val; | ||
235 | |||
236 | priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); | ||
237 | if (priv == NULL) | ||
238 | return false; | ||
239 | |||
240 | dvo->i2c_bus = i2cbus; | ||
241 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
242 | dvo->dev_priv = priv; | ||
243 | |||
244 | if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) | ||
245 | goto fail; | ||
246 | |||
247 | if (val != CH7017_DEVICE_ID_VALUE && | ||
248 | val != CH7018_DEVICE_ID_VALUE && | ||
249 | val != CH7019_DEVICE_ID_VALUE) { | ||
250 | DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", | ||
251 | val, i2cbus->adapter.name,i2cbus->slave_addr); | ||
252 | goto fail; | ||
253 | } | ||
254 | |||
255 | return true; | ||
256 | fail: | ||
257 | kfree(priv); | ||
258 | return false; | ||
259 | } | ||
260 | |||
261 | static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) | ||
262 | { | ||
263 | return connector_status_unknown; | ||
264 | } | ||
265 | |||
266 | static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, | ||
267 | struct drm_display_mode *mode) | ||
268 | { | ||
269 | if (mode->clock > 160000) | ||
270 | return MODE_CLOCK_HIGH; | ||
271 | |||
272 | return MODE_OK; | ||
273 | } | ||
274 | |||
275 | static void ch7017_mode_set(struct intel_dvo_device *dvo, | ||
276 | struct drm_display_mode *mode, | ||
277 | struct drm_display_mode *adjusted_mode) | ||
278 | { | ||
279 | uint8_t lvds_pll_feedback_div, lvds_pll_vco_control; | ||
280 | uint8_t outputs_enable, lvds_control_2, lvds_power_down; | ||
281 | uint8_t horizontal_active_pixel_input; | ||
282 | uint8_t horizontal_active_pixel_output, vertical_active_line_output; | ||
283 | uint8_t active_input_line_output; | ||
284 | |||
285 | DRM_DEBUG("Registers before mode setting\n"); | ||
286 | ch7017_dump_regs(dvo); | ||
287 | |||
288 | /* LVDS PLL settings from page 75 of 7017-7017ds.pdf */ | ||
289 | if (mode->clock < 100000) { | ||
290 | outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW; | ||
291 | lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | | ||
292 | (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | | ||
293 | (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); | ||
294 | lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | | ||
295 | (2 << CH7017_LVDS_PLL_VCO_SHIFT) | | ||
296 | (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); | ||
297 | lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) | | ||
298 | (0 << CH7017_PHASE_DETECTOR_SHIFT); | ||
299 | } else { | ||
300 | outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH; | ||
301 | lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | | ||
302 | (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | | ||
303 | (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); | ||
304 | lvds_pll_feedback_div = 35; | ||
305 | lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) | | ||
306 | (0 << CH7017_PHASE_DETECTOR_SHIFT); | ||
307 | if (1) { /* XXX: dual channel panel detection. Assume yes for now. */ | ||
308 | outputs_enable |= CH7017_LVDS_CHANNEL_B; | ||
309 | lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | | ||
310 | (2 << CH7017_LVDS_PLL_VCO_SHIFT) | | ||
311 | (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); | ||
312 | } else { | ||
313 | lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | | ||
314 | (1 << CH7017_LVDS_PLL_VCO_SHIFT) | | ||
315 | (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); | ||
316 | } | ||
317 | } | ||
318 | |||
319 | horizontal_active_pixel_input = mode->hdisplay & 0x00ff; | ||
320 | |||
321 | vertical_active_line_output = mode->vdisplay & 0x00ff; | ||
322 | horizontal_active_pixel_output = mode->hdisplay & 0x00ff; | ||
323 | |||
324 | active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) | | ||
325 | (((mode->vdisplay & 0x0700) >> 8) << 3); | ||
326 | |||
327 | lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | | ||
328 | (mode->hdisplay & 0x0700) >> 8; | ||
329 | |||
330 | ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); | ||
331 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, | ||
332 | horizontal_active_pixel_input); | ||
333 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, | ||
334 | horizontal_active_pixel_output); | ||
335 | ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, | ||
336 | vertical_active_line_output); | ||
337 | ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, | ||
338 | active_input_line_output); | ||
339 | ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control); | ||
340 | ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div); | ||
341 | ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2); | ||
342 | ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable); | ||
343 | |||
344 | /* Turn the LVDS back on with new settings. */ | ||
345 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); | ||
346 | |||
347 | DRM_DEBUG("Registers after mode setting\n"); | ||
348 | ch7017_dump_regs(dvo); | ||
349 | } | ||
350 | |||
351 | /* set the CH7017 power state */ | ||
352 | static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) | ||
353 | { | ||
354 | uint8_t val; | ||
355 | |||
356 | ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); | ||
357 | |||
358 | /* Turn off TV/VGA, and never turn it on since we don't support it. */ | ||
359 | ch7017_write(dvo, CH7017_POWER_MANAGEMENT, | ||
360 | CH7017_DAC0_POWER_DOWN | | ||
361 | CH7017_DAC1_POWER_DOWN | | ||
362 | CH7017_DAC2_POWER_DOWN | | ||
363 | CH7017_DAC3_POWER_DOWN | | ||
364 | CH7017_TV_POWER_DOWN_EN); | ||
365 | |||
366 | if (mode == DRM_MODE_DPMS_ON) { | ||
367 | /* Turn on the LVDS */ | ||
368 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, | ||
369 | val & ~CH7017_LVDS_POWER_DOWN_EN); | ||
370 | } else { | ||
371 | /* Turn off the LVDS */ | ||
372 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, | ||
373 | val | CH7017_LVDS_POWER_DOWN_EN); | ||
374 | } | ||
375 | |||
376 | /* XXX: Should actually wait for the power status to update somehow */ | ||
377 | udelay(20000); | ||
378 | } | ||
379 | |||
380 | static void ch7017_dump_regs(struct intel_dvo_device *dvo) | ||
381 | { | ||
382 | uint8_t val; | ||
383 | |||
384 | #define DUMP(reg) \ | ||
385 | do { \ | ||
386 | ch7017_read(dvo, reg, &val); \ | ||
387 | DRM_DEBUG(#reg ": %02x\n", val); \ | ||
388 | } while (0) | ||
389 | |||
390 | DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); | ||
391 | DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT); | ||
392 | DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT); | ||
393 | DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT); | ||
394 | DUMP(CH7017_LVDS_PLL_VCO_CONTROL); | ||
395 | DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV); | ||
396 | DUMP(CH7017_LVDS_CONTROL_2); | ||
397 | DUMP(CH7017_OUTPUTS_ENABLE); | ||
398 | DUMP(CH7017_LVDS_POWER_DOWN); | ||
399 | } | ||
400 | |||
401 | static void ch7017_save(struct intel_dvo_device *dvo) | ||
402 | { | ||
403 | struct ch7017_priv *priv = dvo->dev_priv; | ||
404 | |||
405 | ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); | ||
406 | ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); | ||
407 | ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); | ||
408 | ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco); | ||
409 | ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); | ||
410 | ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); | ||
411 | ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); | ||
412 | ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); | ||
413 | ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); | ||
414 | } | ||
415 | |||
416 | static void ch7017_restore(struct intel_dvo_device *dvo) | ||
417 | { | ||
418 | struct ch7017_priv *priv = dvo->dev_priv; | ||
419 | |||
420 | /* Power down before changing mode */ | ||
421 | ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); | ||
422 | |||
423 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); | ||
424 | ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); | ||
425 | ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); | ||
426 | ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); | ||
427 | ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); | ||
428 | ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); | ||
429 | ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); | ||
430 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); | ||
431 | ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); | ||
432 | } | ||
433 | |||
434 | static void ch7017_destroy(struct intel_dvo_device *dvo) | ||
435 | { | ||
436 | struct ch7017_priv *priv = dvo->dev_priv; | ||
437 | |||
438 | if (priv) { | ||
439 | kfree(priv); | ||
440 | dvo->dev_priv = NULL; | ||
441 | } | ||
442 | } | ||
443 | |||
444 | struct intel_dvo_dev_ops ch7017_ops = { | ||
445 | .init = ch7017_init, | ||
446 | .detect = ch7017_detect, | ||
447 | .mode_valid = ch7017_mode_valid, | ||
448 | .mode_set = ch7017_mode_set, | ||
449 | .dpms = ch7017_dpms, | ||
450 | .dump_regs = ch7017_dump_regs, | ||
451 | .save = ch7017_save, | ||
452 | .restore = ch7017_restore, | ||
453 | .destroy = ch7017_destroy, | ||
454 | }; | ||
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c new file mode 100644 index 000000000000..d2fd95dbd034 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c | |||
@@ -0,0 +1,368 @@ | |||
1 | /************************************************************************** | ||
2 | |||
3 | Copyright © 2006 Dave Airlie | ||
4 | |||
5 | All Rights Reserved. | ||
6 | |||
7 | Permission is hereby granted, free of charge, to any person obtaining a | ||
8 | copy of this software and associated documentation files (the | ||
9 | "Software"), to deal in the Software without restriction, including | ||
10 | without limitation the rights to use, copy, modify, merge, publish, | ||
11 | distribute, sub license, and/or sell copies of the Software, and to | ||
12 | permit persons to whom the Software is furnished to do so, subject to | ||
13 | the following conditions: | ||
14 | |||
15 | The above copyright notice and this permission notice (including the | ||
16 | next paragraph) shall be included in all copies or substantial portions | ||
17 | of the Software. | ||
18 | |||
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
20 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
22 | IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
23 | ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
24 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
25 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
26 | |||
27 | **************************************************************************/ | ||
28 | |||
29 | #include "dvo.h" | ||
30 | |||
31 | #define CH7xxx_REG_VID 0x4a | ||
32 | #define CH7xxx_REG_DID 0x4b | ||
33 | |||
34 | #define CH7011_VID 0x83 /* 7010 as well */ | ||
35 | #define CH7009A_VID 0x84 | ||
36 | #define CH7009B_VID 0x85 | ||
37 | #define CH7301_VID 0x95 | ||
38 | |||
39 | #define CH7xxx_VID 0x84 | ||
40 | #define CH7xxx_DID 0x17 | ||
41 | |||
42 | #define CH7xxx_NUM_REGS 0x4c | ||
43 | |||
44 | #define CH7xxx_CM 0x1c | ||
45 | #define CH7xxx_CM_XCM (1<<0) | ||
46 | #define CH7xxx_CM_MCP (1<<2) | ||
47 | #define CH7xxx_INPUT_CLOCK 0x1d | ||
48 | #define CH7xxx_GPIO 0x1e | ||
49 | #define CH7xxx_GPIO_HPIR (1<<3) | ||
50 | #define CH7xxx_IDF 0x1f | ||
51 | |||
52 | #define CH7xxx_IDF_HSP (1<<3) | ||
53 | #define CH7xxx_IDF_VSP (1<<4) | ||
54 | |||
55 | #define CH7xxx_CONNECTION_DETECT 0x20 | ||
56 | #define CH7xxx_CDET_DVI (1<<5) | ||
57 | |||
58 | #define CH7301_DAC_CNTL 0x21 | ||
59 | #define CH7301_HOTPLUG 0x23 | ||
60 | #define CH7xxx_TCTL 0x31 | ||
61 | #define CH7xxx_TVCO 0x32 | ||
62 | #define CH7xxx_TPCP 0x33 | ||
63 | #define CH7xxx_TPD 0x34 | ||
64 | #define CH7xxx_TPVT 0x35 | ||
65 | #define CH7xxx_TLPF 0x36 | ||
66 | #define CH7xxx_TCT 0x37 | ||
67 | #define CH7301_TEST_PATTERN 0x48 | ||
68 | |||
69 | #define CH7xxx_PM 0x49 | ||
70 | #define CH7xxx_PM_FPD (1<<0) | ||
71 | #define CH7301_PM_DACPD0 (1<<1) | ||
72 | #define CH7301_PM_DACPD1 (1<<2) | ||
73 | #define CH7301_PM_DACPD2 (1<<3) | ||
74 | #define CH7xxx_PM_DVIL (1<<6) | ||
75 | #define CH7xxx_PM_DVIP (1<<7) | ||
76 | |||
77 | #define CH7301_SYNC_POLARITY 0x56 | ||
78 | #define CH7301_SYNC_RGB_YUV (1<<0) | ||
79 | #define CH7301_SYNC_POL_DVI (1<<5) | ||
80 | |||
81 | /** @file | ||
82 | * driver for the Chrontel 7xxx DVI chip over DVO. | ||
83 | */ | ||
84 | |||
85 | static struct ch7xxx_id_struct { | ||
86 | uint8_t vid; | ||
87 | char *name; | ||
88 | } ch7xxx_ids[] = { | ||
89 | { CH7011_VID, "CH7011" }, | ||
90 | { CH7009A_VID, "CH7009A" }, | ||
91 | { CH7009B_VID, "CH7009B" }, | ||
92 | { CH7301_VID, "CH7301" }, | ||
93 | }; | ||
94 | |||
95 | struct ch7xxx_reg_state { | ||
96 | uint8_t regs[CH7xxx_NUM_REGS]; | ||
97 | }; | ||
98 | |||
99 | struct ch7xxx_priv { | ||
100 | bool quiet; | ||
101 | |||
102 | struct ch7xxx_reg_state save_reg; | ||
103 | struct ch7xxx_reg_state mode_reg; | ||
104 | uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; | ||
105 | uint8_t save_TLPF, save_TCT, save_PM, save_IDF; | ||
106 | }; | ||
107 | |||
108 | static void ch7xxx_save(struct intel_dvo_device *dvo); | ||
109 | |||
110 | static char *ch7xxx_get_id(uint8_t vid) | ||
111 | { | ||
112 | int i; | ||
113 | |||
114 | for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) { | ||
115 | if (ch7xxx_ids[i].vid == vid) | ||
116 | return ch7xxx_ids[i].name; | ||
117 | } | ||
118 | |||
119 | return NULL; | ||
120 | } | ||
121 | |||
122 | /** Reads an 8 bit register */ | ||
123 | static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | ||
124 | { | ||
125 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
126 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
127 | u8 out_buf[2]; | ||
128 | u8 in_buf[2]; | ||
129 | |||
130 | struct i2c_msg msgs[] = { | ||
131 | { | ||
132 | .addr = i2cbus->slave_addr, | ||
133 | .flags = 0, | ||
134 | .len = 1, | ||
135 | .buf = out_buf, | ||
136 | }, | ||
137 | { | ||
138 | .addr = i2cbus->slave_addr, | ||
139 | .flags = I2C_M_RD, | ||
140 | .len = 1, | ||
141 | .buf = in_buf, | ||
142 | } | ||
143 | }; | ||
144 | |||
145 | out_buf[0] = addr; | ||
146 | out_buf[1] = 0; | ||
147 | |||
148 | if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { | ||
149 | *ch = in_buf[0]; | ||
150 | return true; | ||
151 | } | ||
152 | |||
153 | if (!ch7xxx->quiet) { | ||
154 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | ||
155 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
156 | } | ||
157 | return false; | ||
158 | } | ||
159 | |||
160 | /** Writes an 8 bit register */ | ||
161 | static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | ||
162 | { | ||
163 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
164 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
165 | uint8_t out_buf[2]; | ||
166 | struct i2c_msg msg = { | ||
167 | .addr = i2cbus->slave_addr, | ||
168 | .flags = 0, | ||
169 | .len = 2, | ||
170 | .buf = out_buf, | ||
171 | }; | ||
172 | |||
173 | out_buf[0] = addr; | ||
174 | out_buf[1] = ch; | ||
175 | |||
176 | if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) | ||
177 | return true; | ||
178 | |||
179 | if (!ch7xxx->quiet) { | ||
180 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | ||
181 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
182 | } | ||
183 | |||
184 | return false; | ||
185 | } | ||
186 | |||
187 | static bool ch7xxx_init(struct intel_dvo_device *dvo, | ||
188 | struct intel_i2c_chan *i2cbus) | ||
189 | { | ||
190 | /* this will detect the CH7xxx chip on the specified i2c bus */ | ||
191 | struct ch7xxx_priv *ch7xxx; | ||
192 | uint8_t vendor, device; | ||
193 | char *name; | ||
194 | |||
195 | ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); | ||
196 | if (ch7xxx == NULL) | ||
197 | return false; | ||
198 | |||
199 | dvo->i2c_bus = i2cbus; | ||
200 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
201 | dvo->dev_priv = ch7xxx; | ||
202 | ch7xxx->quiet = true; | ||
203 | |||
204 | if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor)) | ||
205 | goto out; | ||
206 | |||
207 | name = ch7xxx_get_id(vendor); | ||
208 | if (!name) { | ||
209 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", | ||
210 | vendor, i2cbus->adapter.name, i2cbus->slave_addr); | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | |||
215 | if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device)) | ||
216 | goto out; | ||
217 | |||
218 | if (device != CH7xxx_DID) { | ||
219 | DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", | ||
220 | device, i2cbus->adapter.name, i2cbus->slave_addr); | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | ch7xxx->quiet = false; | ||
225 | DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", | ||
226 | name, vendor, device); | ||
227 | return true; | ||
228 | out: | ||
229 | kfree(ch7xxx); | ||
230 | return false; | ||
231 | } | ||
232 | |||
233 | static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo) | ||
234 | { | ||
235 | uint8_t cdet, orig_pm, pm; | ||
236 | |||
237 | ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm); | ||
238 | |||
239 | pm = orig_pm; | ||
240 | pm &= ~CH7xxx_PM_FPD; | ||
241 | pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP; | ||
242 | |||
243 | ch7xxx_writeb(dvo, CH7xxx_PM, pm); | ||
244 | |||
245 | ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet); | ||
246 | |||
247 | ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm); | ||
248 | |||
249 | if (cdet & CH7xxx_CDET_DVI) | ||
250 | return connector_status_connected; | ||
251 | return connector_status_disconnected; | ||
252 | } | ||
253 | |||
254 | static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo, | ||
255 | struct drm_display_mode *mode) | ||
256 | { | ||
257 | if (mode->clock > 165000) | ||
258 | return MODE_CLOCK_HIGH; | ||
259 | |||
260 | return MODE_OK; | ||
261 | } | ||
262 | |||
263 | static void ch7xxx_mode_set(struct intel_dvo_device *dvo, | ||
264 | struct drm_display_mode *mode, | ||
265 | struct drm_display_mode *adjusted_mode) | ||
266 | { | ||
267 | uint8_t tvco, tpcp, tpd, tlpf, idf; | ||
268 | |||
269 | if (mode->clock <= 65000) { | ||
270 | tvco = 0x23; | ||
271 | tpcp = 0x08; | ||
272 | tpd = 0x16; | ||
273 | tlpf = 0x60; | ||
274 | } else { | ||
275 | tvco = 0x2d; | ||
276 | tpcp = 0x06; | ||
277 | tpd = 0x26; | ||
278 | tlpf = 0xa0; | ||
279 | } | ||
280 | |||
281 | ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00); | ||
282 | ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco); | ||
283 | ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp); | ||
284 | ch7xxx_writeb(dvo, CH7xxx_TPD, tpd); | ||
285 | ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30); | ||
286 | ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf); | ||
287 | ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00); | ||
288 | |||
289 | ch7xxx_readb(dvo, CH7xxx_IDF, &idf); | ||
290 | |||
291 | idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP); | ||
292 | if (mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
293 | idf |= CH7xxx_IDF_HSP; | ||
294 | |||
295 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
296 | idf |= CH7xxx_IDF_VSP; | ||
297 | |||
298 | ch7xxx_writeb(dvo, CH7xxx_IDF, idf); | ||
299 | } | ||
300 | |||
301 | /* set the CH7xxx power state */ | ||
302 | static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) | ||
303 | { | ||
304 | if (mode == DRM_MODE_DPMS_ON) | ||
305 | ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); | ||
306 | else | ||
307 | ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); | ||
308 | } | ||
309 | |||
310 | static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) | ||
311 | { | ||
312 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
313 | int i; | ||
314 | |||
315 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { | ||
316 | if ((i % 8) == 0) | ||
317 | DRM_DEBUG("\n %02X: ", i); | ||
318 | DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | static void ch7xxx_save(struct intel_dvo_device *dvo) | ||
323 | { | ||
324 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
325 | |||
326 | ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); | ||
327 | ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); | ||
328 | ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); | ||
329 | ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); | ||
330 | ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); | ||
331 | ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM); | ||
332 | ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); | ||
333 | } | ||
334 | |||
335 | static void ch7xxx_restore(struct intel_dvo_device *dvo) | ||
336 | { | ||
337 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
338 | |||
339 | ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); | ||
340 | ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); | ||
341 | ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); | ||
342 | ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); | ||
343 | ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); | ||
344 | ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); | ||
345 | ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); | ||
346 | } | ||
347 | |||
348 | static void ch7xxx_destroy(struct intel_dvo_device *dvo) | ||
349 | { | ||
350 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
351 | |||
352 | if (ch7xxx) { | ||
353 | kfree(ch7xxx); | ||
354 | dvo->dev_priv = NULL; | ||
355 | } | ||
356 | } | ||
357 | |||
358 | struct intel_dvo_dev_ops ch7xxx_ops = { | ||
359 | .init = ch7xxx_init, | ||
360 | .detect = ch7xxx_detect, | ||
361 | .mode_valid = ch7xxx_mode_valid, | ||
362 | .mode_set = ch7xxx_mode_set, | ||
363 | .dpms = ch7xxx_dpms, | ||
364 | .dump_regs = ch7xxx_dump_regs, | ||
365 | .save = ch7xxx_save, | ||
366 | .restore = ch7xxx_restore, | ||
367 | .destroy = ch7xxx_destroy, | ||
368 | }; | ||
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c new file mode 100644 index 000000000000..0c8d375e8e37 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ivch.c | |||
@@ -0,0 +1,442 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include "dvo.h" | ||
29 | |||
30 | /* | ||
31 | * register definitions for the i82807aa. | ||
32 | * | ||
33 | * Documentation on this chipset can be found in datasheet #29069001 at | ||
34 | * intel.com. | ||
35 | */ | ||
36 | |||
37 | /* | ||
38 | * VCH Revision & GMBus Base Addr | ||
39 | */ | ||
40 | #define VR00 0x00 | ||
41 | # define VR00_BASE_ADDRESS_MASK 0x007f | ||
42 | |||
43 | /* | ||
44 | * Functionality Enable | ||
45 | */ | ||
46 | #define VR01 0x01 | ||
47 | |||
48 | /* | ||
49 | * Enable the panel fitter | ||
50 | */ | ||
51 | # define VR01_PANEL_FIT_ENABLE (1 << 3) | ||
52 | /* | ||
53 | * Enables the LCD display. | ||
54 | * | ||
55 | * This must not be set while VR01_DVO_BYPASS_ENABLE is set. | ||
56 | */ | ||
57 | # define VR01_LCD_ENABLE (1 << 2) | ||
58 | /** Enables the DVO repeater. */ | ||
59 | # define VR01_DVO_BYPASS_ENABLE (1 << 1) | ||
60 | /** Enables the DVO clock */ | ||
61 | # define VR01_DVO_ENABLE (1 << 0) | ||
62 | |||
63 | /* | ||
64 | * LCD Interface Format | ||
65 | */ | ||
66 | #define VR10 0x10 | ||
67 | /** Enables LVDS output instead of CMOS */ | ||
68 | # define VR10_LVDS_ENABLE (1 << 4) | ||
69 | /** Enables 18-bit LVDS output. */ | ||
70 | # define VR10_INTERFACE_1X18 (0 << 2) | ||
71 | /** Enables 24-bit LVDS or CMOS output */ | ||
72 | # define VR10_INTERFACE_1X24 (1 << 2) | ||
73 | /** Enables 2x18-bit LVDS or CMOS output. */ | ||
74 | # define VR10_INTERFACE_2X18 (2 << 2) | ||
75 | /** Enables 2x24-bit LVDS output */ | ||
76 | # define VR10_INTERFACE_2X24 (3 << 2) | ||
77 | |||
78 | /* | ||
79 | * VR20 LCD Horizontal Display Size | ||
80 | */ | ||
81 | #define VR20 0x20 | ||
82 | |||
83 | /* | ||
84 | * LCD Vertical Display Size | ||
85 | */ | ||
86 | #define VR21 0x21 | ||
87 | |||
88 | /* | ||
89 | * Panel power down status | ||
90 | */ | ||
91 | #define VR30 0x30 | ||
92 | /** Read only bit indicating that the panel is not in a safe poweroff state. */ | ||
93 | # define VR30_PANEL_ON (1 << 15) | ||
94 | |||
95 | #define VR40 0x40 | ||
96 | # define VR40_STALL_ENABLE (1 << 13) | ||
97 | # define VR40_VERTICAL_INTERP_ENABLE (1 << 12) | ||
98 | # define VR40_ENHANCED_PANEL_FITTING (1 << 11) | ||
99 | # define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10) | ||
100 | # define VR40_AUTO_RATIO_ENABLE (1 << 9) | ||
101 | # define VR40_CLOCK_GATING_ENABLE (1 << 8) | ||
102 | |||
103 | /* | ||
104 | * Panel Fitting Vertical Ratio | ||
105 | * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2 | ||
106 | */ | ||
107 | #define VR41 0x41 | ||
108 | |||
109 | /* | ||
110 | * Panel Fitting Horizontal Ratio | ||
111 | * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2 | ||
112 | */ | ||
113 | #define VR42 0x42 | ||
114 | |||
115 | /* | ||
116 | * Horizontal Image Size | ||
117 | */ | ||
118 | #define VR43 0x43 | ||
119 | |||
120 | /* VR80 GPIO 0 | ||
121 | */ | ||
122 | #define VR80 0x80 | ||
123 | #define VR81 0x81 | ||
124 | #define VR82 0x82 | ||
125 | #define VR83 0x83 | ||
126 | #define VR84 0x84 | ||
127 | #define VR85 0x85 | ||
128 | #define VR86 0x86 | ||
129 | #define VR87 0x87 | ||
130 | |||
131 | /* VR88 GPIO 8 | ||
132 | */ | ||
133 | #define VR88 0x88 | ||
134 | |||
135 | /* Graphics BIOS scratch 0 | ||
136 | */ | ||
137 | #define VR8E 0x8E | ||
138 | # define VR8E_PANEL_TYPE_MASK (0xf << 0) | ||
139 | # define VR8E_PANEL_INTERFACE_CMOS (0 << 4) | ||
140 | # define VR8E_PANEL_INTERFACE_LVDS (1 << 4) | ||
141 | # define VR8E_FORCE_DEFAULT_PANEL (1 << 5) | ||
142 | |||
143 | /* Graphics BIOS scratch 1 | ||
144 | */ | ||
145 | #define VR8F 0x8F | ||
146 | # define VR8F_VCH_PRESENT (1 << 0) | ||
147 | # define VR8F_DISPLAY_CONN (1 << 1) | ||
148 | # define VR8F_POWER_MASK (0x3c) | ||
149 | # define VR8F_POWER_POS (2) | ||
150 | |||
151 | |||
152 | struct ivch_priv { | ||
153 | bool quiet; | ||
154 | |||
155 | uint16_t width, height; | ||
156 | |||
157 | uint16_t save_VR01; | ||
158 | uint16_t save_VR40; | ||
159 | }; | ||
160 | |||
161 | |||
162 | static void ivch_dump_regs(struct intel_dvo_device *dvo); | ||
163 | |||
164 | /** | ||
165 | * Reads a register on the ivch. | ||
166 | * | ||
167 | * Each of the 256 registers is 16 bits long. | ||
168 | */ | ||
169 | static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) | ||
170 | { | ||
171 | struct ivch_priv *priv = dvo->dev_priv; | ||
172 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
173 | u8 out_buf[1]; | ||
174 | u8 in_buf[2]; | ||
175 | |||
176 | struct i2c_msg msgs[] = { | ||
177 | { | ||
178 | .addr = i2cbus->slave_addr, | ||
179 | .flags = I2C_M_RD, | ||
180 | .len = 0, | ||
181 | }, | ||
182 | { | ||
183 | .addr = 0, | ||
184 | .flags = I2C_M_NOSTART, | ||
185 | .len = 1, | ||
186 | .buf = out_buf, | ||
187 | }, | ||
188 | { | ||
189 | .addr = i2cbus->slave_addr, | ||
190 | .flags = I2C_M_RD | I2C_M_NOSTART, | ||
191 | .len = 2, | ||
192 | .buf = in_buf, | ||
193 | } | ||
194 | }; | ||
195 | |||
196 | out_buf[0] = addr; | ||
197 | |||
198 | if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) { | ||
199 | *data = (in_buf[1] << 8) | in_buf[0]; | ||
200 | return true; | ||
201 | } | ||
202 | |||
203 | if (!priv->quiet) { | ||
204 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | ||
205 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
206 | } | ||
207 | return false; | ||
208 | } | ||
209 | |||
210 | /** Writes a 16-bit register on the ivch */ | ||
211 | static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) | ||
212 | { | ||
213 | struct ivch_priv *priv = dvo->dev_priv; | ||
214 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
215 | u8 out_buf[3]; | ||
216 | struct i2c_msg msg = { | ||
217 | .addr = i2cbus->slave_addr, | ||
218 | .flags = 0, | ||
219 | .len = 3, | ||
220 | .buf = out_buf, | ||
221 | }; | ||
222 | |||
223 | out_buf[0] = addr; | ||
224 | out_buf[1] = data & 0xff; | ||
225 | out_buf[2] = data >> 8; | ||
226 | |||
227 | if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) | ||
228 | return true; | ||
229 | |||
230 | if (!priv->quiet) { | ||
231 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | ||
232 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
233 | } | ||
234 | |||
235 | return false; | ||
236 | } | ||
237 | |||
238 | /** Probes the given bus and slave address for an ivch */ | ||
239 | static bool ivch_init(struct intel_dvo_device *dvo, | ||
240 | struct intel_i2c_chan *i2cbus) | ||
241 | { | ||
242 | struct ivch_priv *priv; | ||
243 | uint16_t temp; | ||
244 | |||
245 | priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); | ||
246 | if (priv == NULL) | ||
247 | return false; | ||
248 | |||
249 | dvo->i2c_bus = i2cbus; | ||
250 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
251 | dvo->dev_priv = priv; | ||
252 | priv->quiet = true; | ||
253 | |||
254 | if (!ivch_read(dvo, VR00, &temp)) | ||
255 | goto out; | ||
256 | priv->quiet = false; | ||
257 | |||
258 | /* The identification bits are probably zeroes, which isn't very | ||
259 | * unique, so check that the value in the base address field matches | ||
260 | * the address it's responding on. | ||
261 | */ | ||
262 | if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { | ||
263 | DRM_DEBUG("ivch detect failed due to address mismatch " | ||
264 | "(%d vs %d)\n", | ||
265 | (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); | ||
266 | goto out; | ||
267 | } | ||
268 | |||
269 | ivch_read(dvo, VR20, &priv->width); | ||
270 | ivch_read(dvo, VR21, &priv->height); | ||
271 | |||
272 | return true; | ||
273 | |||
274 | out: | ||
275 | kfree(priv); | ||
276 | return false; | ||
277 | } | ||
278 | |||
279 | static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo) | ||
280 | { | ||
281 | return connector_status_connected; | ||
282 | } | ||
283 | |||
284 | static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, | ||
285 | struct drm_display_mode *mode) | ||
286 | { | ||
287 | if (mode->clock > 112000) | ||
288 | return MODE_CLOCK_HIGH; | ||
289 | |||
290 | return MODE_OK; | ||
291 | } | ||
292 | |||
293 | /** Sets the power state of the panel connected to the ivch */ | ||
294 | static void ivch_dpms(struct intel_dvo_device *dvo, int mode) | ||
295 | { | ||
296 | int i; | ||
297 | uint16_t vr01, vr30, backlight; | ||
298 | |||
299 | /* Set the new power state of the panel. */ | ||
300 | if (!ivch_read(dvo, VR01, &vr01)) | ||
301 | return; | ||
302 | |||
303 | if (mode == DRM_MODE_DPMS_ON) | ||
304 | backlight = 1; | ||
305 | else | ||
306 | backlight = 0; | ||
307 | ivch_write(dvo, VR80, backlight); | ||
308 | |||
309 | if (mode == DRM_MODE_DPMS_ON) | ||
310 | vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; | ||
311 | else | ||
312 | vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); | ||
313 | |||
314 | ivch_write(dvo, VR01, vr01); | ||
315 | |||
316 | /* Wait for the panel to make its state transition */ | ||
317 | for (i = 0; i < 100; i++) { | ||
318 | if (!ivch_read(dvo, VR30, &vr30)) | ||
319 | break; | ||
320 | |||
321 | if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) | ||
322 | break; | ||
323 | udelay(1000); | ||
324 | } | ||
325 | /* wait some more; vch may fail to resync sometimes without this */ | ||
326 | udelay(16 * 1000); | ||
327 | } | ||
328 | |||
329 | static void ivch_mode_set(struct intel_dvo_device *dvo, | ||
330 | struct drm_display_mode *mode, | ||
331 | struct drm_display_mode *adjusted_mode) | ||
332 | { | ||
333 | uint16_t vr40 = 0; | ||
334 | uint16_t vr01; | ||
335 | |||
336 | vr01 = 0; | ||
337 | vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | | ||
338 | VR40_HORIZONTAL_INTERP_ENABLE); | ||
339 | |||
340 | if (mode->hdisplay != adjusted_mode->hdisplay || | ||
341 | mode->vdisplay != adjusted_mode->vdisplay) { | ||
342 | uint16_t x_ratio, y_ratio; | ||
343 | |||
344 | vr01 |= VR01_PANEL_FIT_ENABLE; | ||
345 | vr40 |= VR40_CLOCK_GATING_ENABLE; | ||
346 | x_ratio = (((mode->hdisplay - 1) << 16) / | ||
347 | (adjusted_mode->hdisplay - 1)) >> 2; | ||
348 | y_ratio = (((mode->vdisplay - 1) << 16) / | ||
349 | (adjusted_mode->vdisplay - 1)) >> 2; | ||
350 | ivch_write(dvo, VR42, x_ratio); | ||
351 | ivch_write(dvo, VR41, y_ratio); | ||
352 | } else { | ||
353 | vr01 &= ~VR01_PANEL_FIT_ENABLE; | ||
354 | vr40 &= ~VR40_CLOCK_GATING_ENABLE; | ||
355 | } | ||
356 | vr40 &= ~VR40_AUTO_RATIO_ENABLE; | ||
357 | |||
358 | ivch_write(dvo, VR01, vr01); | ||
359 | ivch_write(dvo, VR40, vr40); | ||
360 | |||
361 | ivch_dump_regs(dvo); | ||
362 | } | ||
363 | |||
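The two ivch_write() calls for VR41/VR42 just above implement the ratio formula quoted with those register definitions: ratio = (((size_in - 1) << 16) / (size_out - 1)) >> 2, i.e. a fraction with 14 bits below the binary point. As an illustrative example (not from the patch): fitting an 800x600 mode onto a 1024x768 panel gives x_ratio = ((799 << 16) / 1023) >> 2 = 51185 >> 2 = 12796, and 12796 / 16384 ≈ 0.781, which matches 799/1023. When the mode already matches the panel, the else branch instead disables the panel fitter and its clock gating.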
364 | static void ivch_dump_regs(struct intel_dvo_device *dvo) | ||
365 | { | ||
366 | uint16_t val; | ||
367 | |||
368 | ivch_read(dvo, VR00, &val); | ||
369 | DRM_DEBUG("VR00: 0x%04x\n", val); | ||
370 | ivch_read(dvo, VR01, &val); | ||
371 | DRM_DEBUG("VR01: 0x%04x\n", val); | ||
372 | ivch_read(dvo, VR30, &val); | ||
373 | DRM_DEBUG("VR30: 0x%04x\n", val); | ||
374 | ivch_read(dvo, VR40, &val); | ||
375 | DRM_DEBUG("VR40: 0x%04x\n", val); | ||
376 | |||
377 | /* GPIO registers */ | ||
378 | ivch_read(dvo, VR80, &val); | ||
379 | DRM_DEBUG("VR80: 0x%04x\n", val); | ||
380 | ivch_read(dvo, VR81, &val); | ||
381 | DRM_DEBUG("VR81: 0x%04x\n", val); | ||
382 | ivch_read(dvo, VR82, &val); | ||
383 | DRM_DEBUG("VR82: 0x%04x\n", val); | ||
384 | ivch_read(dvo, VR83, &val); | ||
385 | DRM_DEBUG("VR83: 0x%04x\n", val); | ||
386 | ivch_read(dvo, VR84, &val); | ||
387 | DRM_DEBUG("VR84: 0x%04x\n", val); | ||
388 | ivch_read(dvo, VR85, &val); | ||
389 | DRM_DEBUG("VR85: 0x%04x\n", val); | ||
390 | ivch_read(dvo, VR86, &val); | ||
391 | DRM_DEBUG("VR86: 0x%04x\n", val); | ||
392 | ivch_read(dvo, VR87, &val); | ||
393 | DRM_DEBUG("VR87: 0x%04x\n", val); | ||
394 | ivch_read(dvo, VR88, &val); | ||
395 | DRM_DEBUG("VR88: 0x%04x\n", val); | ||
396 | |||
397 | /* Scratch register 0 - AIM Panel type */ | ||
398 | ivch_read(dvo, VR8E, &val); | ||
399 | DRM_DEBUG("VR8E: 0x%04x\n", val); | ||
400 | |||
401 | /* Scratch register 1 - Status register */ | ||
402 | ivch_read(dvo, VR8F, &val); | ||
403 | DRM_DEBUG("VR8F: 0x%04x\n", val); | ||
404 | } | ||
405 | |||
406 | static void ivch_save(struct intel_dvo_device *dvo) | ||
407 | { | ||
408 | struct ivch_priv *priv = dvo->dev_priv; | ||
409 | |||
410 | ivch_read(dvo, VR01, &priv->save_VR01); | ||
411 | ivch_read(dvo, VR40, &priv->save_VR40); | ||
412 | } | ||
413 | |||
414 | static void ivch_restore(struct intel_dvo_device *dvo) | ||
415 | { | ||
416 | struct ivch_priv *priv = dvo->dev_priv; | ||
417 | |||
418 | ivch_write(dvo, VR01, priv->save_VR01); | ||
419 | ivch_write(dvo, VR40, priv->save_VR40); | ||
420 | } | ||
421 | |||
422 | static void ivch_destroy(struct intel_dvo_device *dvo) | ||
423 | { | ||
424 | struct ivch_priv *priv = dvo->dev_priv; | ||
425 | |||
426 | if (priv) { | ||
427 | kfree(priv); | ||
428 | dvo->dev_priv = NULL; | ||
429 | } | ||
430 | } | ||
431 | |||
432 | struct intel_dvo_dev_ops ivch_ops = { | ||
433 | .init = ivch_init, | ||
434 | .dpms = ivch_dpms, | ||
435 | .save = ivch_save, | ||
436 | .restore = ivch_restore, | ||
437 | .mode_valid = ivch_mode_valid, | ||
438 | .mode_set = ivch_mode_set, | ||
439 | .detect = ivch_detect, | ||
440 | .dump_regs = ivch_dump_regs, | ||
441 | .destroy = ivch_destroy, | ||
442 | }; | ||
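
Side note on the panel-fit math in ivch_mode_set() above: VR42/VR41 are loaded with ((src - 1) << 16) / (dst - 1), shifted right by two, which amounts to a fixed-point downscaling ratio with a 14-bit fraction. A minimal standalone sketch of that arithmetic (userspace C with an invented 800x600-into-1024x768 case, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Same ratio math as the VR41/VR42 writes in ivch_mode_set(). */
static uint16_t ivch_fit_ratio(int src, int dst)
{
        return (uint16_t)((((src - 1) << 16) / (dst - 1)) >> 2);
}

int main(void)
{
        /* Hypothetical 800x600 mode panel-fitted onto a 1024x768 panel. */
        uint16_t x_ratio = ivch_fit_ratio(800, 1024);  /* 12796, i.e. ~0.781 * 2^14 */
        uint16_t y_ratio = ivch_fit_ratio(600, 768);

        printf("VR42 (x ratio) = %u, VR41 (y ratio) = %u\n", x_ratio, y_ratio);
        return 0;
}
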
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c new file mode 100644 index 000000000000..033a4bb070b2 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_sil164.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /************************************************************************** | ||
2 | |||
3 | Copyright © 2006 Dave Airlie | ||
4 | |||
5 | All Rights Reserved. | ||
6 | |||
7 | Permission is hereby granted, free of charge, to any person obtaining a | ||
8 | copy of this software and associated documentation files (the | ||
9 | "Software"), to deal in the Software without restriction, including | ||
10 | without limitation the rights to use, copy, modify, merge, publish, | ||
11 | distribute, sub license, and/or sell copies of the Software, and to | ||
12 | permit persons to whom the Software is furnished to do so, subject to | ||
13 | the following conditions: | ||
14 | |||
15 | The above copyright notice and this permission notice (including the | ||
16 | next paragraph) shall be included in all copies or substantial portions | ||
17 | of the Software. | ||
18 | |||
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
20 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
22 | IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
23 | ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
24 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
25 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
26 | |||
27 | **************************************************************************/ | ||
28 | |||
29 | #include "dvo.h" | ||
30 | |||
31 | #define SIL164_VID 0x0001 | ||
32 | #define SIL164_DID 0x0006 | ||
33 | |||
34 | #define SIL164_VID_LO 0x00 | ||
35 | #define SIL164_VID_HI 0x01 | ||
36 | #define SIL164_DID_LO 0x02 | ||
37 | #define SIL164_DID_HI 0x03 | ||
38 | #define SIL164_REV 0x04 | ||
39 | #define SIL164_RSVD 0x05 | ||
40 | #define SIL164_FREQ_LO 0x06 | ||
41 | #define SIL164_FREQ_HI 0x07 | ||
42 | |||
43 | #define SIL164_REG8 0x08 | ||
44 | #define SIL164_8_VEN (1<<5) | ||
45 | #define SIL164_8_HEN (1<<4) | ||
46 | #define SIL164_8_DSEL (1<<3) | ||
47 | #define SIL164_8_BSEL (1<<2) | ||
48 | #define SIL164_8_EDGE (1<<1) | ||
49 | #define SIL164_8_PD (1<<0) | ||
50 | |||
51 | #define SIL164_REG9 0x09 | ||
52 | #define SIL164_9_VLOW (1<<7) | ||
53 | #define SIL164_9_MSEL_MASK (0x7<<4) | ||
54 | #define SIL164_9_TSEL (1<<3) | ||
55 | #define SIL164_9_RSEN (1<<2) | ||
56 | #define SIL164_9_HTPLG (1<<1) | ||
57 | #define SIL164_9_MDI (1<<0) | ||
58 | |||
59 | #define SIL164_REGC 0x0c | ||
60 | |||
61 | struct sil164_save_rec { | ||
62 | uint8_t reg8; | ||
63 | uint8_t reg9; | ||
64 | uint8_t regc; | ||
65 | }; | ||
66 | |||
67 | struct sil164_priv { | ||
68 | //I2CDevRec d; | ||
69 | bool quiet; | ||
70 | struct sil164_save_rec save_regs; | ||
71 | struct sil164_save_rec mode_regs; | ||
72 | }; | ||
73 | |||
74 | #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) | ||
75 | |||
76 | static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | ||
77 | { | ||
78 | struct sil164_priv *sil = dvo->dev_priv; | ||
79 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
80 | u8 out_buf[2]; | ||
81 | u8 in_buf[2]; | ||
82 | |||
83 | struct i2c_msg msgs[] = { | ||
84 | { | ||
85 | .addr = i2cbus->slave_addr, | ||
86 | .flags = 0, | ||
87 | .len = 1, | ||
88 | .buf = out_buf, | ||
89 | }, | ||
90 | { | ||
91 | .addr = i2cbus->slave_addr, | ||
92 | .flags = I2C_M_RD, | ||
93 | .len = 1, | ||
94 | .buf = in_buf, | ||
95 | } | ||
96 | }; | ||
97 | |||
98 | out_buf[0] = addr; | ||
99 | out_buf[1] = 0; | ||
100 | |||
101 | if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { | ||
102 | *ch = in_buf[0]; | ||
103 | return true; | ||
104 | } | ||
105 | |||
106 | if (!sil->quiet) { | ||
107 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | ||
108 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
109 | } | ||
110 | return false; | ||
111 | } | ||
112 | |||
113 | static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | ||
114 | { | ||
115 | struct sil164_priv *sil = dvo->dev_priv; | ||
116 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
117 | uint8_t out_buf[2]; | ||
118 | struct i2c_msg msg = { | ||
119 | .addr = i2cbus->slave_addr, | ||
120 | .flags = 0, | ||
121 | .len = 2, | ||
122 | .buf = out_buf, | ||
123 | }; | ||
124 | |||
125 | out_buf[0] = addr; | ||
126 | out_buf[1] = ch; | ||
127 | |||
128 | if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) | ||
129 | return true; | ||
130 | |||
131 | if (!sil->quiet) { | ||
132 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | ||
133 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
134 | } | ||
135 | |||
136 | return false; | ||
137 | } | ||
138 | |||
139 | /* Silicon Image 164 driver for chip on i2c bus */ | ||
140 | static bool sil164_init(struct intel_dvo_device *dvo, | ||
141 | struct intel_i2c_chan *i2cbus) | ||
142 | { | ||
143 | /* this will detect the SIL164 chip on the specified i2c bus */ | ||
144 | struct sil164_priv *sil; | ||
145 | unsigned char ch; | ||
146 | |||
147 | sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); | ||
148 | if (sil == NULL) | ||
149 | return false; | ||
150 | |||
151 | dvo->i2c_bus = i2cbus; | ||
152 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
153 | dvo->dev_priv = sil; | ||
154 | sil->quiet = true; | ||
155 | |||
156 | if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) | ||
157 | goto out; | ||
158 | |||
159 | if (ch != (SIL164_VID & 0xff)) { | ||
160 | DRM_DEBUG("sil164 not detected, got %d from %s slave %d.\n", | ||
161 | ch, i2cbus->adapter.name, i2cbus->slave_addr); | ||
162 | goto out; | ||
163 | } | ||
164 | |||
165 | if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) | ||
166 | goto out; | ||
167 | |||
168 | if (ch != (SIL164_DID & 0xff)) { | ||
169 | DRM_DEBUG("sil164 not detected, got %d from %s slave %d.\n", | ||
170 | ch, i2cbus->adapter.name, i2cbus->slave_addr); | ||
171 | goto out; | ||
172 | } | ||
173 | sil->quiet = false; | ||
174 | |||
175 | DRM_DEBUG("initialized sil164 dvo controller successfully\n"); | ||
176 | return true; | ||
177 | |||
178 | out: | ||
179 | kfree(sil); | ||
180 | return false; | ||
181 | } | ||
182 | |||
183 | static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) | ||
184 | { | ||
185 | uint8_t reg9; | ||
186 | |||
187 | sil164_readb(dvo, SIL164_REG9, ®9); | ||
188 | |||
189 | if (reg9 & SIL164_9_HTPLG) | ||
190 | return connector_status_connected; | ||
191 | else | ||
192 | return connector_status_disconnected; | ||
193 | } | ||
194 | |||
195 | static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo, | ||
196 | struct drm_display_mode *mode) | ||
197 | { | ||
198 | return MODE_OK; | ||
199 | } | ||
200 | |||
201 | static void sil164_mode_set(struct intel_dvo_device *dvo, | ||
202 | struct drm_display_mode *mode, | ||
203 | struct drm_display_mode *adjusted_mode) | ||
204 | { | ||
205 | /* As long as the basics are set up, since we don't have clock | ||
206 | * dependencies in the mode setup, we can just leave the | ||
207 | * registers alone and everything will work fine. | ||
208 | */ | ||
209 | /* recommended programming sequence from doc */ | ||
210 | /*sil164_writeb(sil, 0x08, 0x30); | ||
211 | sil164_writeb(sil, 0x09, 0x00); | ||
212 | sil164_writeb(sil, 0x0a, 0x90); | ||
213 | sil164_writeb(sil, 0x0c, 0x89); | ||
214 | sil164_writeb(sil, 0x08, 0x31);*/ | ||
215 | /* don't do much */ | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | /* set the SIL164 power state */ | ||
220 | static void sil164_dpms(struct intel_dvo_device *dvo, int mode) | ||
221 | { | ||
222 | bool ret; | ||
223 | unsigned char ch; | ||
224 | |||
225 | ret = sil164_readb(dvo, SIL164_REG8, &ch); | ||
226 | if (!ret) | ||
227 | return; | ||
228 | |||
229 | if (mode == DRM_MODE_DPMS_ON) | ||
230 | ch |= SIL164_8_PD; | ||
231 | else | ||
232 | ch &= ~SIL164_8_PD; | ||
233 | |||
234 | sil164_writeb(dvo, SIL164_REG8, ch); | ||
235 | return; | ||
236 | } | ||
237 | |||
238 | static void sil164_dump_regs(struct intel_dvo_device *dvo) | ||
239 | { | ||
240 | uint8_t val; | ||
241 | |||
242 | sil164_readb(dvo, SIL164_FREQ_LO, &val); | ||
243 | DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val); | ||
244 | sil164_readb(dvo, SIL164_FREQ_HI, &val); | ||
245 | DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val); | ||
246 | sil164_readb(dvo, SIL164_REG8, &val); | ||
247 | DRM_DEBUG("SIL164_REG8: 0x%02x\n", val); | ||
248 | sil164_readb(dvo, SIL164_REG9, &val); | ||
249 | DRM_DEBUG("SIL164_REG9: 0x%02x\n", val); | ||
250 | sil164_readb(dvo, SIL164_REGC, &val); | ||
251 | DRM_DEBUG("SIL164_REGC: 0x%02x\n", val); | ||
252 | } | ||
253 | |||
254 | static void sil164_save(struct intel_dvo_device *dvo) | ||
255 | { | ||
256 | struct sil164_priv *sil = dvo->dev_priv; | ||
257 | |||
258 | if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) | ||
259 | return; | ||
260 | |||
261 | if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) | ||
262 | return; | ||
263 | |||
264 | if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) | ||
265 | return; | ||
266 | |||
267 | return; | ||
268 | } | ||
269 | |||
270 | static void sil164_restore(struct intel_dvo_device *dvo) | ||
271 | { | ||
272 | struct sil164_priv *sil = dvo->dev_priv; | ||
273 | |||
274 | /* Restore it powered down initially */ | ||
275 | sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~SIL164_8_PD); | ||
276 | |||
277 | sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); | ||
278 | sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); | ||
279 | sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); | ||
280 | } | ||
281 | |||
282 | static void sil164_destroy(struct intel_dvo_device *dvo) | ||
283 | { | ||
284 | struct sil164_priv *sil = dvo->dev_priv; | ||
285 | |||
286 | if (sil) { | ||
287 | kfree(sil); | ||
288 | dvo->dev_priv = NULL; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | struct intel_dvo_dev_ops sil164_ops = { | ||
293 | .init = sil164_init, | ||
294 | .detect = sil164_detect, | ||
295 | .mode_valid = sil164_mode_valid, | ||
296 | .mode_set = sil164_mode_set, | ||
297 | .dpms = sil164_dpms, | ||
298 | .dump_regs = sil164_dump_regs, | ||
299 | .save = sil164_save, | ||
300 | .restore = sil164_restore, | ||
301 | .destroy = sil164_destroy, | ||
302 | }; | ||
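
sil164_readb() above follows the standard two-message i2c_transfer() idiom: a write message carrying the register offset, then an I2C_M_RD message returning one byte. For reference, the same transaction shape can be driven from userspace through the i2c-dev interface; a rough sketch, where the /dev/i2c-0 path and the 0x38 slave address are only placeholders:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* One write message (register offset) followed by one read message, issued
 * as a single combined transaction -- the same shape as sil164_readb(). */
static int i2c_read_reg(int fd, uint16_t slave, uint8_t reg, uint8_t *val)
{
        struct i2c_msg msgs[2] = {
                { .addr = slave, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
        int fd = open("/dev/i2c-0", O_RDWR);    /* placeholder bus */
        uint8_t vid_lo;

        if (fd >= 0 && i2c_read_reg(fd, 0x38, 0x00, &vid_lo) == 0)
                printf("VID_LO = 0x%02x\n", vid_lo);
        return 0;
}
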
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c new file mode 100644 index 000000000000..207fda806ebf --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
@@ -0,0 +1,335 @@ | |||
1 | /* | ||
2 | * Copyright © 2007 Dave Mueller | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Dave Mueller <dave.mueller@gmx.ch> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include "dvo.h" | ||
29 | |||
30 | /* register definitions according to the TFP410 data sheet */ | ||
31 | #define TFP410_VID 0x014C | ||
32 | #define TFP410_DID 0x0410 | ||
33 | |||
34 | #define TFP410_VID_LO 0x00 | ||
35 | #define TFP410_VID_HI 0x01 | ||
36 | #define TFP410_DID_LO 0x02 | ||
37 | #define TFP410_DID_HI 0x03 | ||
38 | #define TFP410_REV 0x04 | ||
39 | |||
40 | #define TFP410_CTL_1 0x08 | ||
41 | #define TFP410_CTL_1_TDIS (1<<6) | ||
42 | #define TFP410_CTL_1_VEN (1<<5) | ||
43 | #define TFP410_CTL_1_HEN (1<<4) | ||
44 | #define TFP410_CTL_1_DSEL (1<<3) | ||
45 | #define TFP410_CTL_1_BSEL (1<<2) | ||
46 | #define TFP410_CTL_1_EDGE (1<<1) | ||
47 | #define TFP410_CTL_1_PD (1<<0) | ||
48 | |||
49 | #define TFP410_CTL_2 0x09 | ||
50 | #define TFP410_CTL_2_VLOW (1<<7) | ||
51 | #define TFP410_CTL_2_MSEL_MASK (0x7<<4) | ||
52 | #define TFP410_CTL_2_MSEL (1<<4) | ||
53 | #define TFP410_CTL_2_TSEL (1<<3) | ||
54 | #define TFP410_CTL_2_RSEN (1<<2) | ||
55 | #define TFP410_CTL_2_HTPLG (1<<1) | ||
56 | #define TFP410_CTL_2_MDI (1<<0) | ||
57 | |||
58 | #define TFP410_CTL_3 0x0A | ||
59 | #define TFP410_CTL_3_DK_MASK (0x7<<5) | ||
60 | #define TFP410_CTL_3_DK (1<<5) | ||
61 | #define TFP410_CTL_3_DKEN (1<<4) | ||
62 | #define TFP410_CTL_3_CTL_MASK (0x7<<1) | ||
63 | #define TFP410_CTL_3_CTL (1<<1) | ||
64 | |||
65 | #define TFP410_USERCFG 0x0B | ||
66 | |||
67 | #define TFP410_DE_DLY 0x32 | ||
68 | |||
69 | #define TFP410_DE_CTL 0x33 | ||
70 | #define TFP410_DE_CTL_DEGEN (1<<6) | ||
71 | #define TFP410_DE_CTL_VSPOL (1<<5) | ||
72 | #define TFP410_DE_CTL_HSPOL (1<<4) | ||
73 | #define TFP410_DE_CTL_DEDLY8 (1<<0) | ||
74 | |||
75 | #define TFP410_DE_TOP 0x34 | ||
76 | |||
77 | #define TFP410_DE_CNT_LO 0x36 | ||
78 | #define TFP410_DE_CNT_HI 0x37 | ||
79 | |||
80 | #define TFP410_DE_LIN_LO 0x38 | ||
81 | #define TFP410_DE_LIN_HI 0x39 | ||
82 | |||
83 | #define TFP410_H_RES_LO 0x3A | ||
84 | #define TFP410_H_RES_HI 0x3B | ||
85 | |||
86 | #define TFP410_V_RES_LO 0x3C | ||
87 | #define TFP410_V_RES_HI 0x3D | ||
88 | |||
89 | struct tfp410_save_rec { | ||
90 | uint8_t ctl1; | ||
91 | uint8_t ctl2; | ||
92 | }; | ||
93 | |||
94 | struct tfp410_priv { | ||
95 | bool quiet; | ||
96 | |||
97 | struct tfp410_save_rec saved_reg; | ||
98 | struct tfp410_save_rec mode_reg; | ||
99 | }; | ||
100 | |||
101 | static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | ||
102 | { | ||
103 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
104 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
105 | u8 out_buf[2]; | ||
106 | u8 in_buf[2]; | ||
107 | |||
108 | struct i2c_msg msgs[] = { | ||
109 | { | ||
110 | .addr = i2cbus->slave_addr, | ||
111 | .flags = 0, | ||
112 | .len = 1, | ||
113 | .buf = out_buf, | ||
114 | }, | ||
115 | { | ||
116 | .addr = i2cbus->slave_addr, | ||
117 | .flags = I2C_M_RD, | ||
118 | .len = 1, | ||
119 | .buf = in_buf, | ||
120 | } | ||
121 | }; | ||
122 | |||
123 | out_buf[0] = addr; | ||
124 | out_buf[1] = 0; | ||
125 | |||
126 | if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { | ||
127 | *ch = in_buf[0]; | ||
128 | return true; | ||
129 | } | ||
130 | |||
131 | if (!tfp->quiet) { | ||
132 | DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", | ||
133 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
134 | } | ||
135 | return false; | ||
136 | } | ||
137 | |||
138 | static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | ||
139 | { | ||
140 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
141 | struct intel_i2c_chan *i2cbus = dvo->i2c_bus; | ||
142 | uint8_t out_buf[2]; | ||
143 | struct i2c_msg msg = { | ||
144 | .addr = i2cbus->slave_addr, | ||
145 | .flags = 0, | ||
146 | .len = 2, | ||
147 | .buf = out_buf, | ||
148 | }; | ||
149 | |||
150 | out_buf[0] = addr; | ||
151 | out_buf[1] = ch; | ||
152 | |||
153 | if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) | ||
154 | return true; | ||
155 | |||
156 | if (!tfp->quiet) { | ||
157 | DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", | ||
158 | addr, i2cbus->adapter.name, i2cbus->slave_addr); | ||
159 | } | ||
160 | |||
161 | return false; | ||
162 | } | ||
163 | |||
164 | static int tfp410_getid(struct intel_dvo_device *dvo, int addr) | ||
165 | { | ||
166 | uint8_t ch1, ch2; | ||
167 | |||
168 | if (tfp410_readb(dvo, addr+0, &ch1) && | ||
169 | tfp410_readb(dvo, addr+1, &ch2)) | ||
170 | return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); | ||
171 | |||
172 | return -1; | ||
173 | } | ||
174 | |||
175 | /* TI TFP410 driver for chip on i2c bus */ | ||
176 | static bool tfp410_init(struct intel_dvo_device *dvo, | ||
177 | struct intel_i2c_chan *i2cbus) | ||
178 | { | ||
179 | /* this will detect the tfp410 chip on the specified i2c bus */ | ||
180 | struct tfp410_priv *tfp; | ||
181 | int id; | ||
182 | |||
183 | tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); | ||
184 | if (tfp == NULL) | ||
185 | return false; | ||
186 | |||
187 | dvo->i2c_bus = i2cbus; | ||
188 | dvo->i2c_bus->slave_addr = dvo->slave_addr; | ||
189 | dvo->dev_priv = tfp; | ||
190 | tfp->quiet = true; | ||
191 | |||
192 | if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { | ||
193 | DRM_DEBUG("tfp410 not detected, got VID %X from %s slave %d.\n", | ||
194 | id, i2cbus->adapter.name, i2cbus->slave_addr); | ||
195 | goto out; | ||
196 | } | ||
197 | |||
198 | if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { | ||
199 | DRM_DEBUG("tfp410 not detected, got DID %X from %s slave %d.\n", | ||
200 | id, i2cbus->adapter.name, i2cbus->slave_addr); | ||
201 | goto out; | ||
202 | } | ||
203 | tfp->quiet = false; | ||
204 | return true; | ||
205 | out: | ||
206 | kfree(tfp); | ||
207 | return false; | ||
208 | } | ||
209 | |||
210 | static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) | ||
211 | { | ||
212 | enum drm_connector_status ret = connector_status_disconnected; | ||
213 | uint8_t ctl2; | ||
214 | |||
215 | if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { | ||
216 | if (ctl2 & TFP410_CTL_2_HTPLG) | ||
217 | ret = connector_status_connected; | ||
218 | else | ||
219 | ret = connector_status_disconnected; | ||
220 | } | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, | ||
226 | struct drm_display_mode *mode) | ||
227 | { | ||
228 | return MODE_OK; | ||
229 | } | ||
230 | |||
231 | static void tfp410_mode_set(struct intel_dvo_device *dvo, | ||
232 | struct drm_display_mode *mode, | ||
233 | struct drm_display_mode *adjusted_mode) | ||
234 | { | ||
235 | /* As long as the basics are set up, since we don't have clock dependencies | ||
236 | * in the mode setup, we can just leave the registers alone and everything | ||
237 | * will work fine. | ||
238 | */ | ||
239 | /* don't do much */ | ||
240 | return; | ||
241 | } | ||
242 | |||
243 | /* set the tfp410 power state */ | ||
244 | static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) | ||
245 | { | ||
246 | uint8_t ctl1; | ||
247 | |||
248 | if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) | ||
249 | return; | ||
250 | |||
251 | if (mode == DRM_MODE_DPMS_ON) | ||
252 | ctl1 |= TFP410_CTL_1_PD; | ||
253 | else | ||
254 | ctl1 &= ~TFP410_CTL_1_PD; | ||
255 | |||
256 | tfp410_writeb(dvo, TFP410_CTL_1, ctl1); | ||
257 | } | ||
258 | |||
259 | static void tfp410_dump_regs(struct intel_dvo_device *dvo) | ||
260 | { | ||
261 | uint8_t val, val2; | ||
262 | |||
263 | tfp410_readb(dvo, TFP410_REV, &val); | ||
264 | DRM_DEBUG("TFP410_REV: 0x%02X\n", val); | ||
265 | tfp410_readb(dvo, TFP410_CTL_1, &val); | ||
266 | DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val); | ||
267 | tfp410_readb(dvo, TFP410_CTL_2, &val); | ||
268 | DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val); | ||
269 | tfp410_readb(dvo, TFP410_CTL_3, &val); | ||
270 | DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val); | ||
271 | tfp410_readb(dvo, TFP410_USERCFG, &val); | ||
272 | DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val); | ||
273 | tfp410_readb(dvo, TFP410_DE_DLY, &val); | ||
274 | DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val); | ||
275 | tfp410_readb(dvo, TFP410_DE_CTL, &val); | ||
276 | DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val); | ||
277 | tfp410_readb(dvo, TFP410_DE_TOP, &val); | ||
278 | DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val); | ||
279 | tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); | ||
280 | tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); | ||
281 | DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); | ||
282 | tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); | ||
283 | tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); | ||
284 | DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); | ||
285 | tfp410_readb(dvo, TFP410_H_RES_LO, &val); | ||
286 | tfp410_readb(dvo, TFP410_H_RES_HI, &val2); | ||
287 | DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val); | ||
288 | tfp410_readb(dvo, TFP410_V_RES_LO, &val); | ||
289 | tfp410_readb(dvo, TFP410_V_RES_HI, &val2); | ||
290 | DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val); | ||
291 | } | ||
292 | |||
293 | static void tfp410_save(struct intel_dvo_device *dvo) | ||
294 | { | ||
295 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
296 | |||
297 | if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) | ||
298 | return; | ||
299 | |||
300 | if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2)) | ||
301 | return; | ||
302 | } | ||
303 | |||
304 | static void tfp410_restore(struct intel_dvo_device *dvo) | ||
305 | { | ||
306 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
307 | |||
308 | /* Restore it powered down initially */ | ||
309 | tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~TFP410_CTL_1_PD); | ||
310 | |||
311 | tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); | ||
312 | tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); | ||
313 | } | ||
314 | |||
315 | static void tfp410_destroy(struct intel_dvo_device *dvo) | ||
316 | { | ||
317 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
318 | |||
319 | if (tfp) { | ||
320 | kfree(tfp); | ||
321 | dvo->dev_priv = NULL; | ||
322 | } | ||
323 | } | ||
324 | |||
325 | struct intel_dvo_dev_ops tfp410_ops = { | ||
326 | .init = tfp410_init, | ||
327 | .detect = tfp410_detect, | ||
328 | .mode_valid = tfp410_mode_valid, | ||
329 | .mode_set = tfp410_mode_set, | ||
330 | .dpms = tfp410_dpms, | ||
331 | .dump_regs = tfp410_dump_regs, | ||
332 | .save = tfp410_save, | ||
333 | .restore = tfp410_restore, | ||
334 | .destroy = tfp410_destroy, | ||
335 | }; | ||
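
tfp410_getid() above packs the LO/HI ID registers into a single 16-bit value before comparing it with TFP410_VID (0x014C) or TFP410_DID (0x0410). A tiny self-contained check of that packing, using the byte values a TFP410 would be expected to return for the vendor ID:

#include <assert.h>
#include <stdint.h>

#define TFP410_VID 0x014C

/* Same packing as tfp410_getid(): low byte from addr+0, high byte from addr+1. */
static int pack_id(uint8_t lo, uint8_t hi)
{
        return ((hi << 8) & 0xFF00) | (lo & 0x00FF);
}

int main(void)
{
        /* A TFP410 returns 0x4C from TFP410_VID_LO and 0x01 from TFP410_VID_HI. */
        assert(pack_id(0x4C, 0x01) == TFP410_VID);
        return 0;
}
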
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0d215e38606a..81f1cff56fd5 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -28,6 +28,8 @@ | |||
28 | 28 | ||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "drm_crtc_helper.h" | ||
32 | #include "intel_drv.h" | ||
31 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
32 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
33 | 35 | ||
@@ -39,6 +41,7 @@ | |||
39 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | 41 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) |
40 | { | 42 | { |
41 | drm_i915_private_t *dev_priv = dev->dev_private; | 43 | drm_i915_private_t *dev_priv = dev->dev_private; |
44 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
42 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 45 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
43 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | 46 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; |
44 | u32 last_acthd = I915_READ(acthd_reg); | 47 | u32 last_acthd = I915_READ(acthd_reg); |
@@ -55,8 +58,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | |||
55 | if (ring->space >= n) | 58 | if (ring->space >= n) |
56 | return 0; | 59 | return 0; |
57 | 60 | ||
58 | if (dev_priv->sarea_priv) | 61 | if (master_priv->sarea_priv) |
59 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 62 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
60 | 63 | ||
61 | if (ring->head != last_head) | 64 | if (ring->head != last_head) |
62 | i = 0; | 65 | i = 0; |
@@ -121,16 +124,28 @@ static void i915_free_hws(struct drm_device *dev) | |||
121 | void i915_kernel_lost_context(struct drm_device * dev) | 124 | void i915_kernel_lost_context(struct drm_device * dev) |
122 | { | 125 | { |
123 | drm_i915_private_t *dev_priv = dev->dev_private; | 126 | drm_i915_private_t *dev_priv = dev->dev_private; |
127 | struct drm_i915_master_private *master_priv; | ||
124 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 128 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
125 | 129 | ||
130 | /* | ||
131 | * We should never lose context on the ring with modesetting | ||
132 | * as we don't expose it to userspace | ||
133 | */ | ||
134 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
135 | return; | ||
136 | |||
126 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 137 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
127 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | 138 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; |
128 | ring->space = ring->head - (ring->tail + 8); | 139 | ring->space = ring->head - (ring->tail + 8); |
129 | if (ring->space < 0) | 140 | if (ring->space < 0) |
130 | ring->space += ring->Size; | 141 | ring->space += ring->Size; |
131 | 142 | ||
132 | if (ring->head == ring->tail && dev_priv->sarea_priv) | 143 | if (!dev->primary->master) |
133 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | 144 | return; |
145 | |||
146 | master_priv = dev->primary->master->driver_priv; | ||
147 | if (ring->head == ring->tail && master_priv->sarea_priv) | ||
148 | master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | ||
134 | } | 149 | } |
135 | 150 | ||
136 | static int i915_dma_cleanup(struct drm_device * dev) | 151 | static int i915_dma_cleanup(struct drm_device * dev) |
@@ -154,26 +169,22 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
154 | if (I915_NEED_GFX_HWS(dev)) | 169 | if (I915_NEED_GFX_HWS(dev)) |
155 | i915_free_hws(dev); | 170 | i915_free_hws(dev); |
156 | 171 | ||
157 | dev_priv->sarea = NULL; | ||
158 | dev_priv->sarea_priv = NULL; | ||
159 | |||
160 | return 0; | 172 | return 0; |
161 | } | 173 | } |
162 | 174 | ||
163 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | 175 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) |
164 | { | 176 | { |
165 | drm_i915_private_t *dev_priv = dev->dev_private; | 177 | drm_i915_private_t *dev_priv = dev->dev_private; |
178 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
166 | 179 | ||
167 | dev_priv->sarea = drm_getsarea(dev); | 180 | master_priv->sarea = drm_getsarea(dev); |
168 | if (!dev_priv->sarea) { | 181 | if (master_priv->sarea) { |
169 | DRM_ERROR("can not find sarea!\n"); | 182 | master_priv->sarea_priv = (drm_i915_sarea_t *) |
170 | i915_dma_cleanup(dev); | 183 | ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); |
171 | return -EINVAL; | 184 | } else { |
185 | DRM_DEBUG("sarea not found, assuming DRI2 userspace\n"); | ||
172 | } | 186 | } |
173 | 187 | ||
174 | dev_priv->sarea_priv = (drm_i915_sarea_t *) | ||
175 | ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); | ||
176 | |||
177 | if (init->ring_size != 0) { | 188 | if (init->ring_size != 0) { |
178 | if (dev_priv->ring.ring_obj != NULL) { | 189 | if (dev_priv->ring.ring_obj != NULL) { |
179 | i915_dma_cleanup(dev); | 190 | i915_dma_cleanup(dev); |
@@ -207,7 +218,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
207 | dev_priv->back_offset = init->back_offset; | 218 | dev_priv->back_offset = init->back_offset; |
208 | dev_priv->front_offset = init->front_offset; | 219 | dev_priv->front_offset = init->front_offset; |
209 | dev_priv->current_page = 0; | 220 | dev_priv->current_page = 0; |
210 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 221 | if (master_priv->sarea_priv) |
222 | master_priv->sarea_priv->pf_current_page = 0; | ||
211 | 223 | ||
212 | /* Allow hardware batchbuffers unless told otherwise. | 224 | /* Allow hardware batchbuffers unless told otherwise. |
213 | */ | 225 | */ |
@@ -222,11 +234,6 @@ static int i915_dma_resume(struct drm_device * dev) | |||
222 | 234 | ||
223 | DRM_DEBUG("%s\n", __func__); | 235 | DRM_DEBUG("%s\n", __func__); |
224 | 236 | ||
225 | if (!dev_priv->sarea) { | ||
226 | DRM_ERROR("can not find sarea!\n"); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | if (dev_priv->ring.map.handle == NULL) { | 237 | if (dev_priv->ring.map.handle == NULL) { |
231 | DRM_ERROR("can not ioremap virtual address for" | 238 | DRM_ERROR("can not ioremap virtual address for" |
232 | " ring buffer\n"); | 239 | " ring buffer\n"); |
@@ -435,13 +442,14 @@ i915_emit_box(struct drm_device *dev, | |||
435 | static void i915_emit_breadcrumb(struct drm_device *dev) | 442 | static void i915_emit_breadcrumb(struct drm_device *dev) |
436 | { | 443 | { |
437 | drm_i915_private_t *dev_priv = dev->dev_private; | 444 | drm_i915_private_t *dev_priv = dev->dev_private; |
445 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
438 | RING_LOCALS; | 446 | RING_LOCALS; |
439 | 447 | ||
440 | dev_priv->counter++; | 448 | dev_priv->counter++; |
441 | if (dev_priv->counter > 0x7FFFFFFFUL) | 449 | if (dev_priv->counter > 0x7FFFFFFFUL) |
442 | dev_priv->counter = 0; | 450 | dev_priv->counter = 0; |
443 | if (dev_priv->sarea_priv) | 451 | if (master_priv->sarea_priv) |
444 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; | 452 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
445 | 453 | ||
446 | BEGIN_LP_RING(4); | 454 | BEGIN_LP_RING(4); |
447 | OUT_RING(MI_STORE_DWORD_INDEX); | 455 | OUT_RING(MI_STORE_DWORD_INDEX); |
@@ -537,15 +545,17 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
537 | static int i915_dispatch_flip(struct drm_device * dev) | 545 | static int i915_dispatch_flip(struct drm_device * dev) |
538 | { | 546 | { |
539 | drm_i915_private_t *dev_priv = dev->dev_private; | 547 | drm_i915_private_t *dev_priv = dev->dev_private; |
548 | struct drm_i915_master_private *master_priv = | ||
549 | dev->primary->master->driver_priv; | ||
540 | RING_LOCALS; | 550 | RING_LOCALS; |
541 | 551 | ||
542 | if (!dev_priv->sarea_priv) | 552 | if (!master_priv->sarea_priv) |
543 | return -EINVAL; | 553 | return -EINVAL; |
544 | 554 | ||
545 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", | 555 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", |
546 | __func__, | 556 | __func__, |
547 | dev_priv->current_page, | 557 | dev_priv->current_page, |
548 | dev_priv->sarea_priv->pf_current_page); | 558 | master_priv->sarea_priv->pf_current_page); |
549 | 559 | ||
550 | i915_kernel_lost_context(dev); | 560 | i915_kernel_lost_context(dev); |
551 | 561 | ||
@@ -572,7 +582,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
572 | OUT_RING(0); | 582 | OUT_RING(0); |
573 | ADVANCE_LP_RING(); | 583 | ADVANCE_LP_RING(); |
574 | 584 | ||
575 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; | 585 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; |
576 | 586 | ||
577 | BEGIN_LP_RING(4); | 587 | BEGIN_LP_RING(4); |
578 | OUT_RING(MI_STORE_DWORD_INDEX); | 588 | OUT_RING(MI_STORE_DWORD_INDEX); |
@@ -581,7 +591,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
581 | OUT_RING(0); | 591 | OUT_RING(0); |
582 | ADVANCE_LP_RING(); | 592 | ADVANCE_LP_RING(); |
583 | 593 | ||
584 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 594 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; |
585 | return 0; | 595 | return 0; |
586 | } | 596 | } |
587 | 597 | ||
@@ -611,8 +621,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, | |||
611 | struct drm_file *file_priv) | 621 | struct drm_file *file_priv) |
612 | { | 622 | { |
613 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 623 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
624 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
614 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 625 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
615 | dev_priv->sarea_priv; | 626 | master_priv->sarea_priv; |
616 | drm_i915_batchbuffer_t *batch = data; | 627 | drm_i915_batchbuffer_t *batch = data; |
617 | int ret; | 628 | int ret; |
618 | 629 | ||
@@ -644,8 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
644 | struct drm_file *file_priv) | 655 | struct drm_file *file_priv) |
645 | { | 656 | { |
646 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 657 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
658 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
647 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 659 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
648 | dev_priv->sarea_priv; | 660 | master_priv->sarea_priv; |
649 | drm_i915_cmdbuffer_t *cmdbuf = data; | 661 | drm_i915_cmdbuffer_t *cmdbuf = data; |
650 | int ret; | 662 | int ret; |
651 | 663 | ||
@@ -717,10 +729,13 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
717 | value = dev->pci_device; | 729 | value = dev->pci_device; |
718 | break; | 730 | break; |
719 | case I915_PARAM_HAS_GEM: | 731 | case I915_PARAM_HAS_GEM: |
720 | value = 1; | 732 | value = dev_priv->has_gem; |
733 | break; | ||
734 | case I915_PARAM_NUM_FENCES_AVAIL: | ||
735 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | ||
721 | break; | 736 | break; |
722 | default: | 737 | default: |
723 | DRM_ERROR("Unknown parameter %d\n", param->param); | 738 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
724 | return -EINVAL; | 739 | return -EINVAL; |
725 | } | 740 | } |
726 | 741 | ||
@@ -752,8 +767,15 @@ static int i915_setparam(struct drm_device *dev, void *data, | |||
752 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | 767 | case I915_SETPARAM_ALLOW_BATCHBUFFER: |
753 | dev_priv->allow_batchbuffer = param->value; | 768 | dev_priv->allow_batchbuffer = param->value; |
754 | break; | 769 | break; |
770 | case I915_SETPARAM_NUM_USED_FENCES: | ||
771 | if (param->value > dev_priv->num_fence_regs || | ||
772 | param->value < 0) | ||
773 | return -EINVAL; | ||
774 | /* Userspace can use first N regs */ | ||
775 | dev_priv->fence_reg_start = param->value; | ||
776 | break; | ||
755 | default: | 777 | default: |
756 | DRM_ERROR("unknown parameter %d\n", param->param); | 778 | DRM_DEBUG("unknown parameter %d\n", param->param); |
757 | return -EINVAL; | 779 | return -EINVAL; |
758 | } | 780 | } |
759 | 781 | ||
@@ -774,6 +796,11 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
774 | return -EINVAL; | 796 | return -EINVAL; |
775 | } | 797 | } |
776 | 798 | ||
799 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
800 | WARN(1, "tried to set status page when mode setting active\n"); | ||
801 | return 0; | ||
802 | } | ||
803 | |||
777 | printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); | 804 | printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); |
778 | 805 | ||
779 | dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); | 806 | dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); |
@@ -802,6 +829,231 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
802 | return 0; | 829 | return 0; |
803 | } | 830 | } |
804 | 831 | ||
832 | /** | ||
833 | * i915_probe_agp - get AGP bootup configuration | ||
834 | * @dev: DRM device | ||
835 | * @aperture_size: returns AGP aperture configured size | ||
836 | * @preallocated_size: returns size of BIOS preallocated AGP space | ||
837 | * | ||
838 | * Since Intel integrated graphics are UMA, the BIOS has to set aside | ||
839 | * some RAM for the framebuffer at early boot. This code figures out | ||
840 | * how much was set aside so we can use it for our own purposes. | ||
841 | */ | ||
842 | static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, | ||
843 | unsigned long *preallocated_size) | ||
844 | { | ||
845 | struct pci_dev *bridge_dev; | ||
846 | u16 tmp = 0; | ||
847 | unsigned long overhead; | ||
848 | unsigned long stolen; | ||
849 | |||
850 | bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); | ||
851 | if (!bridge_dev) { | ||
852 | DRM_ERROR("bridge device not found\n"); | ||
853 | return -1; | ||
854 | } | ||
855 | |||
856 | /* Get the fb aperture size and "stolen" memory amount. */ | ||
857 | pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp); | ||
858 | pci_dev_put(bridge_dev); | ||
859 | |||
860 | *aperture_size = 1024 * 1024; | ||
861 | *preallocated_size = 1024 * 1024; | ||
862 | |||
863 | switch (dev->pdev->device) { | ||
864 | case PCI_DEVICE_ID_INTEL_82830_CGC: | ||
865 | case PCI_DEVICE_ID_INTEL_82845G_IG: | ||
866 | case PCI_DEVICE_ID_INTEL_82855GM_IG: | ||
867 | case PCI_DEVICE_ID_INTEL_82865_IG: | ||
868 | if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) | ||
869 | *aperture_size *= 64; | ||
870 | else | ||
871 | *aperture_size *= 128; | ||
872 | break; | ||
873 | default: | ||
874 | /* 9xx supports large sizes, just look at the length */ | ||
875 | *aperture_size = pci_resource_len(dev->pdev, 2); | ||
876 | break; | ||
877 | } | ||
878 | |||
879 | /* | ||
880 | * Some of the preallocated space is taken by the GTT | ||
881 | * and popup. GTT is 1K per MB of aperture size, and popup is 4K. | ||
882 | */ | ||
883 | if (IS_G4X(dev)) | ||
884 | overhead = 4096; | ||
885 | else | ||
886 | overhead = (*aperture_size / 1024) + 4096; | ||
887 | |||
888 | switch (tmp & INTEL_GMCH_GMS_MASK) { | ||
889 | case INTEL_855_GMCH_GMS_DISABLED: | ||
890 | DRM_ERROR("video memory is disabled\n"); | ||
891 | return -1; | ||
892 | case INTEL_855_GMCH_GMS_STOLEN_1M: | ||
893 | stolen = 1 * 1024 * 1024; | ||
894 | break; | ||
895 | case INTEL_855_GMCH_GMS_STOLEN_4M: | ||
896 | stolen = 4 * 1024 * 1024; | ||
897 | break; | ||
898 | case INTEL_855_GMCH_GMS_STOLEN_8M: | ||
899 | stolen = 8 * 1024 * 1024; | ||
900 | break; | ||
901 | case INTEL_855_GMCH_GMS_STOLEN_16M: | ||
902 | stolen = 16 * 1024 * 1024; | ||
903 | break; | ||
904 | case INTEL_855_GMCH_GMS_STOLEN_32M: | ||
905 | stolen = 32 * 1024 * 1024; | ||
906 | break; | ||
907 | case INTEL_915G_GMCH_GMS_STOLEN_48M: | ||
908 | stolen = 48 * 1024 * 1024; | ||
909 | break; | ||
910 | case INTEL_915G_GMCH_GMS_STOLEN_64M: | ||
911 | stolen = 64 * 1024 * 1024; | ||
912 | break; | ||
913 | case INTEL_GMCH_GMS_STOLEN_128M: | ||
914 | stolen = 128 * 1024 * 1024; | ||
915 | break; | ||
916 | case INTEL_GMCH_GMS_STOLEN_256M: | ||
917 | stolen = 256 * 1024 * 1024; | ||
918 | break; | ||
919 | case INTEL_GMCH_GMS_STOLEN_96M: | ||
920 | stolen = 96 * 1024 * 1024; | ||
921 | break; | ||
922 | case INTEL_GMCH_GMS_STOLEN_160M: | ||
923 | stolen = 160 * 1024 * 1024; | ||
924 | break; | ||
925 | case INTEL_GMCH_GMS_STOLEN_224M: | ||
926 | stolen = 224 * 1024 * 1024; | ||
927 | break; | ||
928 | case INTEL_GMCH_GMS_STOLEN_352M: | ||
929 | stolen = 352 * 1024 * 1024; | ||
930 | break; | ||
931 | default: | ||
932 | DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", | ||
933 | tmp & INTEL_GMCH_GMS_MASK); | ||
934 | return -1; | ||
935 | } | ||
936 | *preallocated_size = stolen - overhead; | ||
937 | |||
938 | return 0; | ||
939 | } | ||
940 | |||
941 | static int i915_load_modeset_init(struct drm_device *dev) | ||
942 | { | ||
943 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
944 | unsigned long agp_size, prealloc_size; | ||
945 | int fb_bar = IS_I9XX(dev) ? 2 : 0; | ||
946 | int ret = 0; | ||
947 | |||
948 | dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL); | ||
949 | if (!dev->devname) { | ||
950 | ret = -ENOMEM; | ||
951 | goto out; | ||
952 | } | ||
953 | |||
954 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & | ||
955 | 0xff000000; | ||
956 | |||
957 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | ||
958 | dev_priv->cursor_needs_physical = true; | ||
959 | else | ||
960 | dev_priv->cursor_needs_physical = false; | ||
961 | |||
962 | if (IS_I965G(dev) || IS_G33(dev)) | ||
963 | dev_priv->cursor_needs_physical = false; | ||
964 | |||
965 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size); | ||
966 | if (ret) | ||
967 | goto kfree_devname; | ||
968 | |||
969 | /* Basic memrange allocator for stolen space (aka vram) */ | ||
970 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); | ||
971 | |||
972 | /* Let GEM manage from end of prealloc space to end of aperture */ | ||
973 | i915_gem_do_init(dev, prealloc_size, agp_size); | ||
974 | |||
975 | ret = i915_gem_init_ringbuffer(dev); | ||
976 | if (ret) | ||
977 | goto kfree_devname; | ||
978 | |||
979 | /* Allow hardware batchbuffers unless told otherwise. | ||
980 | */ | ||
981 | dev_priv->allow_batchbuffer = 1; | ||
982 | |||
983 | ret = intel_init_bios(dev); | ||
984 | if (ret) | ||
985 | DRM_INFO("failed to find VBIOS tables\n"); | ||
986 | |||
987 | ret = drm_irq_install(dev); | ||
988 | if (ret) | ||
989 | goto destroy_ringbuffer; | ||
990 | |||
991 | /* FIXME: re-add hotplug support */ | ||
992 | #if 0 | ||
993 | ret = drm_hotplug_init(dev); | ||
994 | if (ret) | ||
995 | goto destroy_ringbuffer; | ||
996 | #endif | ||
997 | |||
998 | /* Always safe in the mode setting case. */ | ||
999 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | ||
1000 | dev->vblank_disable_allowed = 1; | ||
1001 | |||
1002 | /* | ||
1003 | * Initialize the hardware status page IRQ location. | ||
1004 | */ | ||
1005 | |||
1006 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | ||
1007 | |||
1008 | intel_modeset_init(dev); | ||
1009 | |||
1010 | drm_helper_initial_config(dev, false); | ||
1011 | |||
1012 | return 0; | ||
1013 | |||
1014 | destroy_ringbuffer: | ||
1015 | i915_gem_cleanup_ringbuffer(dev); | ||
1016 | kfree_devname: | ||
1017 | kfree(dev->devname); | ||
1018 | out: | ||
1019 | return ret; | ||
1020 | } | ||
1021 | |||
1022 | int i915_master_create(struct drm_device *dev, struct drm_master *master) | ||
1023 | { | ||
1024 | struct drm_i915_master_private *master_priv; | ||
1025 | |||
1026 | master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
1027 | if (!master_priv) | ||
1028 | return -ENOMEM; | ||
1029 | |||
1030 | master->driver_priv = master_priv; | ||
1031 | return 0; | ||
1032 | } | ||
1033 | |||
1034 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | ||
1035 | { | ||
1036 | struct drm_i915_master_private *master_priv = master->driver_priv; | ||
1037 | |||
1038 | if (!master_priv) | ||
1039 | return; | ||
1040 | |||
1041 | drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
1042 | |||
1043 | master->driver_priv = NULL; | ||
1044 | } | ||
1045 | |||
1046 | /** | ||
1047 | * i915_driver_load - setup chip and create an initial config | ||
1048 | * @dev: DRM device | ||
1049 | * @flags: startup flags | ||
1050 | * | ||
1051 | * The driver load routine has to do several things: | ||
1052 | * - drive output discovery via intel_modeset_init() | ||
1053 | * - initialize the memory manager | ||
1054 | * - allocate initial config memory | ||
1055 | * - setup the DRM framebuffer with the allocated memory | ||
1056 | */ | ||
805 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 1057 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
806 | { | 1058 | { |
807 | struct drm_i915_private *dev_priv = dev->dev_private; | 1059 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -829,6 +1081,40 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
829 | size = drm_get_resource_len(dev, mmio_bar); | 1081 | size = drm_get_resource_len(dev, mmio_bar); |
830 | 1082 | ||
831 | dev_priv->regs = ioremap(base, size); | 1083 | dev_priv->regs = ioremap(base, size); |
1084 | if (!dev_priv->regs) { | ||
1085 | DRM_ERROR("failed to map registers\n"); | ||
1086 | ret = -EIO; | ||
1087 | goto free_priv; | ||
1088 | } | ||
1089 | |||
1090 | dev_priv->mm.gtt_mapping = | ||
1091 | io_mapping_create_wc(dev->agp->base, | ||
1092 | dev->agp->agp_info.aper_size * 1024*1024); | ||
1093 | /* Set up a WC MTRR for non-PAT systems. This is more common than | ||
1094 | * one would think, because the kernel disables PAT on first | ||
1096 | * generation Core chips, where WC PAT gets overridden as if by a UC | ||
1097 | * MTRR even when no UC MTRR is actually present. | ||
1097 | */ | ||
1098 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | ||
1099 | dev->agp->agp_info.aper_size * | ||
1100 | 1024 * 1024, | ||
1101 | MTRR_TYPE_WRCOMB, 1); | ||
1102 | if (dev_priv->mm.gtt_mtrr < 0) { | ||
1103 | DRM_INFO("MTRR allocation failed. Graphics " | ||
1104 | "performance may suffer.\n"); | ||
1105 | } | ||
1106 | |||
1107 | #ifdef CONFIG_HIGHMEM64G | ||
1108 | /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ | ||
1109 | dev_priv->has_gem = 0; | ||
1110 | #else | ||
1111 | /* enable GEM by default */ | ||
1112 | dev_priv->has_gem = 1; | ||
1113 | #endif | ||
1114 | |||
1115 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
1116 | if (IS_GM45(dev)) | ||
1117 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | ||
832 | 1118 | ||
833 | i915_gem_load(dev); | 1119 | i915_gem_load(dev); |
834 | 1120 | ||
@@ -836,7 +1122,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
836 | if (!I915_NEED_GFX_HWS(dev)) { | 1122 | if (!I915_NEED_GFX_HWS(dev)) { |
837 | ret = i915_init_phys_hws(dev); | 1123 | ret = i915_init_phys_hws(dev); |
838 | if (ret != 0) | 1124 | if (ret != 0) |
839 | return ret; | 1125 | goto out_rmmap; |
840 | } | 1126 | } |
841 | 1127 | ||
842 | /* On the 945G/GM, the chipset reports the MSI capability on the | 1128 | /* On the 945G/GM, the chipset reports the MSI capability on the |
@@ -847,15 +1133,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
847 | * and the registers being closely associated. | 1133 | * and the registers being closely associated. |
848 | * | 1134 | * |
849 | * According to chipset errata, on the 965GM, MSI interrupts may | 1135 | * According to chipset errata, on the 965GM, MSI interrupts may |
850 | * be lost or delayed | 1136 | * be lost or delayed, but we use them anyway to avoid |
1137 | * stuck interrupts on some machines. | ||
851 | */ | 1138 | */ |
852 | if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) | 1139 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
853 | pci_enable_msi(dev->pdev); | 1140 | pci_enable_msi(dev->pdev); |
854 | 1141 | ||
855 | intel_opregion_init(dev); | 1142 | intel_opregion_init(dev); |
856 | 1143 | ||
857 | spin_lock_init(&dev_priv->user_irq_lock); | 1144 | spin_lock_init(&dev_priv->user_irq_lock); |
1145 | dev_priv->user_irq_refcount = 0; | ||
1146 | |||
1147 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | ||
1148 | |||
1149 | if (ret) { | ||
1150 | (void) i915_driver_unload(dev); | ||
1151 | return ret; | ||
1152 | } | ||
1153 | |||
1154 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
1155 | ret = i915_load_modeset_init(dev); | ||
1156 | if (ret < 0) { | ||
1157 | DRM_ERROR("failed to init modeset\n"); | ||
1158 | goto out_rmmap; | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | return 0; | ||
858 | 1163 | ||
1164 | out_rmmap: | ||
1165 | iounmap(dev_priv->regs); | ||
1166 | free_priv: | ||
1167 | drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER); | ||
859 | return ret; | 1168 | return ret; |
860 | } | 1169 | } |
861 | 1170 | ||
@@ -863,16 +1172,37 @@ int i915_driver_unload(struct drm_device *dev) | |||
863 | { | 1172 | { |
864 | struct drm_i915_private *dev_priv = dev->dev_private; | 1173 | struct drm_i915_private *dev_priv = dev->dev_private; |
865 | 1174 | ||
1175 | io_mapping_free(dev_priv->mm.gtt_mapping); | ||
1176 | if (dev_priv->mm.gtt_mtrr >= 0) { | ||
1177 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | ||
1178 | dev->agp->agp_info.aper_size * 1024 * 1024); | ||
1179 | dev_priv->mm.gtt_mtrr = -1; | ||
1180 | } | ||
1181 | |||
1182 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
1183 | drm_irq_uninstall(dev); | ||
1184 | } | ||
1185 | |||
866 | if (dev->pdev->msi_enabled) | 1186 | if (dev->pdev->msi_enabled) |
867 | pci_disable_msi(dev->pdev); | 1187 | pci_disable_msi(dev->pdev); |
868 | 1188 | ||
869 | i915_free_hws(dev); | ||
870 | |||
871 | if (dev_priv->regs != NULL) | 1189 | if (dev_priv->regs != NULL) |
872 | iounmap(dev_priv->regs); | 1190 | iounmap(dev_priv->regs); |
873 | 1191 | ||
874 | intel_opregion_free(dev); | 1192 | intel_opregion_free(dev); |
875 | 1193 | ||
1194 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
1195 | intel_modeset_cleanup(dev); | ||
1196 | |||
1197 | i915_gem_free_all_phys_object(dev); | ||
1198 | |||
1199 | mutex_lock(&dev->struct_mutex); | ||
1200 | i915_gem_cleanup_ringbuffer(dev); | ||
1201 | mutex_unlock(&dev->struct_mutex); | ||
1202 | drm_mm_takedown(&dev_priv->vram); | ||
1203 | i915_gem_lastclose(dev); | ||
1204 | } | ||
1205 | |||
876 | drm_free(dev->dev_private, sizeof(drm_i915_private_t), | 1206 | drm_free(dev->dev_private, sizeof(drm_i915_private_t), |
877 | DRM_MEM_DRIVER); | 1207 | DRM_MEM_DRIVER); |
878 | 1208 | ||
@@ -898,12 +1228,26 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |||
898 | return 0; | 1228 | return 0; |
899 | } | 1229 | } |
900 | 1230 | ||
1231 | /** | ||
1232 | * i915_driver_lastclose - clean up after all DRM clients have exited | ||
1233 | * @dev: DRM device | ||
1234 | * | ||
1235 | * Take care of cleaning up after all DRM clients have exited. In the | ||
1236 | * mode setting case, we want to restore the kernel's initial mode (just | ||
1237 | * in case the last client left us in a bad state). | ||
1238 | * | ||
1239 | * Additionally, in the non-mode setting case, we'll tear down the AGP | ||
1240 | * and DMA structures, since the kernel won't be using them, and clean | ||
1241 | * up any GEM state. | ||
1242 | */ | ||
901 | void i915_driver_lastclose(struct drm_device * dev) | 1243 | void i915_driver_lastclose(struct drm_device * dev) |
902 | { | 1244 | { |
903 | drm_i915_private_t *dev_priv = dev->dev_private; | 1245 | drm_i915_private_t *dev_priv = dev->dev_private; |
904 | 1246 | ||
905 | if (!dev_priv) | 1247 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { |
1248 | intelfb_restore(); | ||
906 | return; | 1249 | return; |
1250 | } | ||
907 | 1251 | ||
908 | i915_gem_lastclose(dev); | 1252 | i915_gem_lastclose(dev); |
909 | 1253 | ||
@@ -916,7 +1260,8 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
916 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | 1260 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
917 | { | 1261 | { |
918 | drm_i915_private_t *dev_priv = dev->dev_private; | 1262 | drm_i915_private_t *dev_priv = dev->dev_private; |
919 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | 1263 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
1264 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | ||
920 | } | 1265 | } |
921 | 1266 | ||
922 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | 1267 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) |
@@ -956,6 +1301,7 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
956 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), | 1301 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), |
957 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), | 1302 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), |
958 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), | 1303 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), |
1304 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), | ||
959 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), | 1305 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), |
960 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), | 1306 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), |
961 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), | 1307 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), |
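
The i915_probe_agp() comment above describes the stolen-memory accounting: what the driver can actually use is the BIOS-stolen size minus the GTT (1KB per MB of aperture on pre-G4X parts) and the 4KB popup. A small sketch of just that overhead arithmetic, with invented sizes (256MB aperture, 8MB stolen):

#include <stdbool.h>
#include <stdio.h>

/* Overhead carved out of the stolen region, per the i915_probe_agp() comment:
 * pre-G4X keeps the GTT (1KB per MB of aperture) plus a 4KB popup in stolen
 * memory; G4X only loses the 4KB popup. */
static unsigned long stolen_overhead(unsigned long aperture, bool is_g4x)
{
        return is_g4x ? 4096 : (aperture / 1024) + 4096;
}

int main(void)
{
        unsigned long aperture = 256UL * 1024 * 1024;   /* invented aperture size */
        unsigned long stolen = 8UL * 1024 * 1024;       /* invented stolen size */
        unsigned long preallocated = stolen - stolen_overhead(aperture, false);

        printf("usable preallocated space: %lu bytes\n", preallocated);
        return 0;
}
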
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a80ead215282..aac12ee31a46 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -33,11 +33,22 @@ | |||
33 | #include "i915_drv.h" | 33 | #include "i915_drv.h" |
34 | 34 | ||
35 | #include "drm_pciids.h" | 35 | #include "drm_pciids.h" |
36 | #include <linux/console.h> | ||
37 | |||
38 | static unsigned int i915_modeset = -1; | ||
39 | module_param_named(modeset, i915_modeset, int, 0400); | ||
40 | |||
41 | unsigned int i915_fbpercrtc = 0; | ||
42 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | ||
36 | 43 | ||
37 | static struct pci_device_id pciidlist[] = { | 44 | static struct pci_device_id pciidlist[] = { |
38 | i915_PCI_IDS | 45 | i915_PCI_IDS |
39 | }; | 46 | }; |
40 | 47 | ||
48 | #if defined(CONFIG_DRM_I915_KMS) | ||
49 | MODULE_DEVICE_TABLE(pci, pciidlist); | ||
50 | #endif | ||
51 | |||
41 | static int i915_suspend(struct drm_device *dev, pm_message_t state) | 52 | static int i915_suspend(struct drm_device *dev, pm_message_t state) |
42 | { | 53 | { |
43 | struct drm_i915_private *dev_priv = dev->dev_private; | 54 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -81,6 +92,10 @@ static int i915_resume(struct drm_device *dev) | |||
81 | return 0; | 92 | return 0; |
82 | } | 93 | } |
83 | 94 | ||
95 | static struct vm_operations_struct i915_gem_vm_ops = { | ||
96 | .fault = i915_gem_fault, | ||
97 | }; | ||
98 | |||
84 | static struct drm_driver driver = { | 99 | static struct drm_driver driver = { |
85 | /* don't use mtrr's here, the Xserver or user space app should | 100 | /* don't use mtrr's here, the Xserver or user space app should |
86 | * deal with them for intel hardware. | 101 | * deal with them for intel hardware. |
@@ -97,7 +112,6 @@ static struct drm_driver driver = { | |||
97 | .suspend = i915_suspend, | 112 | .suspend = i915_suspend, |
98 | .resume = i915_resume, | 113 | .resume = i915_resume, |
99 | .device_is_agp = i915_driver_device_is_agp, | 114 | .device_is_agp = i915_driver_device_is_agp, |
100 | .get_vblank_counter = i915_get_vblank_counter, | ||
101 | .enable_vblank = i915_enable_vblank, | 115 | .enable_vblank = i915_enable_vblank, |
102 | .disable_vblank = i915_disable_vblank, | 116 | .disable_vblank = i915_disable_vblank, |
103 | .irq_preinstall = i915_driver_irq_preinstall, | 117 | .irq_preinstall = i915_driver_irq_preinstall, |
@@ -107,17 +121,20 @@ static struct drm_driver driver = { | |||
107 | .reclaim_buffers = drm_core_reclaim_buffers, | 121 | .reclaim_buffers = drm_core_reclaim_buffers, |
108 | .get_map_ofs = drm_core_get_map_ofs, | 122 | .get_map_ofs = drm_core_get_map_ofs, |
109 | .get_reg_ofs = drm_core_get_reg_ofs, | 123 | .get_reg_ofs = drm_core_get_reg_ofs, |
124 | .master_create = i915_master_create, | ||
125 | .master_destroy = i915_master_destroy, | ||
110 | .proc_init = i915_gem_proc_init, | 126 | .proc_init = i915_gem_proc_init, |
111 | .proc_cleanup = i915_gem_proc_cleanup, | 127 | .proc_cleanup = i915_gem_proc_cleanup, |
112 | .gem_init_object = i915_gem_init_object, | 128 | .gem_init_object = i915_gem_init_object, |
113 | .gem_free_object = i915_gem_free_object, | 129 | .gem_free_object = i915_gem_free_object, |
130 | .gem_vm_ops = &i915_gem_vm_ops, | ||
114 | .ioctls = i915_ioctls, | 131 | .ioctls = i915_ioctls, |
115 | .fops = { | 132 | .fops = { |
116 | .owner = THIS_MODULE, | 133 | .owner = THIS_MODULE, |
117 | .open = drm_open, | 134 | .open = drm_open, |
118 | .release = drm_release, | 135 | .release = drm_release, |
119 | .ioctl = drm_ioctl, | 136 | .ioctl = drm_ioctl, |
120 | .mmap = drm_mmap, | 137 | .mmap = drm_gem_mmap, |
121 | .poll = drm_poll, | 138 | .poll = drm_poll, |
122 | .fasync = drm_fasync, | 139 | .fasync = drm_fasync, |
123 | #ifdef CONFIG_COMPAT | 140 | #ifdef CONFIG_COMPAT |
@@ -141,6 +158,28 @@ static struct drm_driver driver = { | |||
141 | static int __init i915_init(void) | 158 | static int __init i915_init(void) |
142 | { | 159 | { |
143 | driver.num_ioctls = i915_max_ioctl; | 160 | driver.num_ioctls = i915_max_ioctl; |
161 | |||
162 | /* | ||
163 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless | ||
164 | * explicitly disabled with the module parameter. | ||
165 | * | ||
166 | * Otherwise, just follow the parameter (defaulting to off). | ||
167 | * | ||
168 | * Allow optional vga_text_mode_force boot option to override | ||
169 | * the default behavior. | ||
170 | */ | ||
171 | #if defined(CONFIG_DRM_I915_KMS) | ||
172 | if (i915_modeset != 0) | ||
173 | driver.driver_features |= DRIVER_MODESET; | ||
174 | #endif | ||
175 | if (i915_modeset == 1) | ||
176 | driver.driver_features |= DRIVER_MODESET; | ||
177 | |||
178 | #ifdef CONFIG_VGA_CONSOLE | ||
179 | if (vgacon_text_force() && i915_modeset == -1) | ||
180 | driver.driver_features &= ~DRIVER_MODESET; | ||
181 | #endif | ||
182 | |||
144 | return drm_init(&driver); | 183 | return drm_init(&driver); |
145 | } | 184 | } |
146 | 185 | ||
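The net effect of the i915_init() logic above can be restated as a small standalone sketch (hypothetical helper in plain C, not part of the patch): CONFIG_DRM_I915_KMS makes kernel modesetting the default, modeset=0/1 forces it off or on, and a forced VGA text console wins only when the parameter was left unset.

#include <stdbool.h>

static bool i915_modeset_enabled(int modeset, bool kms_config, bool vgacon_forced)
{
        bool enabled = false;

        /* CONFIG_DRM_I915_KMS: KMS is the default unless modeset=0 */
        if (kms_config && modeset != 0)
                enabled = true;

        /* modeset=1 turns KMS on regardless of the config default */
        if (modeset == 1)
                enabled = true;

        /* a forced VGA text console wins only if modeset was left unset (-1) */
        if (vgacon_forced && modeset == -1)
                enabled = false;

        return enabled;
}

int main(void)
{
        /* e.g. KMS config on, no parameter, no forced text console -> enabled */
        return i915_modeset_enabled(-1, true, false) ? 0 : 1;
}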
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ef1c0b8f8d07..7325363164f8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -31,6 +31,7 @@ | |||
31 | #define _I915_DRV_H_ | 31 | #define _I915_DRV_H_ |
32 | 32 | ||
33 | #include "i915_reg.h" | 33 | #include "i915_reg.h" |
34 | #include "intel_bios.h" | ||
34 | #include <linux/io-mapping.h> | 35 | #include <linux/io-mapping.h> |
35 | 36 | ||
36 | /* General customization: | 37 | /* General customization: |
@@ -47,6 +48,8 @@ enum pipe { | |||
47 | PIPE_B, | 48 | PIPE_B, |
48 | }; | 49 | }; |
49 | 50 | ||
51 | #define I915_NUM_PIPE 2 | ||
52 | |||
50 | /* Interface history: | 53 | /* Interface history: |
51 | * | 54 | * |
52 | * 1.1: Original. | 55 | * 1.1: Original. |
@@ -69,6 +72,18 @@ enum pipe { | |||
69 | #define WATCH_INACTIVE 0 | 72 | #define WATCH_INACTIVE 0 |
70 | #define WATCH_PWRITE 0 | 73 | #define WATCH_PWRITE 0 |
71 | 74 | ||
75 | #define I915_GEM_PHYS_CURSOR_0 1 | ||
76 | #define I915_GEM_PHYS_CURSOR_1 2 | ||
77 | #define I915_GEM_PHYS_OVERLAY_REGS 3 | ||
78 | #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) | ||
79 | |||
80 | struct drm_i915_gem_phys_object { | ||
81 | int id; | ||
82 | struct page **page_list; | ||
83 | drm_dma_handle_t *handle; | ||
84 | struct drm_gem_object *cur_obj; | ||
85 | }; | ||
86 | |||
72 | typedef struct _drm_i915_ring_buffer { | 87 | typedef struct _drm_i915_ring_buffer { |
73 | int tail_mask; | 88 | int tail_mask; |
74 | unsigned long Size; | 89 | unsigned long Size; |
@@ -101,13 +116,23 @@ struct intel_opregion { | |||
101 | int enabled; | 116 | int enabled; |
102 | }; | 117 | }; |
103 | 118 | ||
119 | struct drm_i915_master_private { | ||
120 | drm_local_map_t *sarea; | ||
121 | struct _drm_i915_sarea *sarea_priv; | ||
122 | }; | ||
123 | #define I915_FENCE_REG_NONE -1 | ||
124 | |||
125 | struct drm_i915_fence_reg { | ||
126 | struct drm_gem_object *obj; | ||
127 | }; | ||
128 | |||
104 | typedef struct drm_i915_private { | 129 | typedef struct drm_i915_private { |
105 | struct drm_device *dev; | 130 | struct drm_device *dev; |
106 | 131 | ||
132 | int has_gem; | ||
133 | |||
107 | void __iomem *regs; | 134 | void __iomem *regs; |
108 | drm_local_map_t *sarea; | ||
109 | 135 | ||
110 | drm_i915_sarea_t *sarea_priv; | ||
111 | drm_i915_ring_buffer_t ring; | 136 | drm_i915_ring_buffer_t ring; |
112 | 137 | ||
113 | drm_dma_handle_t *status_page_dmah; | 138 | drm_dma_handle_t *status_page_dmah; |
@@ -132,6 +157,7 @@ typedef struct drm_i915_private { | |||
132 | int user_irq_refcount; | 157 | int user_irq_refcount; |
133 | /** Cached value of IMR to avoid reads in updating the bitfield */ | 158 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
134 | u32 irq_mask_reg; | 159 | u32 irq_mask_reg; |
160 | u32 pipestat[2]; | ||
135 | 161 | ||
136 | int tex_lru_log_granularity; | 162 | int tex_lru_log_granularity; |
137 | int allow_batchbuffer; | 163 | int allow_batchbuffer; |
@@ -139,14 +165,37 @@ typedef struct drm_i915_private { | |||
139 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 165 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
140 | int vblank_pipe; | 166 | int vblank_pipe; |
141 | 167 | ||
168 | bool cursor_needs_physical; | ||
169 | |||
170 | struct drm_mm vram; | ||
171 | |||
172 | int irq_enabled; | ||
173 | |||
142 | struct intel_opregion opregion; | 174 | struct intel_opregion opregion; |
143 | 175 | ||
176 | /* LVDS info */ | ||
177 | int backlight_duty_cycle; /* restore backlight to this value */ | ||
178 | bool panel_wants_dither; | ||
179 | struct drm_display_mode *panel_fixed_mode; | ||
180 | struct drm_display_mode *vbt_mode; /* if any */ | ||
181 | |||
182 | /* Feature bits from the VBIOS */ | ||
183 | unsigned int int_tv_support:1; | ||
184 | unsigned int lvds_dither:1; | ||
185 | unsigned int lvds_vbt:1; | ||
186 | unsigned int int_crt_support:1; | ||
187 | |||
188 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | ||
189 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | ||
190 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | ||
191 | |||
144 | /* Register state */ | 192 | /* Register state */ |
145 | u8 saveLBB; | 193 | u8 saveLBB; |
146 | u32 saveDSPACNTR; | 194 | u32 saveDSPACNTR; |
147 | u32 saveDSPBCNTR; | 195 | u32 saveDSPBCNTR; |
148 | u32 saveDSPARB; | 196 | u32 saveDSPARB; |
149 | u32 saveRENDERSTANDBY; | 197 | u32 saveRENDERSTANDBY; |
198 | u32 saveHWS; | ||
150 | u32 savePIPEACONF; | 199 | u32 savePIPEACONF; |
151 | u32 savePIPEBCONF; | 200 | u32 savePIPEBCONF; |
152 | u32 savePIPEASRC; | 201 | u32 savePIPEASRC; |
@@ -235,11 +284,16 @@ typedef struct drm_i915_private { | |||
235 | struct drm_mm gtt_space; | 284 | struct drm_mm gtt_space; |
236 | 285 | ||
237 | struct io_mapping *gtt_mapping; | 286 | struct io_mapping *gtt_mapping; |
287 | int gtt_mtrr; | ||
238 | 288 | ||
239 | /** | 289 | /** |
240 | * List of objects currently involved in rendering from the | 290 | * List of objects currently involved in rendering from the |
241 | * ringbuffer. | 291 | * ringbuffer. |
242 | * | 292 | * |
293 | * Includes buffers having the contents of their GPU caches | ||
294 | * flushed, not necessarily primitives. last_rendering_seqno | ||
295 | * represents when the rendering involved will be completed. | ||
296 | * | ||
243 | * A reference is held on the buffer while on this list. | 297 | * A reference is held on the buffer while on this list. |
244 | */ | 298 | */ |
245 | struct list_head active_list; | 299 | struct list_head active_list; |
@@ -249,6 +303,8 @@ typedef struct drm_i915_private { | |||
249 | * still have a write_domain which needs to be flushed before | 303 | * still have a write_domain which needs to be flushed before |
250 | * unbinding. | 304 | * unbinding. |
251 | * | 305 | * |
306 | * last_rendering_seqno is 0 while an object is in this list. | ||
307 | * | ||
252 | * A reference is held on the buffer while on this list. | 308 | * A reference is held on the buffer while on this list. |
253 | */ | 309 | */ |
254 | struct list_head flushing_list; | 310 | struct list_head flushing_list; |
@@ -257,6 +313,8 @@ typedef struct drm_i915_private { | |||
257 | * LRU list of objects which are not in the ringbuffer and | 313 | * LRU list of objects which are not in the ringbuffer and |
258 | * are ready to unbind, but are still in the GTT. | 314 | * are ready to unbind, but are still in the GTT. |
259 | * | 315 | * |
316 | * last_rendering_seqno is 0 while an object is in this list. | ||
317 | * | ||
260 | * A reference is not held on the buffer while on this list, | 318 | * A reference is not held on the buffer while on this list, |
261 | * as merely being GTT-bound shouldn't prevent its being | 319 | * as merely being GTT-bound shouldn't prevent its being |
262 | * freed, and we'll pull it off the list in the free path. | 320 | * freed, and we'll pull it off the list in the free path. |
@@ -313,6 +371,9 @@ typedef struct drm_i915_private { | |||
313 | uint32_t bit_6_swizzle_x; | 371 | uint32_t bit_6_swizzle_x; |
314 | /** Bit 6 swizzling required for Y tiling */ | 372 | /** Bit 6 swizzling required for Y tiling */ |
315 | uint32_t bit_6_swizzle_y; | 373 | uint32_t bit_6_swizzle_y; |
374 | |||
375 | /* storage for physical objects */ | ||
376 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | ||
316 | } mm; | 377 | } mm; |
317 | } drm_i915_private_t; | 378 | } drm_i915_private_t; |
318 | 379 | ||
@@ -350,6 +411,21 @@ struct drm_i915_gem_object { | |||
350 | * This is the same as gtt_space->start | 411 | * This is the same as gtt_space->start |
351 | */ | 412 | */ |
352 | uint32_t gtt_offset; | 413 | uint32_t gtt_offset; |
414 | /** | ||
415 | * Required alignment for the object | ||
416 | */ | ||
417 | uint32_t gtt_alignment; | ||
418 | /** | ||
419 | * Fake offset for use by mmap(2) | ||
420 | */ | ||
421 | uint64_t mmap_offset; | ||
422 | |||
423 | /** | ||
424 | * Fence register bits (if any) for this object. Will be set | ||
425 | * as needed when mapped into the GTT. | ||
426 | * Protected by dev->struct_mutex. | ||
427 | */ | ||
428 | int fence_reg; | ||
353 | 429 | ||
354 | /** Boolean whether this object has a valid gtt offset. */ | 430 | /** Boolean whether this object has a valid gtt offset. */ |
355 | int gtt_bound; | 431 | int gtt_bound; |
@@ -362,15 +438,23 @@ struct drm_i915_gem_object { | |||
362 | 438 | ||
363 | /** Current tiling mode for the object. */ | 439 | /** Current tiling mode for the object. */ |
364 | uint32_t tiling_mode; | 440 | uint32_t tiling_mode; |
441 | uint32_t stride; | ||
365 | 442 | ||
366 | /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */ | 443 | /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */ |
367 | uint32_t agp_type; | 444 | uint32_t agp_type; |
368 | 445 | ||
369 | /** | 446 | /** |
370 | * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when | 447 | * If present, while GEM_DOMAIN_CPU is in the read domain this array |
371 | * GEM_DOMAIN_CPU is not in the object's read domain. | 448 | * flags which individual pages are valid. |
372 | */ | 449 | */ |
373 | uint8_t *page_cpu_valid; | 450 | uint8_t *page_cpu_valid; |
451 | |||
452 | /** User space pin count and filp owning the pin */ | ||
453 | uint32_t user_pin_count; | ||
454 | struct drm_file *pin_filp; | ||
455 | |||
456 | /** for physically allocated objects */ | ||
457 | struct drm_i915_gem_phys_object *phys_obj; | ||
374 | }; | 458 | }; |
375 | 459 | ||
376 | /** | 460 | /** |
@@ -390,9 +474,6 @@ struct drm_i915_gem_request { | |||
390 | /** Time at which this request was emitted, in jiffies. */ | 474 | /** Time at which this request was emitted, in jiffies. */ |
391 | unsigned long emitted_jiffies; | 475 | unsigned long emitted_jiffies; |
392 | 476 | ||
393 | /** Cache domains that were flushed at the start of the request. */ | ||
394 | uint32_t flush_domains; | ||
395 | |||
396 | struct list_head list; | 477 | struct list_head list; |
397 | }; | 478 | }; |
398 | 479 | ||
@@ -403,8 +484,19 @@ struct drm_i915_file_private { | |||
403 | } mm; | 484 | } mm; |
404 | }; | 485 | }; |
405 | 486 | ||
487 | enum intel_chip_family { | ||
488 | CHIP_I8XX = 0x01, | ||
489 | CHIP_I9XX = 0x02, | ||
490 | CHIP_I915 = 0x04, | ||
491 | CHIP_I965 = 0x08, | ||
492 | }; | ||
493 | |||
406 | extern struct drm_ioctl_desc i915_ioctls[]; | 494 | extern struct drm_ioctl_desc i915_ioctls[]; |
407 | extern int i915_max_ioctl; | 495 | extern int i915_max_ioctl; |
496 | extern unsigned int i915_fbpercrtc; | ||
497 | |||
498 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); | ||
499 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); | ||
408 | 500 | ||
409 | /* i915_dma.c */ | 501 | /* i915_dma.c */ |
410 | extern void i915_kernel_lost_context(struct drm_device * dev); | 502 | extern void i915_kernel_lost_context(struct drm_device * dev); |
@@ -430,6 +522,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data, | |||
430 | struct drm_file *file_priv); | 522 | struct drm_file *file_priv); |
431 | void i915_user_irq_get(struct drm_device *dev); | 523 | void i915_user_irq_get(struct drm_device *dev); |
432 | void i915_user_irq_put(struct drm_device *dev); | 524 | void i915_user_irq_put(struct drm_device *dev); |
525 | extern void i915_enable_interrupt (struct drm_device *dev); | ||
433 | 526 | ||
434 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); | 527 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); |
435 | extern void i915_driver_irq_preinstall(struct drm_device * dev); | 528 | extern void i915_driver_irq_preinstall(struct drm_device * dev); |
@@ -442,10 +535,18 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, | |||
442 | extern int i915_enable_vblank(struct drm_device *dev, int crtc); | 535 | extern int i915_enable_vblank(struct drm_device *dev, int crtc); |
443 | extern void i915_disable_vblank(struct drm_device *dev, int crtc); | 536 | extern void i915_disable_vblank(struct drm_device *dev, int crtc); |
444 | extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); | 537 | extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); |
538 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); | ||
445 | extern int i915_vblank_swap(struct drm_device *dev, void *data, | 539 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
446 | struct drm_file *file_priv); | 540 | struct drm_file *file_priv); |
447 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); | 541 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); |
448 | 542 | ||
543 | void | ||
544 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | ||
545 | |||
546 | void | ||
547 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | ||
548 | |||
549 | |||
449 | /* i915_mem.c */ | 550 | /* i915_mem.c */ |
450 | extern int i915_mem_alloc(struct drm_device *dev, void *data, | 551 | extern int i915_mem_alloc(struct drm_device *dev, void *data, |
451 | struct drm_file *file_priv); | 552 | struct drm_file *file_priv); |
@@ -469,6 +570,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
469 | struct drm_file *file_priv); | 570 | struct drm_file *file_priv); |
470 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 571 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
471 | struct drm_file *file_priv); | 572 | struct drm_file *file_priv); |
573 | int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | ||
574 | struct drm_file *file_priv); | ||
472 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 575 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
473 | struct drm_file *file_priv); | 576 | struct drm_file *file_priv); |
474 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 577 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
@@ -500,11 +603,27 @@ int i915_gem_init_object(struct drm_gem_object *obj); | |||
500 | void i915_gem_free_object(struct drm_gem_object *obj); | 603 | void i915_gem_free_object(struct drm_gem_object *obj); |
501 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); | 604 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); |
502 | void i915_gem_object_unpin(struct drm_gem_object *obj); | 605 | void i915_gem_object_unpin(struct drm_gem_object *obj); |
606 | int i915_gem_object_unbind(struct drm_gem_object *obj); | ||
503 | void i915_gem_lastclose(struct drm_device *dev); | 607 | void i915_gem_lastclose(struct drm_device *dev); |
504 | uint32_t i915_get_gem_seqno(struct drm_device *dev); | 608 | uint32_t i915_get_gem_seqno(struct drm_device *dev); |
505 | void i915_gem_retire_requests(struct drm_device *dev); | 609 | void i915_gem_retire_requests(struct drm_device *dev); |
506 | void i915_gem_retire_work_handler(struct work_struct *work); | 610 | void i915_gem_retire_work_handler(struct work_struct *work); |
507 | void i915_gem_clflush_object(struct drm_gem_object *obj); | 611 | void i915_gem_clflush_object(struct drm_gem_object *obj); |
612 | int i915_gem_object_set_domain(struct drm_gem_object *obj, | ||
613 | uint32_t read_domains, | ||
614 | uint32_t write_domain); | ||
615 | int i915_gem_init_ringbuffer(struct drm_device *dev); | ||
616 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | ||
617 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | ||
618 | unsigned long end); | ||
619 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
620 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | ||
621 | int write); | ||
622 | int i915_gem_attach_phys_object(struct drm_device *dev, | ||
623 | struct drm_gem_object *obj, int id); | ||
624 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
625 | struct drm_gem_object *obj); | ||
626 | void i915_gem_free_all_phys_object(struct drm_device *dev); | ||
508 | 627 | ||
509 | /* i915_gem_tiling.c */ | 628 | /* i915_gem_tiling.c */ |
510 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 629 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
@@ -543,6 +662,10 @@ static inline void opregion_asle_intr(struct drm_device *dev) { return; } | |||
543 | static inline void opregion_enable_asle(struct drm_device *dev) { return; } | 662 | static inline void opregion_enable_asle(struct drm_device *dev) { return; } |
544 | #endif | 663 | #endif |
545 | 664 | ||
665 | /* modesetting */ | ||
666 | extern void intel_modeset_init(struct drm_device *dev); | ||
667 | extern void intel_modeset_cleanup(struct drm_device *dev); | ||
668 | |||
546 | /** | 669 | /** |
547 | * Lock test for when it's just for synchronization of ring access. | 670 | * Lock test for when it's just for synchronization of ring access. |
548 | * | 671 | * |
@@ -560,6 +683,14 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; } | |||
560 | #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) | 683 | #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) |
561 | #define I915_READ8(reg) readb(dev_priv->regs + (reg)) | 684 | #define I915_READ8(reg) readb(dev_priv->regs + (reg)) |
562 | #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) | 685 | #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) |
686 | #ifdef writeq | ||
687 | #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) | ||
688 | #else | ||
689 | #define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \ | ||
690 | writel(upper_32_bits(val), dev_priv->regs + \ | ||
691 | (reg) + 4)) | ||
692 | #endif | ||
693 | #define POSTING_READ(reg) (void)I915_READ(reg) | ||
563 | 694 | ||
564 | #define I915_VERBOSE 0 | 695 | #define I915_VERBOSE 0 |
565 | 696 | ||
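Where writeq is unavailable (32-bit builds), the I915_WRITE64 fallback above performs two 32-bit MMIO writes: the low dword at reg and the high dword at reg + 4. A minimal user-space sketch of that split, with a byte array standing in for the register BAR (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void write64_split(uint8_t *regs, uint32_t reg, uint64_t val)
{
        uint32_t lo = (uint32_t)val;
        uint32_t hi = (uint32_t)(val >> 32);    /* what upper_32_bits() yields */

        memcpy(regs + reg, &lo, sizeof(lo));    /* low dword first */
        memcpy(regs + reg + 4, &hi, sizeof(hi));
}

int main(void)
{
        uint8_t regs[16] = {0};

        write64_split(regs, 8, 0x1122334455667788ull);
        /* on a little-endian host: regs[8] == 0x88, regs[15] == 0x11 */
        printf("%02x %02x\n", regs[8], regs[15]);
        return 0;
}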
@@ -642,7 +773,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
642 | 773 | ||
643 | #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ | 774 | #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ |
644 | (dev)->pci_device == 0x2E12 || \ | 775 | (dev)->pci_device == 0x2E12 || \ |
645 | (dev)->pci_device == 0x2E22) | 776 | (dev)->pci_device == 0x2E22 || \ |
777 | IS_GM45(dev)) | ||
646 | 778 | ||
647 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ | 779 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ |
648 | (dev)->pci_device == 0x29B2 || \ | 780 | (dev)->pci_device == 0x29B2 || \ |
@@ -655,6 +787,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
655 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) | 787 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) |
656 | 788 | ||
657 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) | 789 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) |
790 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | ||
791 | * rows, which changed the alignment requirements and fence programming. | ||
792 | */ | ||
793 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ | ||
794 | IS_I915GM(dev))) | ||
795 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) | ||
658 | 796 | ||
659 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 797 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
660 | 798 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6b4a2bd20640..818576654092 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -30,73 +30,80 @@ | |||
30 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include <linux/swap.h> | 32 | #include <linux/swap.h> |
33 | #include <linux/pci.h> | ||
33 | 34 | ||
34 | static int | 35 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
35 | i915_gem_object_set_domain(struct drm_gem_object *obj, | 36 | |
36 | uint32_t read_domains, | 37 | static void |
37 | uint32_t write_domain); | 38 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, |
38 | static int | 39 | uint32_t read_domains, |
39 | i915_gem_object_set_domain_range(struct drm_gem_object *obj, | 40 | uint32_t write_domain); |
40 | uint64_t offset, | 41 | static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); |
41 | uint64_t size, | 42 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); |
42 | uint32_t read_domains, | 43 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); |
43 | uint32_t write_domain); | 44 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, |
44 | static int | 45 | int write); |
45 | i915_gem_set_domain(struct drm_gem_object *obj, | 46 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, |
46 | struct drm_file *file_priv, | 47 | uint64_t offset, |
47 | uint32_t read_domains, | 48 | uint64_t size); |
48 | uint32_t write_domain); | 49 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); |
49 | static int i915_gem_object_get_page_list(struct drm_gem_object *obj); | 50 | static int i915_gem_object_get_page_list(struct drm_gem_object *obj); |
50 | static void i915_gem_object_free_page_list(struct drm_gem_object *obj); | 51 | static void i915_gem_object_free_page_list(struct drm_gem_object *obj); |
51 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | 52 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); |
53 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | ||
54 | unsigned alignment); | ||
55 | static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write); | ||
56 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | ||
57 | static int i915_gem_evict_something(struct drm_device *dev); | ||
58 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
59 | struct drm_i915_gem_pwrite *args, | ||
60 | struct drm_file *file_priv); | ||
61 | |||
62 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | ||
63 | unsigned long end) | ||
64 | { | ||
65 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
52 | 66 | ||
53 | static void | 67 | if (start >= end || |
54 | i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 68 | (start & (PAGE_SIZE - 1)) != 0 || |
69 | (end & (PAGE_SIZE - 1)) != 0) { | ||
70 | return -EINVAL; | ||
71 | } | ||
72 | |||
73 | drm_mm_init(&dev_priv->mm.gtt_space, start, | ||
74 | end - start); | ||
75 | |||
76 | dev->gtt_total = (uint32_t) (end - start); | ||
77 | |||
78 | return 0; | ||
79 | } | ||
55 | 80 | ||
56 | int | 81 | int |
57 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 82 | i915_gem_init_ioctl(struct drm_device *dev, void *data, |
58 | struct drm_file *file_priv) | 83 | struct drm_file *file_priv) |
59 | { | 84 | { |
60 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
61 | struct drm_i915_gem_init *args = data; | 85 | struct drm_i915_gem_init *args = data; |
86 | int ret; | ||
62 | 87 | ||
63 | mutex_lock(&dev->struct_mutex); | 88 | mutex_lock(&dev->struct_mutex); |
64 | 89 | ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); | |
65 | if (args->gtt_start >= args->gtt_end || | ||
66 | (args->gtt_start & (PAGE_SIZE - 1)) != 0 || | ||
67 | (args->gtt_end & (PAGE_SIZE - 1)) != 0) { | ||
68 | mutex_unlock(&dev->struct_mutex); | ||
69 | return -EINVAL; | ||
70 | } | ||
71 | |||
72 | drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, | ||
73 | args->gtt_end - args->gtt_start); | ||
74 | |||
75 | dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start); | ||
76 | |||
77 | mutex_unlock(&dev->struct_mutex); | 90 | mutex_unlock(&dev->struct_mutex); |
78 | 91 | ||
79 | return 0; | 92 | return ret; |
80 | } | 93 | } |
81 | 94 | ||
82 | int | 95 | int |
83 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 96 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
84 | struct drm_file *file_priv) | 97 | struct drm_file *file_priv) |
85 | { | 98 | { |
86 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
87 | struct drm_i915_gem_get_aperture *args = data; | 99 | struct drm_i915_gem_get_aperture *args = data; |
88 | struct drm_i915_gem_object *obj_priv; | ||
89 | 100 | ||
90 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 101 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
91 | return -ENODEV; | 102 | return -ENODEV; |
92 | 103 | ||
93 | args->aper_size = dev->gtt_total; | 104 | args->aper_size = dev->gtt_total; |
94 | args->aper_available_size = args->aper_size; | 105 | args->aper_available_size = (args->aper_size - |
95 | 106 | atomic_read(&dev->pin_memory)); | |
96 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | ||
97 | if (obj_priv->pin_count > 0) | ||
98 | args->aper_available_size -= obj_priv->obj->size; | ||
99 | } | ||
100 | 107 | ||
101 | return 0; | 108 | return 0; |
102 | } | 109 | } |
@@ -166,8 +173,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
166 | 173 | ||
167 | mutex_lock(&dev->struct_mutex); | 174 | mutex_lock(&dev->struct_mutex); |
168 | 175 | ||
169 | ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, | 176 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, |
170 | I915_GEM_DOMAIN_CPU, 0); | 177 | args->size); |
171 | if (ret != 0) { | 178 | if (ret != 0) { |
172 | drm_gem_object_unreference(obj); | 179 | drm_gem_object_unreference(obj); |
173 | mutex_unlock(&dev->struct_mutex); | 180 | mutex_unlock(&dev->struct_mutex); |
@@ -264,8 +271,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
264 | mutex_unlock(&dev->struct_mutex); | 271 | mutex_unlock(&dev->struct_mutex); |
265 | return ret; | 272 | return ret; |
266 | } | 273 | } |
267 | ret = i915_gem_set_domain(obj, file_priv, | 274 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
268 | I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); | ||
269 | if (ret) | 275 | if (ret) |
270 | goto fail; | 276 | goto fail; |
271 | 277 | ||
@@ -324,8 +330,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
324 | 330 | ||
325 | mutex_lock(&dev->struct_mutex); | 331 | mutex_lock(&dev->struct_mutex); |
326 | 332 | ||
327 | ret = i915_gem_set_domain(obj, file_priv, | 333 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
328 | I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); | ||
329 | if (ret) { | 334 | if (ret) { |
330 | mutex_unlock(&dev->struct_mutex); | 335 | mutex_unlock(&dev->struct_mutex); |
331 | return ret; | 336 | return ret; |
@@ -384,8 +389,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
384 | * pread/pwrite currently are reading and writing from the CPU | 389 | * pread/pwrite currently are reading and writing from the CPU |
385 | * perspective, requiring manual detiling by the client. | 390 | * perspective, requiring manual detiling by the client. |
386 | */ | 391 | */ |
387 | if (obj_priv->tiling_mode == I915_TILING_NONE && | 392 | if (obj_priv->phys_obj) |
388 | dev->gtt_total != 0) | 393 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); |
394 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | ||
395 | dev->gtt_total != 0) | ||
389 | ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); | 396 | ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); |
390 | else | 397 | else |
391 | ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); | 398 | ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); |
@@ -401,7 +408,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
401 | } | 408 | } |
402 | 409 | ||
403 | /** | 410 | /** |
404 | * Called when user space prepares to use an object | 411 | * Called when user space prepares to use an object with the CPU, either |
412 | * through the mmap ioctl's mapping or a GTT mapping. | ||
405 | */ | 413 | */ |
406 | int | 414 | int |
407 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 415 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
@@ -409,11 +417,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
409 | { | 417 | { |
410 | struct drm_i915_gem_set_domain *args = data; | 418 | struct drm_i915_gem_set_domain *args = data; |
411 | struct drm_gem_object *obj; | 419 | struct drm_gem_object *obj; |
420 | uint32_t read_domains = args->read_domains; | ||
421 | uint32_t write_domain = args->write_domain; | ||
412 | int ret; | 422 | int ret; |
413 | 423 | ||
414 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 424 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
415 | return -ENODEV; | 425 | return -ENODEV; |
416 | 426 | ||
427 | /* Only handle setting domains to types used by the CPU. */ | ||
428 | if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | ||
429 | return -EINVAL; | ||
430 | |||
431 | if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | ||
432 | return -EINVAL; | ||
433 | |||
434 | /* Having something in the write domain implies it's in the read | ||
435 | * domain, and only that read domain. Enforce that in the request. | ||
436 | */ | ||
437 | if (write_domain != 0 && read_domains != write_domain) | ||
438 | return -EINVAL; | ||
439 | |||
417 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 440 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
418 | if (obj == NULL) | 441 | if (obj == NULL) |
419 | return -EBADF; | 442 | return -EBADF; |
@@ -421,10 +444,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
421 | mutex_lock(&dev->struct_mutex); | 444 | mutex_lock(&dev->struct_mutex); |
422 | #if WATCH_BUF | 445 | #if WATCH_BUF |
423 | DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", | 446 | DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", |
424 | obj, obj->size, args->read_domains, args->write_domain); | 447 | obj, obj->size, read_domains, write_domain); |
425 | #endif | 448 | #endif |
426 | ret = i915_gem_set_domain(obj, file_priv, | 449 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
427 | args->read_domains, args->write_domain); | 450 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); |
451 | |||
452 | /* Silently promote "you're not bound, there was nothing to do" | ||
453 | * to success, since the client was just asking us to | ||
454 | * make sure everything was done. | ||
455 | */ | ||
456 | if (ret == -EINVAL) | ||
457 | ret = 0; | ||
458 | } else { | ||
459 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | ||
460 | } | ||
461 | |||
428 | drm_gem_object_unreference(obj); | 462 | drm_gem_object_unreference(obj); |
429 | mutex_unlock(&dev->struct_mutex); | 463 | mutex_unlock(&dev->struct_mutex); |
430 | return ret; | 464 | return ret; |
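For reference, a user-space call that passes the new validation above sets matching read and write domains; a minimal sketch using the existing set_domain ioctl and structure from i915_drm.h (the wrapper function itself is hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_to_gtt_domain(int drm_fd, uint32_t handle, int writing)
{
        struct drm_i915_gem_set_domain arg = {
                .handle       = handle,
                .read_domains = I915_GEM_DOMAIN_GTT,
                /* writing implies reading from the same (and only that) domain */
                .write_domain = writing ? I915_GEM_DOMAIN_GTT : 0,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
}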
@@ -459,10 +493,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
459 | obj_priv = obj->driver_private; | 493 | obj_priv = obj->driver_private; |
460 | 494 | ||
461 | /* Pinned buffers may be scanout, so flush the cache */ | 495 | /* Pinned buffers may be scanout, so flush the cache */ |
462 | if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { | 496 | if (obj_priv->pin_count) |
463 | i915_gem_clflush_object(obj); | 497 | i915_gem_object_flush_cpu_write_domain(obj); |
464 | drm_agp_chipset_flush(dev); | 498 | |
465 | } | ||
466 | drm_gem_object_unreference(obj); | 499 | drm_gem_object_unreference(obj); |
467 | mutex_unlock(&dev->struct_mutex); | 500 | mutex_unlock(&dev->struct_mutex); |
468 | return ret; | 501 | return ret; |
@@ -509,6 +542,258 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
509 | return 0; | 542 | return 0; |
510 | } | 543 | } |
511 | 544 | ||
545 | /** | ||
546 | * i915_gem_fault - fault a page into the GTT | ||
547 | * @vma: VMA in question | ||
548 | * @vmf: fault info | ||
549 | * | ||
550 | * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped | ||
551 | * from userspace. The fault handler takes care of binding the object to | ||
552 | * the GTT (if needed), allocating and programming a fence register (again, | ||
553 | * only if needed based on whether the old reg is still valid or the object | ||
554 | * is tiled) and inserting a new PTE into the faulting process. | ||
555 | * | ||
556 | * Note that the faulting process may involve evicting existing objects | ||
557 | * from the GTT and/or fence registers to make room. So performance may | ||
558 | * suffer if the GTT working set is large or there are few fence registers | ||
559 | * left. | ||
560 | */ | ||
561 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
562 | { | ||
563 | struct drm_gem_object *obj = vma->vm_private_data; | ||
564 | struct drm_device *dev = obj->dev; | ||
565 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
566 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
567 | pgoff_t page_offset; | ||
568 | unsigned long pfn; | ||
569 | int ret = 0; | ||
570 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | ||
571 | |||
572 | /* We don't use vmf->pgoff since that has the fake offset */ | ||
573 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | ||
574 | PAGE_SHIFT; | ||
575 | |||
576 | /* Now bind it into the GTT if needed */ | ||
577 | mutex_lock(&dev->struct_mutex); | ||
578 | if (!obj_priv->gtt_space) { | ||
579 | ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); | ||
580 | if (ret) { | ||
581 | mutex_unlock(&dev->struct_mutex); | ||
582 | return VM_FAULT_SIGBUS; | ||
583 | } | ||
584 | list_add(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
585 | } | ||
586 | |||
587 | /* Need a new fence register? */ | ||
588 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
589 | obj_priv->tiling_mode != I915_TILING_NONE) { | ||
590 | ret = i915_gem_object_get_fence_reg(obj, write); | ||
591 | if (ret) { | ||
592 | mutex_unlock(&dev->struct_mutex); | ||
593 | return VM_FAULT_SIGBUS; | ||
594 | } | ||
595 | } | ||
596 | |||
597 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | ||
598 | page_offset; | ||
599 | |||
600 | /* Finally, remap it using the new GTT offset */ | ||
601 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | ||
602 | |||
603 | mutex_unlock(&dev->struct_mutex); | ||
604 | |||
605 | switch (ret) { | ||
606 | case -ENOMEM: | ||
607 | case -EAGAIN: | ||
608 | return VM_FAULT_OOM; | ||
609 | case -EFAULT: | ||
610 | case -EBUSY: | ||
611 | DRM_ERROR("can't insert pfn?? fault or busy...\n"); | ||
612 | return VM_FAULT_SIGBUS; | ||
613 | default: | ||
614 | return VM_FAULT_NOPAGE; | ||
615 | } | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * i915_gem_create_mmap_offset - create a fake mmap offset for an object | ||
620 | * @obj: obj in question | ||
621 | * | ||
622 | * GEM memory mapping works by handing back to userspace a fake mmap offset | ||
623 | * it can use in a subsequent mmap(2) call. The DRM core code then looks | ||
624 | * up the object based on the offset and sets up the various memory mapping | ||
625 | * structures. | ||
626 | * | ||
627 | * This routine allocates and attaches a fake offset for @obj. | ||
628 | */ | ||
629 | static int | ||
630 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | ||
631 | { | ||
632 | struct drm_device *dev = obj->dev; | ||
633 | struct drm_gem_mm *mm = dev->mm_private; | ||
634 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
635 | struct drm_map_list *list; | ||
636 | struct drm_map *map; | ||
637 | int ret = 0; | ||
638 | |||
639 | /* Set the object up for mmap'ing */ | ||
640 | list = &obj->map_list; | ||
641 | list->map = drm_calloc(1, sizeof(struct drm_map_list), | ||
642 | DRM_MEM_DRIVER); | ||
643 | if (!list->map) | ||
644 | return -ENOMEM; | ||
645 | |||
646 | map = list->map; | ||
647 | map->type = _DRM_GEM; | ||
648 | map->size = obj->size; | ||
649 | map->handle = obj; | ||
650 | |||
651 | /* Get a DRM GEM mmap offset allocated... */ | ||
652 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | ||
653 | obj->size / PAGE_SIZE, 0, 0); | ||
654 | if (!list->file_offset_node) { | ||
655 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | ||
656 | ret = -ENOMEM; | ||
657 | goto out_free_list; | ||
658 | } | ||
659 | |||
660 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | ||
661 | obj->size / PAGE_SIZE, 0); | ||
662 | if (!list->file_offset_node) { | ||
663 | ret = -ENOMEM; | ||
664 | goto out_free_list; | ||
665 | } | ||
666 | |||
667 | list->hash.key = list->file_offset_node->start; | ||
668 | if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { | ||
669 | DRM_ERROR("failed to add to map hash\n"); | ||
670 | goto out_free_mm; | ||
671 | } | ||
672 | |||
673 | /* By now we should be all set, any drm_mmap request on the offset | ||
674 | * below will get to our mmap & fault handler */ | ||
675 | obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; | ||
676 | |||
677 | return 0; | ||
678 | |||
679 | out_free_mm: | ||
680 | drm_mm_put_block(list->file_offset_node); | ||
681 | out_free_list: | ||
682 | drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER); | ||
683 | |||
684 | return ret; | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | ||
689 | * @obj: object to check | ||
690 | * | ||
691 | * Return the required GTT alignment for an object, taking into account | ||
692 | * potential fence register mapping if needed. | ||
693 | */ | ||
694 | static uint32_t | ||
695 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | ||
696 | { | ||
697 | struct drm_device *dev = obj->dev; | ||
698 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
699 | int start, i; | ||
700 | |||
701 | /* | ||
702 | * Minimum alignment is 4k (GTT page size), but might be greater | ||
703 | * if a fence register is needed for the object. | ||
704 | */ | ||
705 | if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) | ||
706 | return 4096; | ||
707 | |||
708 | /* | ||
709 | * Previous chips need to be aligned to the size of the smallest | ||
710 | * fence register that can contain the object. | ||
711 | */ | ||
712 | if (IS_I9XX(dev)) | ||
713 | start = 1024*1024; | ||
714 | else | ||
715 | start = 512*1024; | ||
716 | |||
717 | for (i = start; i < obj->size; i <<= 1) | ||
718 | ; | ||
719 | |||
720 | return i; | ||
721 | } | ||
722 | |||
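The alignment rule above (one GTT page on 965 or for untiled objects, otherwise the smallest power-of-two fence size that covers the object, starting at 1 MiB on 9xx and 512 KiB on older chips) can be checked with a small user-space sketch, not driver code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t gtt_alignment(size_t size, int is_965, int tiled, int is_i9xx)
{
        uint32_t i, start;

        if (is_965 || !tiled)
                return 4096;            /* one GTT page */

        start = is_i9xx ? 1024 * 1024 : 512 * 1024;
        for (i = start; i < size; i <<= 1)
                ;                       /* smallest fence size >= object size */
        return i;
}

int main(void)
{
        /* a 300 KiB tiled object on a 9xx chip needs 1 MiB alignment,
         * a 3 MiB one needs 4 MiB */
        printf("%u %u\n", gtt_alignment(300 << 10, 0, 1, 1),
               gtt_alignment(3 << 20, 0, 1, 1));
        return 0;
}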
723 | /** | ||
724 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | ||
725 | * @dev: DRM device | ||
726 | * @data: GTT mapping ioctl data | ||
727 | * @file_priv: GEM object info | ||
728 | * | ||
729 | * Simply returns the fake offset to userspace so it can mmap it. | ||
730 | * The mmap call will end up in drm_gem_mmap(), which will set things | ||
731 | * up so we can get faults in the handler above. | ||
732 | * | ||
733 | * The fault handler will take care of binding the object into the GTT | ||
734 | * (since it may have been evicted to make room for something), allocating | ||
735 | * a fence register, and mapping the appropriate aperture address into | ||
736 | * userspace. | ||
737 | */ | ||
738 | int | ||
739 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | ||
740 | struct drm_file *file_priv) | ||
741 | { | ||
742 | struct drm_i915_gem_mmap_gtt *args = data; | ||
743 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
744 | struct drm_gem_object *obj; | ||
745 | struct drm_i915_gem_object *obj_priv; | ||
746 | int ret; | ||
747 | |||
748 | if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
749 | return -ENODEV; | ||
750 | |||
751 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
752 | if (obj == NULL) | ||
753 | return -EBADF; | ||
754 | |||
755 | mutex_lock(&dev->struct_mutex); | ||
756 | |||
757 | obj_priv = obj->driver_private; | ||
758 | |||
759 | if (!obj_priv->mmap_offset) { | ||
760 | ret = i915_gem_create_mmap_offset(obj); | ||
761 | if (ret) | ||
762 | return ret; | ||
763 | } | ||
764 | |||
765 | args->offset = obj_priv->mmap_offset; | ||
766 | |||
767 | obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); | ||
768 | |||
769 | /* Make sure the alignment is correct for fence regs etc */ | ||
770 | if (obj_priv->agp_mem && | ||
771 | (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { | ||
772 | drm_gem_object_unreference(obj); | ||
773 | mutex_unlock(&dev->struct_mutex); | ||
774 | return -EINVAL; | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * Pull it into the GTT so that we have a page list (makes the | ||
779 | * initial fault faster and any subsequent flushing possible). | ||
780 | */ | ||
781 | if (!obj_priv->agp_mem) { | ||
782 | ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); | ||
783 | if (ret) { | ||
784 | drm_gem_object_unreference(obj); | ||
785 | mutex_unlock(&dev->struct_mutex); | ||
786 | return ret; | ||
787 | } | ||
788 | list_add(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
789 | } | ||
790 | |||
791 | drm_gem_object_unreference(obj); | ||
792 | mutex_unlock(&dev->struct_mutex); | ||
793 | |||
794 | return 0; | ||
795 | } | ||
796 | |||
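On the user-space side, the expected flow is to fetch the fake offset with the new mmap_gtt ioctl and hand it to mmap(2) on the DRM fd. A hedged sketch, assuming the ioctl is exported as DRM_IOCTL_I915_GEM_MMAP_GTT and that struct drm_i915_gem_mmap_gtt carries handle and offset as in this series:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_bo_through_gtt(int drm_fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

        /* ask the kernel for the fake offset set up above */
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return MAP_FAILED;

        /* mmap() on that offset routes into drm_gem_mmap()/i915_gem_fault() */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    drm_fd, arg.offset);
}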
512 | static void | 797 | static void |
513 | i915_gem_object_free_page_list(struct drm_gem_object *obj) | 798 | i915_gem_object_free_page_list(struct drm_gem_object *obj) |
514 | { | 799 | { |
@@ -536,7 +821,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj) | |||
536 | } | 821 | } |
537 | 822 | ||
538 | static void | 823 | static void |
539 | i915_gem_object_move_to_active(struct drm_gem_object *obj) | 824 | i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) |
540 | { | 825 | { |
541 | struct drm_device *dev = obj->dev; | 826 | struct drm_device *dev = obj->dev; |
542 | drm_i915_private_t *dev_priv = dev->dev_private; | 827 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -550,8 +835,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj) | |||
550 | /* Move from whatever list we were on to the tail of execution. */ | 835 | /* Move from whatever list we were on to the tail of execution. */ |
551 | list_move_tail(&obj_priv->list, | 836 | list_move_tail(&obj_priv->list, |
552 | &dev_priv->mm.active_list); | 837 | &dev_priv->mm.active_list); |
838 | obj_priv->last_rendering_seqno = seqno; | ||
553 | } | 839 | } |
554 | 840 | ||
841 | static void | ||
842 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | ||
843 | { | ||
844 | struct drm_device *dev = obj->dev; | ||
845 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
846 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
847 | |||
848 | BUG_ON(!obj_priv->active); | ||
849 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | ||
850 | obj_priv->last_rendering_seqno = 0; | ||
851 | } | ||
555 | 852 | ||
556 | static void | 853 | static void |
557 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 854 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) |
@@ -566,6 +863,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
566 | else | 863 | else |
567 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 864 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
568 | 865 | ||
866 | obj_priv->last_rendering_seqno = 0; | ||
569 | if (obj_priv->active) { | 867 | if (obj_priv->active) { |
570 | obj_priv->active = 0; | 868 | obj_priv->active = 0; |
571 | drm_gem_object_unreference(obj); | 869 | drm_gem_object_unreference(obj); |
@@ -614,10 +912,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) | |||
614 | 912 | ||
615 | request->seqno = seqno; | 913 | request->seqno = seqno; |
616 | request->emitted_jiffies = jiffies; | 914 | request->emitted_jiffies = jiffies; |
617 | request->flush_domains = flush_domains; | ||
618 | was_empty = list_empty(&dev_priv->mm.request_list); | 915 | was_empty = list_empty(&dev_priv->mm.request_list); |
619 | list_add_tail(&request->list, &dev_priv->mm.request_list); | 916 | list_add_tail(&request->list, &dev_priv->mm.request_list); |
620 | 917 | ||
918 | /* Associate any objects on the flushing list matching the write | ||
919 | * domain we're flushing with our flush. | ||
920 | */ | ||
921 | if (flush_domains != 0) { | ||
922 | struct drm_i915_gem_object *obj_priv, *next; | ||
923 | |||
924 | list_for_each_entry_safe(obj_priv, next, | ||
925 | &dev_priv->mm.flushing_list, list) { | ||
926 | struct drm_gem_object *obj = obj_priv->obj; | ||
927 | |||
928 | if ((obj->write_domain & flush_domains) == | ||
929 | obj->write_domain) { | ||
930 | obj->write_domain = 0; | ||
931 | i915_gem_object_move_to_active(obj, seqno); | ||
932 | } | ||
933 | } | ||
934 | |||
935 | } | ||
936 | |||
621 | if (was_empty && !dev_priv->mm.suspended) | 937 | if (was_empty && !dev_priv->mm.suspended) |
622 | schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | 938 | schedule_delayed_work(&dev_priv->mm.retire_work, HZ); |
623 | return seqno; | 939 | return seqno; |
@@ -675,35 +991,16 @@ i915_gem_retire_request(struct drm_device *dev, | |||
675 | */ | 991 | */ |
676 | if (obj_priv->last_rendering_seqno != request->seqno) | 992 | if (obj_priv->last_rendering_seqno != request->seqno) |
677 | return; | 993 | return; |
994 | |||
678 | #if WATCH_LRU | 995 | #if WATCH_LRU |
679 | DRM_INFO("%s: retire %d moves to inactive list %p\n", | 996 | DRM_INFO("%s: retire %d moves to inactive list %p\n", |
680 | __func__, request->seqno, obj); | 997 | __func__, request->seqno, obj); |
681 | #endif | 998 | #endif |
682 | 999 | ||
683 | if (obj->write_domain != 0) { | 1000 | if (obj->write_domain != 0) |
684 | list_move_tail(&obj_priv->list, | 1001 | i915_gem_object_move_to_flushing(obj); |
685 | &dev_priv->mm.flushing_list); | 1002 | else |
686 | } else { | ||
687 | i915_gem_object_move_to_inactive(obj); | 1003 | i915_gem_object_move_to_inactive(obj); |
688 | } | ||
689 | } | ||
690 | |||
691 | if (request->flush_domains != 0) { | ||
692 | struct drm_i915_gem_object *obj_priv, *next; | ||
693 | |||
694 | /* Clear the write domain and activity from any buffers | ||
695 | * that are just waiting for a flush matching the one retired. | ||
696 | */ | ||
697 | list_for_each_entry_safe(obj_priv, next, | ||
698 | &dev_priv->mm.flushing_list, list) { | ||
699 | struct drm_gem_object *obj = obj_priv->obj; | ||
700 | |||
701 | if (obj->write_domain & request->flush_domains) { | ||
702 | obj->write_domain = 0; | ||
703 | i915_gem_object_move_to_inactive(obj); | ||
704 | } | ||
705 | } | ||
706 | |||
707 | } | 1004 | } |
708 | } | 1005 | } |
709 | 1006 | ||
@@ -896,25 +1193,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) | |||
896 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1193 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
897 | int ret; | 1194 | int ret; |
898 | 1195 | ||
899 | /* If there are writes queued to the buffer, flush and | 1196 | /* This function only exists to support waiting for existing rendering, |
900 | * create a new seqno to wait for. | 1197 | * not for emitting required flushes. |
901 | */ | 1198 | */ |
902 | if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { | 1199 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); |
903 | uint32_t write_domain = obj->write_domain; | ||
904 | #if WATCH_BUF | ||
905 | DRM_INFO("%s: flushing object %p from write domain %08x\n", | ||
906 | __func__, obj, write_domain); | ||
907 | #endif | ||
908 | i915_gem_flush(dev, 0, write_domain); | ||
909 | |||
910 | i915_gem_object_move_to_active(obj); | ||
911 | obj_priv->last_rendering_seqno = i915_add_request(dev, | ||
912 | write_domain); | ||
913 | BUG_ON(obj_priv->last_rendering_seqno == 0); | ||
914 | #if WATCH_LRU | ||
915 | DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); | ||
916 | #endif | ||
917 | } | ||
918 | 1200 | ||
919 | /* If there is rendering queued on the buffer being evicted, wait for | 1201 | /* If there is rendering queued on the buffer being evicted, wait for |
920 | * it. | 1202 | * it. |
@@ -935,11 +1217,12 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) | |||
935 | /** | 1217 | /** |
936 | * Unbinds an object from the GTT aperture. | 1218 | * Unbinds an object from the GTT aperture. |
937 | */ | 1219 | */ |
938 | static int | 1220 | int |
939 | i915_gem_object_unbind(struct drm_gem_object *obj) | 1221 | i915_gem_object_unbind(struct drm_gem_object *obj) |
940 | { | 1222 | { |
941 | struct drm_device *dev = obj->dev; | 1223 | struct drm_device *dev = obj->dev; |
942 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1224 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1225 | loff_t offset; | ||
943 | int ret = 0; | 1226 | int ret = 0; |
944 | 1227 | ||
945 | #if WATCH_BUF | 1228 | #if WATCH_BUF |
@@ -954,24 +1237,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
954 | return -EINVAL; | 1237 | return -EINVAL; |
955 | } | 1238 | } |
956 | 1239 | ||
957 | /* Wait for any rendering to complete | ||
958 | */ | ||
959 | ret = i915_gem_object_wait_rendering(obj); | ||
960 | if (ret) { | ||
961 | DRM_ERROR("wait_rendering failed: %d\n", ret); | ||
962 | return ret; | ||
963 | } | ||
964 | |||
965 | /* Move the object to the CPU domain to ensure that | 1240 | /* Move the object to the CPU domain to ensure that |
966 | * any possible CPU writes while it's not in the GTT | 1241 | * any possible CPU writes while it's not in the GTT |
967 | * are flushed when we go to remap it. This will | 1242 | * are flushed when we go to remap it. This will |
968 | * also ensure that all pending GPU writes are finished | 1243 | * also ensure that all pending GPU writes are finished |
969 | * before we unbind. | 1244 | * before we unbind. |
970 | */ | 1245 | */ |
971 | ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, | 1246 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
972 | I915_GEM_DOMAIN_CPU); | ||
973 | if (ret) { | 1247 | if (ret) { |
974 | DRM_ERROR("set_domain failed: %d\n", ret); | 1248 | if (ret != -ERESTARTSYS) |
1249 | DRM_ERROR("set_domain failed: %d\n", ret); | ||
975 | return ret; | 1250 | return ret; |
976 | } | 1251 | } |
977 | 1252 | ||
@@ -983,6 +1258,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
983 | 1258 | ||
984 | BUG_ON(obj_priv->active); | 1259 | BUG_ON(obj_priv->active); |
985 | 1260 | ||
1261 | /* blow away mappings if mapped through GTT */ | ||
1262 | offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; | ||
1263 | if (dev->dev_mapping) | ||
1264 | unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1); | ||
1265 | |||
1266 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
1267 | i915_gem_clear_fence_reg(obj); | ||
1268 | |||
986 | i915_gem_object_free_page_list(obj); | 1269 | i915_gem_object_free_page_list(obj); |
987 | 1270 | ||
988 | if (obj_priv->gtt_space) { | 1271 | if (obj_priv->gtt_space) { |
@@ -1087,6 +1370,21 @@ i915_gem_evict_something(struct drm_device *dev) | |||
1087 | } | 1370 | } |
1088 | 1371 | ||
1089 | static int | 1372 | static int |
1373 | i915_gem_evict_everything(struct drm_device *dev) | ||
1374 | { | ||
1375 | int ret; | ||
1376 | |||
1377 | for (;;) { | ||
1378 | ret = i915_gem_evict_something(dev); | ||
1379 | if (ret != 0) | ||
1380 | break; | ||
1381 | } | ||
1382 | if (ret == -ENOMEM) | ||
1383 | return 0; | ||
1384 | return ret; | ||
1385 | } | ||
1386 | |||
1387 | static int | ||
1090 | i915_gem_object_get_page_list(struct drm_gem_object *obj) | 1388 | i915_gem_object_get_page_list(struct drm_gem_object *obj) |
1091 | { | 1389 | { |
1092 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1390 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
@@ -1126,6 +1424,220 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) | |||
1126 | return 0; | 1424 | return 0; |
1127 | } | 1425 | } |
1128 | 1426 | ||
1427 | static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | ||
1428 | { | ||
1429 | struct drm_gem_object *obj = reg->obj; | ||
1430 | struct drm_device *dev = obj->dev; | ||
1431 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1432 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1433 | int regnum = obj_priv->fence_reg; | ||
1434 | uint64_t val; | ||
1435 | |||
1436 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | ||
1437 | 0xfffff000) << 32; | ||
1438 | val |= obj_priv->gtt_offset & 0xfffff000; | ||
1439 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | ||
1440 | if (obj_priv->tiling_mode == I915_TILING_Y) | ||
1441 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | ||
1442 | val |= I965_FENCE_REG_VALID; | ||
1443 | |||
1444 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | ||
1445 | } | ||
1446 | |||
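For illustration, the 64-bit value composed by i965_write_fence_reg() above can be reproduced in a user-space sketch; the shift and valid-bit constants below are assumptions standing in for the i915_reg.h definitions:

#include <stdint.h>
#include <stdio.h>

#define FENCE_PITCH_SHIFT    2          /* assumed, mirrors I965_FENCE_PITCH_SHIFT */
#define FENCE_TILING_Y_BIT   (1u << 1)  /* assumed, mirrors I965_FENCE_TILING_Y_SHIFT */
#define FENCE_REG_VALID      (1u << 0)  /* assumed, mirrors I965_FENCE_REG_VALID */

static uint64_t i965_fence_value(uint32_t gtt_offset, uint32_t size,
                                 uint32_t stride, int tiling_y)
{
        uint64_t val;

        /* end address (last 4K page) in the high dword, start in the low one */
        val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;
        if (tiling_y)
                val |= FENCE_TILING_Y_BIT;
        val |= FENCE_REG_VALID;
        return val;
}

int main(void)
{
        /* 1 MiB X-tiled object at GTT offset 16 MiB with a 2048-byte pitch */
        printf("%#llx\n",
               (unsigned long long)i965_fence_value(16 << 20, 1 << 20, 2048, 0));
        return 0;
}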
1447 | static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | ||
1448 | { | ||
1449 | struct drm_gem_object *obj = reg->obj; | ||
1450 | struct drm_device *dev = obj->dev; | ||
1451 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1452 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1453 | int regnum = obj_priv->fence_reg; | ||
1454 | int tile_width; | ||
1455 | uint32_t val; | ||
1456 | uint32_t pitch_val; | ||
1457 | |||
1458 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | ||
1459 | (obj_priv->gtt_offset & (obj->size - 1))) { | ||
1460 | WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", | ||
1461 | __func__, obj_priv->gtt_offset, obj->size); | ||
1462 | return; | ||
1463 | } | ||
1464 | |||
1465 | if (obj_priv->tiling_mode == I915_TILING_Y && | ||
1466 | HAS_128_BYTE_Y_TILING(dev)) | ||
1467 | tile_width = 128; | ||
1468 | else | ||
1469 | tile_width = 512; | ||
1470 | |||
1471 | /* Note: pitch better be a power of two tile widths */ | ||
1472 | pitch_val = obj_priv->stride / tile_width; | ||
1473 | pitch_val = ffs(pitch_val) - 1; | ||
1474 | |||
1475 | val = obj_priv->gtt_offset; | ||
1476 | if (obj_priv->tiling_mode == I915_TILING_Y) | ||
1477 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | ||
1478 | val |= I915_FENCE_SIZE_BITS(obj->size); | ||
1479 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | ||
1480 | val |= I830_FENCE_REG_VALID; | ||
1481 | |||
1482 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | ||
1483 | } | ||
1484 | |||
1485 | static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | ||
1486 | { | ||
1487 | struct drm_gem_object *obj = reg->obj; | ||
1488 | struct drm_device *dev = obj->dev; | ||
1489 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1490 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1491 | int regnum = obj_priv->fence_reg; | ||
1492 | uint32_t val; | ||
1493 | uint32_t pitch_val; | ||
1494 | |||
1495 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | ||
1496 | (obj_priv->gtt_offset & (obj->size - 1))) { | ||
1497 | WARN(1, "%s: object 0x%08x not 1M or size aligned\n", | ||
1498 | __func__, obj_priv->gtt_offset); | ||
1499 | return; | ||
1500 | } | ||
1501 | |||
1502 | pitch_val = (obj_priv->stride / 128) - 1; | ||
1503 | |||
1504 | val = obj_priv->gtt_offset; | ||
1505 | if (obj_priv->tiling_mode == I915_TILING_Y) | ||
1506 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | ||
1507 | val |= I830_FENCE_SIZE_BITS(obj->size); | ||
1508 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | ||
1509 | val |= I830_FENCE_REG_VALID; | ||
1510 | |||
1511 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | ||
1512 | |||
1513 | } | ||
1514 | |||
1515 | /** | ||
1516 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | ||
1517 | * @obj: object to map through a fence reg | ||
1518 | * @write: object is about to be written | ||
1519 | * | ||
1520 | * When mapping objects through the GTT, userspace wants to be able to write | ||
1521 | * to them without having to worry about swizzling if the object is tiled. | ||
1522 | * | ||
1523 | * This function walks the fence regs looking for a free one for @obj, | ||
1524 | * stealing one if it can't find any. | ||
1525 | * | ||
1526 | * It then sets up the reg based on the object's properties: address, pitch | ||
1527 | * and tiling format. | ||
1528 | */ | ||
1529 | static int | ||
1530 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) | ||
1531 | { | ||
1532 | struct drm_device *dev = obj->dev; | ||
1533 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1534 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1535 | struct drm_i915_fence_reg *reg = NULL; | ||
1536 | int i, ret; | ||
1537 | |||
1538 | switch (obj_priv->tiling_mode) { | ||
1539 | case I915_TILING_NONE: | ||
1540 | WARN(1, "allocating a fence for non-tiled object?\n"); | ||
1541 | break; | ||
1542 | case I915_TILING_X: | ||
1543 | if (!obj_priv->stride) | ||
1544 | return -EINVAL; | ||
1545 | WARN((obj_priv->stride & (512 - 1)), | ||
1546 | "object 0x%08x is X tiled but has non-512B pitch\n", | ||
1547 | obj_priv->gtt_offset); | ||
1548 | break; | ||
1549 | case I915_TILING_Y: | ||
1550 | if (!obj_priv->stride) | ||
1551 | return -EINVAL; | ||
1552 | WARN((obj_priv->stride & (128 - 1)), | ||
1553 | "object 0x%08x is Y tiled but has non-128B pitch\n", | ||
1554 | obj_priv->gtt_offset); | ||
1555 | break; | ||
1556 | } | ||
1557 | |||
1558 | /* First try to find a free reg */ | ||
1559 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | ||
1560 | reg = &dev_priv->fence_regs[i]; | ||
1561 | if (!reg->obj) | ||
1562 | break; | ||
1563 | } | ||
1564 | |||
1565 | /* None available, try to steal one or wait for a user to finish */ | ||
1566 | if (i == dev_priv->num_fence_regs) { | ||
1567 | struct drm_i915_gem_object *old_obj_priv = NULL; | ||
1568 | loff_t offset; | ||
1569 | |||
1570 | try_again: | ||
1571 | /* Could try to use LRU here instead... */ | ||
1572 | for (i = dev_priv->fence_reg_start; | ||
1573 | i < dev_priv->num_fence_regs; i++) { | ||
1574 | reg = &dev_priv->fence_regs[i]; | ||
1575 | old_obj_priv = reg->obj->driver_private; | ||
1576 | if (!old_obj_priv->pin_count) | ||
1577 | break; | ||
1578 | } | ||
1579 | |||
1580 | /* | ||
1581 | * Now things get ugly... we have to wait for one of the | ||
1582 | * objects to finish before trying again. | ||
1583 | */ | ||
1584 | if (i == dev_priv->num_fence_regs) { | ||
1585 | ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0); | ||
1586 | if (ret) { | ||
1587 | WARN(ret != -ERESTARTSYS, | ||
1588 | "switch to GTT domain failed: %d\n", ret); | ||
1589 | return ret; | ||
1590 | } | ||
1591 | goto try_again; | ||
1592 | } | ||
1593 | |||
1594 | /* | ||
1595 | * Zap this virtual mapping so we can set up a fence again | ||
1596 | * for this object next time we need it. | ||
1597 | */ | ||
1598 | offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; | ||
1599 | if (dev->dev_mapping) | ||
1600 | unmap_mapping_range(dev->dev_mapping, offset, | ||
1601 | reg->obj->size, 1); | ||
1602 | old_obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
1603 | } | ||
1604 | |||
1605 | obj_priv->fence_reg = i; | ||
1606 | reg->obj = obj; | ||
1607 | |||
1608 | if (IS_I965G(dev)) | ||
1609 | i965_write_fence_reg(reg); | ||
1610 | else if (IS_I9XX(dev)) | ||
1611 | i915_write_fence_reg(reg); | ||
1612 | else | ||
1613 | i830_write_fence_reg(reg); | ||
1614 | |||
1615 | return 0; | ||
1616 | } | ||
1617 | |||
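The allocation policy in i915_gem_object_get_fence_reg() above is: scan from fence_reg_start for a free register, and if none is free steal the first one whose owner is not pinned, waiting on the GPU when even that fails. A self-contained toy model of the free-else-steal scan (struct and function names are invented; the blocking retry is reduced to a -1 return):

    #include <stdio.h>

    #define NUM_REGS 5

    struct mock_obj {
        int pinned;
        int fence;                      /* index into regs[], or -1 */
    };

    static struct mock_obj *regs[NUM_REGS];    /* regs[i] == owner, or NULL */

    /* Same shape as the scan above: prefer a free register, otherwise steal
     * the first one whose owner is unpinned.  Returns the register index,
     * or -1 if every owner is pinned (the driver would wait and retry). */
    static int get_fence(struct mock_obj *obj, int reg_start)
    {
        int i;

        for (i = reg_start; i < NUM_REGS; i++)
            if (regs[i] == NULL)
                goto found;

        for (i = reg_start; i < NUM_REGS; i++)
            if (!regs[i]->pinned)
                break;
        if (i == NUM_REGS)
            return -1;

        regs[i]->fence = -1;            /* steal: the old owner loses its reg */
    found:
        regs[i] = obj;
        obj->fence = i;
        return i;
    }

    int main(void)
    {
        struct mock_obj a = { .pinned = 1, .fence = -1 };
        struct mock_obj b = { .pinned = 0, .fence = -1 };
        struct mock_obj c = { .pinned = 0, .fence = -1 };

        /* Registers 0-2 are reserved for old userspace, as in the driver. */
        printf("a -> %d\n", get_fence(&a, 3));
        printf("b -> %d\n", get_fence(&b, 3));
        printf("c -> %d (stolen)\n", get_fence(&c, 3));
        printf("b's reg after the steal: %d\n", b.fence);
        return 0;
    }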
1618 | /** | ||
1619 | * i915_gem_clear_fence_reg - clear out fence register info | ||
1620 | * @obj: object to clear | ||
1621 | * | ||
1622 | * Zeroes out the fence register itself and clears out the associated | ||
1623 | * data structures in dev_priv and obj_priv. | ||
1624 | */ | ||
1625 | static void | ||
1626 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | ||
1627 | { | ||
1628 | struct drm_device *dev = obj->dev; | ||
1629 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1630 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1631 | |||
1632 | if (IS_I965G(dev)) | ||
1633 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | ||
1634 | else | ||
1635 | I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0); | ||
1636 | |||
1637 | dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; | ||
1638 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
1639 | } | ||
1640 | |||
1129 | /** | 1641 | /** |
1130 | * Finds free space in the GTT aperture and binds the object there. | 1642 | * Finds free space in the GTT aperture and binds the object there. |
1131 | */ | 1643 | */ |
@@ -1138,8 +1650,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
1138 | struct drm_mm_node *free_space; | 1650 | struct drm_mm_node *free_space; |
1139 | int page_count, ret; | 1651 | int page_count, ret; |
1140 | 1652 | ||
1653 | if (dev_priv->mm.suspended) | ||
1654 | return -EBUSY; | ||
1141 | if (alignment == 0) | 1655 | if (alignment == 0) |
1142 | alignment = PAGE_SIZE; | 1656 | alignment = i915_gem_get_gtt_alignment(obj); |
1143 | if (alignment & (PAGE_SIZE - 1)) { | 1657 | if (alignment & (PAGE_SIZE - 1)) { |
1144 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); | 1658 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); |
1145 | return -EINVAL; | 1659 | return -EINVAL; |
@@ -1172,7 +1686,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
1172 | 1686 | ||
1173 | ret = i915_gem_evict_something(dev); | 1687 | ret = i915_gem_evict_something(dev); |
1174 | if (ret != 0) { | 1688 | if (ret != 0) { |
1175 | DRM_ERROR("Failed to evict a buffer %d\n", ret); | 1689 | if (ret != -ERESTARTSYS) |
1690 | DRM_ERROR("Failed to evict a buffer %d\n", ret); | ||
1176 | return ret; | 1691 | return ret; |
1177 | } | 1692 | } |
1178 | goto search_free; | 1693 | goto search_free; |
@@ -1232,6 +1747,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
1232 | drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); | 1747 | drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); |
1233 | } | 1748 | } |
1234 | 1749 | ||
1750 | /** Flushes any GPU write domain for the object if it's dirty. */ | ||
1751 | static void | ||
1752 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | ||
1753 | { | ||
1754 | struct drm_device *dev = obj->dev; | ||
1755 | uint32_t seqno; | ||
1756 | |||
1757 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | ||
1758 | return; | ||
1759 | |||
1760 | /* Queue the GPU write cache flushing we need. */ | ||
1761 | i915_gem_flush(dev, 0, obj->write_domain); | ||
1762 | seqno = i915_add_request(dev, obj->write_domain); | ||
1763 | obj->write_domain = 0; | ||
1764 | i915_gem_object_move_to_active(obj, seqno); | ||
1765 | } | ||
1766 | |||
1767 | /** Flushes the GTT write domain for the object if it's dirty. */ | ||
1768 | static void | ||
1769 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | ||
1770 | { | ||
1771 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | ||
1772 | return; | ||
1773 | |||
1774 | /* No actual flushing is required for the GTT write domain. Writes | ||
1775 | * to it immediately go to main memory as far as we know, so there's | ||
1776 | * no chipset flush. It also doesn't land in render cache. | ||
1777 | */ | ||
1778 | obj->write_domain = 0; | ||
1779 | } | ||
1780 | |||
1781 | /** Flushes the CPU write domain for the object if it's dirty. */ | ||
1782 | static void | ||
1783 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | ||
1784 | { | ||
1785 | struct drm_device *dev = obj->dev; | ||
1786 | |||
1787 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | ||
1788 | return; | ||
1789 | |||
1790 | i915_gem_clflush_object(obj); | ||
1791 | drm_agp_chipset_flush(dev); | ||
1792 | obj->write_domain = 0; | ||
1793 | } | ||
1794 | |||
1795 | /** | ||
1796 | * Moves a single object to the GTT read, and possibly write domain. | ||
1797 | * | ||
1798 | * This function returns when the move is complete, including waiting on | ||
1799 | * flushes to occur. | ||
1800 | */ | ||
1801 | int | ||
1802 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | ||
1803 | { | ||
1804 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1805 | int ret; | ||
1806 | |||
1807 | /* Not valid to be called on unbound objects. */ | ||
1808 | if (obj_priv->gtt_space == NULL) | ||
1809 | return -EINVAL; | ||
1810 | |||
1811 | i915_gem_object_flush_gpu_write_domain(obj); | ||
1812 | /* Wait on any GPU rendering and flushing to occur. */ | ||
1813 | ret = i915_gem_object_wait_rendering(obj); | ||
1814 | if (ret != 0) | ||
1815 | return ret; | ||
1816 | |||
1817 | /* If we're writing through the GTT domain, then CPU and GPU caches | ||
1818 | * will need to be invalidated at next use. | ||
1819 | */ | ||
1820 | if (write) | ||
1821 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
1822 | |||
1823 | i915_gem_object_flush_cpu_write_domain(obj); | ||
1824 | |||
1825 | /* It should now be out of any other write domains, and we can update | ||
1826 | * the domain values for our changes. | ||
1827 | */ | ||
1828 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
1829 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | ||
1830 | if (write) { | ||
1831 | obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
1832 | obj_priv->dirty = 1; | ||
1833 | } | ||
1834 | |||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
1838 | /** | ||
1839 | * Moves a single object to the CPU read, and possibly write domain. | ||
1840 | * | ||
1841 | * This function returns when the move is complete, including waiting on | ||
1842 | * flushes to occur. | ||
1843 | */ | ||
1844 | static int | ||
1845 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | ||
1846 | { | ||
1847 | struct drm_device *dev = obj->dev; | ||
1848 | int ret; | ||
1849 | |||
1850 | i915_gem_object_flush_gpu_write_domain(obj); | ||
1851 | /* Wait on any GPU rendering and flushing to occur. */ | ||
1852 | ret = i915_gem_object_wait_rendering(obj); | ||
1853 | if (ret != 0) | ||
1854 | return ret; | ||
1855 | |||
1856 | i915_gem_object_flush_gtt_write_domain(obj); | ||
1857 | |||
1858 | /* If we have a partially-valid cache of the object in the CPU, | ||
1859 | * finish invalidating it and free the per-page flags. | ||
1860 | */ | ||
1861 | i915_gem_object_set_to_full_cpu_read_domain(obj); | ||
1862 | |||
1863 | /* Flush the CPU cache if it's still invalid. */ | ||
1864 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | ||
1865 | i915_gem_clflush_object(obj); | ||
1866 | drm_agp_chipset_flush(dev); | ||
1867 | |||
1868 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | ||
1869 | } | ||
1870 | |||
1871 | /* It should now be out of any other write domains, and we can update | ||
1872 | * the domain values for our changes. | ||
1873 | */ | ||
1874 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | ||
1875 | |||
1876 | /* If we're writing through the CPU, then the GPU read domains will | ||
1877 | * need to be invalidated at next use. | ||
1878 | */ | ||
1879 | if (write) { | ||
1880 | obj->read_domains &= I915_GEM_DOMAIN_CPU; | ||
1881 | obj->write_domain = I915_GEM_DOMAIN_CPU; | ||
1882 | } | ||
1883 | |||
1884 | return 0; | ||
1885 | } | ||
1886 | |||
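Taken together, the two helpers above maintain one invariant: an object may be readable from several domains at once but dirty in at most one, and writing through a domain shrinks the read set to that domain so every other cache must revalidate later. A compact userspace model of just the bitmask bookkeeping (the domain bits and names here are illustrative, not the driver's constants; flushing and GPU waits are elided):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DOM_CPU (1u << 0)           /* illustrative bit values only */
    #define DOM_GTT (1u << 1)

    struct mock_obj {
        uint32_t read_domains;          /* caches currently holding valid data */
        uint32_t write_domain;          /* at most one domain may be dirty */
    };

    /* Bookkeeping of i915_gem_object_set_to_gtt_domain(): pending CPU writes
     * are "flushed", GTT becomes readable, a write drops other read domains. */
    static void set_to_gtt(struct mock_obj *obj, int write)
    {
        if (obj->write_domain == DOM_CPU)
            obj->write_domain = 0;      /* stands in for the clflush */
        if (write)
            obj->read_domains &= DOM_GTT;
        obj->read_domains |= DOM_GTT;
        if (write)
            obj->write_domain = DOM_GTT;
    }

    /* Counterpart of i915_gem_object_set_to_cpu_domain(). */
    static void set_to_cpu(struct mock_obj *obj, int write)
    {
        if (obj->write_domain == DOM_GTT)
            obj->write_domain = 0;      /* GTT writes need no real flush */
        obj->read_domains |= DOM_CPU;
        if (write) {
            obj->read_domains &= DOM_CPU;
            obj->write_domain = DOM_CPU;
        }
    }

    int main(void)
    {
        struct mock_obj obj = { .read_domains = DOM_CPU, .write_domain = 0 };

        set_to_gtt(&obj, 1);            /* map for GTT writes */
        assert(obj.read_domains == DOM_GTT && obj.write_domain == DOM_GTT);

        set_to_cpu(&obj, 0);            /* read back: CPU joins the read set */
        assert(obj.read_domains == (DOM_GTT | DOM_CPU));
        assert(obj.write_domain == 0);

        printf("read %#x write %#x\n", obj.read_domains, obj.write_domain);
        return 0;
    }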
1235 | /* | 1887 | /* |
1236 | * Set the next domain for the specified object. This | 1888 | * Set the next domain for the specified object. This |
1237 | * may not actually perform the necessary flushing/invaliding though, | 1889 | * may not actually perform the necessary flushing/invaliding though, |
@@ -1343,16 +1995,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
1343 | * MI_FLUSH | 1995 | * MI_FLUSH |
1344 | * drm_agp_chipset_flush | 1996 | * drm_agp_chipset_flush |
1345 | */ | 1997 | */ |
1346 | static int | 1998 | static void |
1347 | i915_gem_object_set_domain(struct drm_gem_object *obj, | 1999 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, |
1348 | uint32_t read_domains, | 2000 | uint32_t read_domains, |
1349 | uint32_t write_domain) | 2001 | uint32_t write_domain) |
1350 | { | 2002 | { |
1351 | struct drm_device *dev = obj->dev; | 2003 | struct drm_device *dev = obj->dev; |
1352 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2004 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1353 | uint32_t invalidate_domains = 0; | 2005 | uint32_t invalidate_domains = 0; |
1354 | uint32_t flush_domains = 0; | 2006 | uint32_t flush_domains = 0; |
1355 | int ret; | 2007 | |
2008 | BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); | ||
2009 | BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); | ||
1356 | 2010 | ||
1357 | #if WATCH_BUF | 2011 | #if WATCH_BUF |
1358 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", | 2012 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", |
@@ -1389,34 +2043,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, | |||
1389 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", | 2043 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", |
1390 | __func__, flush_domains, invalidate_domains); | 2044 | __func__, flush_domains, invalidate_domains); |
1391 | #endif | 2045 | #endif |
1392 | /* | ||
1393 | * If we're invaliding the CPU cache and flushing a GPU cache, | ||
1394 | * then pause for rendering so that the GPU caches will be | ||
1395 | * flushed before the cpu cache is invalidated | ||
1396 | */ | ||
1397 | if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && | ||
1398 | (flush_domains & ~(I915_GEM_DOMAIN_CPU | | ||
1399 | I915_GEM_DOMAIN_GTT))) { | ||
1400 | ret = i915_gem_object_wait_rendering(obj); | ||
1401 | if (ret) | ||
1402 | return ret; | ||
1403 | } | ||
1404 | i915_gem_clflush_object(obj); | 2046 | i915_gem_clflush_object(obj); |
1405 | } | 2047 | } |
1406 | 2048 | ||
1407 | if ((write_domain | flush_domains) != 0) | 2049 | if ((write_domain | flush_domains) != 0) |
1408 | obj->write_domain = write_domain; | 2050 | obj->write_domain = write_domain; |
1409 | |||
1410 | /* If we're invalidating the CPU domain, clear the per-page CPU | ||
1411 | * domain list as well. | ||
1412 | */ | ||
1413 | if (obj_priv->page_cpu_valid != NULL && | ||
1414 | (write_domain != 0 || | ||
1415 | read_domains & I915_GEM_DOMAIN_CPU)) { | ||
1416 | drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, | ||
1417 | DRM_MEM_DRIVER); | ||
1418 | obj_priv->page_cpu_valid = NULL; | ||
1419 | } | ||
1420 | obj->read_domains = read_domains; | 2051 | obj->read_domains = read_domains; |
1421 | 2052 | ||
1422 | dev->invalidate_domains |= invalidate_domains; | 2053 | dev->invalidate_domains |= invalidate_domains; |
@@ -1427,47 +2058,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, | |||
1427 | obj->read_domains, obj->write_domain, | 2058 | obj->read_domains, obj->write_domain, |
1428 | dev->invalidate_domains, dev->flush_domains); | 2059 | dev->invalidate_domains, dev->flush_domains); |
1429 | #endif | 2060 | #endif |
1430 | return 0; | ||
1431 | } | 2061 | } |
1432 | 2062 | ||
1433 | /** | 2063 | /** |
1434 | * Set the read/write domain on a range of the object. | 2064 | * Moves the object from a partially CPU read to a full one. |
1435 | * | 2065 | * |
1436 | * Currently only implemented for CPU reads, otherwise drops to normal | 2066 | * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), |
1437 | * i915_gem_object_set_domain(). | 2067 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). |
1438 | */ | 2068 | */ |
1439 | static int | 2069 | static void |
1440 | i915_gem_object_set_domain_range(struct drm_gem_object *obj, | 2070 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) |
1441 | uint64_t offset, | ||
1442 | uint64_t size, | ||
1443 | uint32_t read_domains, | ||
1444 | uint32_t write_domain) | ||
1445 | { | 2071 | { |
2072 | struct drm_device *dev = obj->dev; | ||
1446 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2073 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1447 | int ret, i; | ||
1448 | 2074 | ||
1449 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) | 2075 | if (!obj_priv->page_cpu_valid) |
1450 | return 0; | 2076 | return; |
1451 | 2077 | ||
1452 | if (read_domains != I915_GEM_DOMAIN_CPU || | 2078 | /* If we're partially in the CPU read domain, finish moving it in. |
1453 | write_domain != 0) | 2079 | */ |
1454 | return i915_gem_object_set_domain(obj, | 2080 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { |
1455 | read_domains, write_domain); | 2081 | int i; |
1456 | 2082 | ||
1457 | /* Wait on any GPU rendering to the object to be flushed. */ | 2083 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { |
2084 | if (obj_priv->page_cpu_valid[i]) | ||
2085 | continue; | ||
2086 | drm_clflush_pages(obj_priv->page_list + i, 1); | ||
2087 | } | ||
2088 | drm_agp_chipset_flush(dev); | ||
2089 | } | ||
2090 | |||
2091 | /* Free the page_cpu_valid mappings which are now stale, whether | ||
2092 | * or not we've got I915_GEM_DOMAIN_CPU. | ||
2093 | */ | ||
2094 | drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, | ||
2095 | DRM_MEM_DRIVER); | ||
2096 | obj_priv->page_cpu_valid = NULL; | ||
2097 | } | ||
2098 | |||
2099 | /** | ||
2100 | * Set the CPU read domain on a range of the object. | ||
2101 | * | ||
2102 | * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's | ||
2103 | * not entirely valid. The page_cpu_valid member of the object flags which | ||
2104 | * pages have been flushed, and will be respected by | ||
2105 | * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping | ||
2106 | * of the whole object. | ||
2107 | * | ||
2108 | * This function returns when the move is complete, including waiting on | ||
2109 | * flushes to occur. | ||
2110 | */ | ||
2111 | static int | ||
2112 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | ||
2113 | uint64_t offset, uint64_t size) | ||
2114 | { | ||
2115 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2116 | int i, ret; | ||
2117 | |||
2118 | if (offset == 0 && size == obj->size) | ||
2119 | return i915_gem_object_set_to_cpu_domain(obj, 0); | ||
2120 | |||
2121 | i915_gem_object_flush_gpu_write_domain(obj); | ||
2122 | /* Wait on any GPU rendering and flushing to occur. */ | ||
1458 | ret = i915_gem_object_wait_rendering(obj); | 2123 | ret = i915_gem_object_wait_rendering(obj); |
1459 | if (ret) | 2124 | if (ret != 0) |
1460 | return ret; | 2125 | return ret; |
2126 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2127 | |||
2128 | /* If we're already fully in the CPU read domain, we're done. */ | ||
2129 | if (obj_priv->page_cpu_valid == NULL && | ||
2130 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | ||
2131 | return 0; | ||
1461 | 2132 | ||
2133 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | ||
2134 | * newly adding I915_GEM_DOMAIN_CPU | ||
2135 | */ | ||
1462 | if (obj_priv->page_cpu_valid == NULL) { | 2136 | if (obj_priv->page_cpu_valid == NULL) { |
1463 | obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, | 2137 | obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, |
1464 | DRM_MEM_DRIVER); | 2138 | DRM_MEM_DRIVER); |
1465 | } | 2139 | if (obj_priv->page_cpu_valid == NULL) |
2140 | return -ENOMEM; | ||
2141 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | ||
2142 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | ||
1466 | 2143 | ||
1467 | /* Flush the cache on any pages that are still invalid from the CPU's | 2144 | /* Flush the cache on any pages that are still invalid from the CPU's |
1468 | * perspective. | 2145 | * perspective. |
1469 | */ | 2146 | */ |
1470 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { | 2147 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; |
2148 | i++) { | ||
1471 | if (obj_priv->page_cpu_valid[i]) | 2149 | if (obj_priv->page_cpu_valid[i]) |
1472 | continue; | 2150 | continue; |
1473 | 2151 | ||
@@ -1476,39 +2154,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj, | |||
1476 | obj_priv->page_cpu_valid[i] = 1; | 2154 | obj_priv->page_cpu_valid[i] = 1; |
1477 | } | 2155 | } |
1478 | 2156 | ||
1479 | return 0; | 2157 | /* It should now be out of any other write domains, and we can update |
1480 | } | 2158 | * the domain values for our changes. |
1481 | |||
1482 | /** | ||
1483 | * Once all of the objects have been set in the proper domain, | ||
1484 | * perform the necessary flush and invalidate operations. | ||
1485 | * | ||
1486 | * Returns the write domains flushed, for use in flush tracking. | ||
1487 | */ | ||
1488 | static uint32_t | ||
1489 | i915_gem_dev_set_domain(struct drm_device *dev) | ||
1490 | { | ||
1491 | uint32_t flush_domains = dev->flush_domains; | ||
1492 | |||
1493 | /* | ||
1494 | * Now that all the buffers are synced to the proper domains, | ||
1495 | * flush and invalidate the collected domains | ||
1496 | */ | 2159 | */ |
1497 | if (dev->invalidate_domains | dev->flush_domains) { | 2160 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
1498 | #if WATCH_EXEC | ||
1499 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
1500 | __func__, | ||
1501 | dev->invalidate_domains, | ||
1502 | dev->flush_domains); | ||
1503 | #endif | ||
1504 | i915_gem_flush(dev, | ||
1505 | dev->invalidate_domains, | ||
1506 | dev->flush_domains); | ||
1507 | dev->invalidate_domains = 0; | ||
1508 | dev->flush_domains = 0; | ||
1509 | } | ||
1510 | 2161 | ||
1511 | return flush_domains; | 2162 | obj->read_domains |= I915_GEM_DOMAIN_CPU; |
2163 | |||
2164 | return 0; | ||
1512 | } | 2165 | } |
1513 | 2166 | ||
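The two helpers above split partial CPU reads from full ones: set_cpu_read_domain_range() keeps a byte-per-page page_cpu_valid array so a small read only clflushes the pages that are not yet CPU-visible, while set_to_full_cpu_read_domain() later flushes whatever is still unmarked and frees the array. A standalone sketch of the per-page bookkeeping (flush_page() stands in for drm_clflush_pages(); the rest is invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u

    struct mock_obj {
        size_t size;
        unsigned char *page_cpu_valid;  /* one byte per page, 1 = CPU-visible */
    };

    static void flush_page(size_t page)
    {
        printf("clflush page %zu\n", page);
    }

    /* Mirrors i915_gem_object_set_cpu_read_domain_range(): only pages that
     * overlap [offset, offset + size) and are not yet valid get flushed. */
    static int set_cpu_read_range(struct mock_obj *obj, size_t offset, size_t size)
    {
        size_t i;

        if (obj->page_cpu_valid == NULL) {
            obj->page_cpu_valid = calloc(obj->size / PAGE_SIZE, 1);
            if (obj->page_cpu_valid == NULL)
                return -1;
        }

        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
            if (obj->page_cpu_valid[i])
                continue;
            flush_page(i);
            obj->page_cpu_valid[i] = 1;
        }
        return 0;
    }

    int main(void)
    {
        struct mock_obj obj = { .size = 8 * PAGE_SIZE, .page_cpu_valid = NULL };

        set_cpu_read_range(&obj, 5000, 100);    /* touches page 1 only */
        set_cpu_read_range(&obj, 4096, 8192);   /* page 1 cached, page 2 flushed */
        free(obj.page_cpu_valid);
        return 0;
    }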
1514 | /** | 2167 | /** |
@@ -1589,6 +2242,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
1589 | return -EINVAL; | 2242 | return -EINVAL; |
1590 | } | 2243 | } |
1591 | 2244 | ||
2245 | if (reloc.write_domain & I915_GEM_DOMAIN_CPU || | ||
2246 | reloc.read_domains & I915_GEM_DOMAIN_CPU) { | ||
2247 | DRM_ERROR("reloc with read/write CPU domains: " | ||
2248 | "obj %p target %d offset %d " | ||
2249 | "read %08x write %08x", | ||
2250 | obj, reloc.target_handle, | ||
2251 | (int) reloc.offset, | ||
2252 | reloc.read_domains, | ||
2253 | reloc.write_domain); | ||
2254 | return -EINVAL; | ||
2255 | } | ||
2256 | |||
1592 | if (reloc.write_domain && target_obj->pending_write_domain && | 2257 | if (reloc.write_domain && target_obj->pending_write_domain && |
1593 | reloc.write_domain != target_obj->pending_write_domain) { | 2258 | reloc.write_domain != target_obj->pending_write_domain) { |
1594 | DRM_ERROR("Write domain conflict: " | 2259 | DRM_ERROR("Write domain conflict: " |
@@ -1629,19 +2294,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
1629 | continue; | 2294 | continue; |
1630 | } | 2295 | } |
1631 | 2296 | ||
1632 | /* Now that we're going to actually write some data in, | 2297 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
1633 | * make sure that any rendering using this buffer's contents | 2298 | if (ret != 0) { |
1634 | * is completed. | 2299 | drm_gem_object_unreference(target_obj); |
1635 | */ | 2300 | i915_gem_object_unpin(obj); |
1636 | i915_gem_object_wait_rendering(obj); | 2301 | return -EINVAL; |
1637 | |||
1638 | /* As we're writing through the gtt, flush | ||
1639 | * any CPU writes before we write the relocations | ||
1640 | */ | ||
1641 | if (obj->write_domain & I915_GEM_DOMAIN_CPU) { | ||
1642 | i915_gem_clflush_object(obj); | ||
1643 | drm_agp_chipset_flush(dev); | ||
1644 | obj->write_domain = 0; | ||
1645 | } | 2302 | } |
1646 | 2303 | ||
1647 | /* Map the page containing the relocation we're going to | 2304 | /* Map the page containing the relocation we're going to |
@@ -1783,6 +2440,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1783 | int ret, i, pinned = 0; | 2440 | int ret, i, pinned = 0; |
1784 | uint64_t exec_offset; | 2441 | uint64_t exec_offset; |
1785 | uint32_t seqno, flush_domains; | 2442 | uint32_t seqno, flush_domains; |
2443 | int pin_tries; | ||
1786 | 2444 | ||
1787 | #if WATCH_EXEC | 2445 | #if WATCH_EXEC |
1788 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 2446 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", |
@@ -1831,14 +2489,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1831 | return -EBUSY; | 2489 | return -EBUSY; |
1832 | } | 2490 | } |
1833 | 2491 | ||
1834 | /* Zero the gloabl flush/invalidate flags. These | 2492 | /* Look up object handles */ |
1835 | * will be modified as each object is bound to the | ||
1836 | * gtt | ||
1837 | */ | ||
1838 | dev->invalidate_domains = 0; | ||
1839 | dev->flush_domains = 0; | ||
1840 | |||
1841 | /* Look up object handles and perform the relocations */ | ||
1842 | for (i = 0; i < args->buffer_count; i++) { | 2493 | for (i = 0; i < args->buffer_count; i++) { |
1843 | object_list[i] = drm_gem_object_lookup(dev, file_priv, | 2494 | object_list[i] = drm_gem_object_lookup(dev, file_priv, |
1844 | exec_list[i].handle); | 2495 | exec_list[i].handle); |
@@ -1848,17 +2499,41 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1848 | ret = -EBADF; | 2499 | ret = -EBADF; |
1849 | goto err; | 2500 | goto err; |
1850 | } | 2501 | } |
2502 | } | ||
2503 | |||
2504 | /* Pin and relocate */ | ||
2505 | for (pin_tries = 0; ; pin_tries++) { | ||
2506 | ret = 0; | ||
2507 | for (i = 0; i < args->buffer_count; i++) { | ||
2508 | object_list[i]->pending_read_domains = 0; | ||
2509 | object_list[i]->pending_write_domain = 0; | ||
2510 | ret = i915_gem_object_pin_and_relocate(object_list[i], | ||
2511 | file_priv, | ||
2512 | &exec_list[i]); | ||
2513 | if (ret) | ||
2514 | break; | ||
2515 | pinned = i + 1; | ||
2516 | } | ||
2517 | /* success */ | ||
2518 | if (ret == 0) | ||
2519 | break; | ||
1851 | 2520 | ||
1852 | object_list[i]->pending_read_domains = 0; | 2521 | /* error other than GTT full, or we've already tried again */ |
1853 | object_list[i]->pending_write_domain = 0; | 2522 | if (ret != -ENOMEM || pin_tries >= 1) { |
1854 | ret = i915_gem_object_pin_and_relocate(object_list[i], | 2523 | if (ret != -ERESTARTSYS) |
1855 | file_priv, | 2524 | DRM_ERROR("Failed to pin buffers %d\n", ret); |
1856 | &exec_list[i]); | ||
1857 | if (ret) { | ||
1858 | DRM_ERROR("object bind and relocate failed %d\n", ret); | ||
1859 | goto err; | 2525 | goto err; |
1860 | } | 2526 | } |
1861 | pinned = i + 1; | 2527 | |
2528 | /* unpin all of our buffers */ | ||
2529 | for (i = 0; i < pinned; i++) | ||
2530 | i915_gem_object_unpin(object_list[i]); | ||
2531 | pinned = 0; | ||
2532 | |||
2533 | /* evict everyone we can from the aperture */ | ||
2534 | ret = i915_gem_evict_everything(dev); | ||
2535 | if (ret) | ||
2536 | goto err; | ||
1862 | } | 2537 | } |
1863 | 2538 | ||
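On a first -ENOMEM the execbuffer path above unpins everything it managed to pin, evicts the whole aperture, and retries exactly once before reporting failure. The retry shape in isolation (pin_one(), unpin_one() and evict_all() are mock stand-ins driving a tiny fake aperture, not driver calls):

    #include <errno.h>
    #include <stdio.h>

    /* Mock aperture: room for two buffers until evict_all() empties it. */
    static int room = 2;
    static int pin_one(int idx)
    {
        if (room == 0)
            return -ENOMEM;
        room--;
        printf("pinned buffer %d\n", idx);
        return 0;
    }
    static void unpin_one(int idx) { room++; printf("unpinned buffer %d\n", idx); }
    static void evict_all(void)    { room = 4; printf("evicted aperture\n"); }

    /* Same retry shape as the pin loop above: one full retry after evicting,
     * any other failure (or a second -ENOMEM) is fatal. */
    static int pin_all(int count)
    {
        int pin_tries, pinned = 0, ret, i;

        for (pin_tries = 0; ; pin_tries++) {
            ret = 0;
            for (i = 0; i < count; i++) {
                ret = pin_one(i);
                if (ret)
                    break;
                pinned = i + 1;
            }
            if (ret == 0)
                break;                  /* success */
            if (ret != -ENOMEM || pin_tries >= 1)
                return ret;             /* hard failure */

            for (i = 0; i < pinned; i++)
                unpin_one(i);
            pinned = 0;
            evict_all();
        }
        return 0;
    }

    int main(void)
    {
        return pin_all(3) ? 1 : 0;      /* first pass fails, retry succeeds */
    }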
1864 | /* Set the pending read domains for the batch buffer to COMMAND */ | 2539 | /* Set the pending read domains for the batch buffer to COMMAND */ |
@@ -1868,32 +2543,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1868 | 2543 | ||
1869 | i915_verify_inactive(dev, __FILE__, __LINE__); | 2544 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1870 | 2545 | ||
2546 | /* Zero the global flush/invalidate flags. These | ||
2547 | * will be modified as new domains are computed | ||
2548 | * for each object | ||
2549 | */ | ||
2550 | dev->invalidate_domains = 0; | ||
2551 | dev->flush_domains = 0; | ||
2552 | |||
1871 | for (i = 0; i < args->buffer_count; i++) { | 2553 | for (i = 0; i < args->buffer_count; i++) { |
1872 | struct drm_gem_object *obj = object_list[i]; | 2554 | struct drm_gem_object *obj = object_list[i]; |
1873 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1874 | 2555 | ||
1875 | if (obj_priv->gtt_space == NULL) { | 2556 | /* Compute new gpu domains and update invalidate/flush */ |
1876 | /* We evicted the buffer in the process of validating | 2557 | i915_gem_object_set_to_gpu_domain(obj, |
1877 | * our set of buffers in. We could try to recover by | 2558 | obj->pending_read_domains, |
1878 | * kicking them everything out and trying again from | 2559 | obj->pending_write_domain); |
1879 | * the start. | ||
1880 | */ | ||
1881 | ret = -ENOMEM; | ||
1882 | goto err; | ||
1883 | } | ||
1884 | |||
1885 | /* make sure all previous memory operations have passed */ | ||
1886 | ret = i915_gem_object_set_domain(obj, | ||
1887 | obj->pending_read_domains, | ||
1888 | obj->pending_write_domain); | ||
1889 | if (ret) | ||
1890 | goto err; | ||
1891 | } | 2560 | } |
1892 | 2561 | ||
1893 | i915_verify_inactive(dev, __FILE__, __LINE__); | 2562 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1894 | 2563 | ||
1895 | /* Flush/invalidate caches and chipset buffer */ | 2564 | if (dev->invalidate_domains | dev->flush_domains) { |
1896 | flush_domains = i915_gem_dev_set_domain(dev); | 2565 | #if WATCH_EXEC |
2566 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
2567 | __func__, | ||
2568 | dev->invalidate_domains, | ||
2569 | dev->flush_domains); | ||
2570 | #endif | ||
2571 | i915_gem_flush(dev, | ||
2572 | dev->invalidate_domains, | ||
2573 | dev->flush_domains); | ||
2574 | if (dev->flush_domains) | ||
2575 | (void)i915_add_request(dev, dev->flush_domains); | ||
2576 | } | ||
1897 | 2577 | ||
1898 | i915_verify_inactive(dev, __FILE__, __LINE__); | 2578 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1899 | 2579 | ||
@@ -1913,8 +2593,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1913 | ~0); | 2593 | ~0); |
1914 | #endif | 2594 | #endif |
1915 | 2595 | ||
1916 | (void)i915_add_request(dev, flush_domains); | ||
1917 | |||
1918 | /* Exec the batchbuffer */ | 2596 | /* Exec the batchbuffer */ |
1919 | ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); | 2597 | ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); |
1920 | if (ret) { | 2598 | if (ret) { |
@@ -1942,10 +2620,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1942 | i915_file_priv->mm.last_gem_seqno = seqno; | 2620 | i915_file_priv->mm.last_gem_seqno = seqno; |
1943 | for (i = 0; i < args->buffer_count; i++) { | 2621 | for (i = 0; i < args->buffer_count; i++) { |
1944 | struct drm_gem_object *obj = object_list[i]; | 2622 | struct drm_gem_object *obj = object_list[i]; |
1945 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1946 | 2623 | ||
1947 | i915_gem_object_move_to_active(obj); | 2624 | i915_gem_object_move_to_active(obj, seqno); |
1948 | obj_priv->last_rendering_seqno = seqno; | ||
1949 | #if WATCH_LRU | 2625 | #if WATCH_LRU |
1950 | DRM_INFO("%s: move to exec list %p\n", __func__, obj); | 2626 | DRM_INFO("%s: move to exec list %p\n", __func__, obj); |
1951 | #endif | 2627 | #endif |
@@ -1966,13 +2642,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1966 | "back to user (%d)\n", | 2642 | "back to user (%d)\n", |
1967 | args->buffer_count, ret); | 2643 | args->buffer_count, ret); |
1968 | err: | 2644 | err: |
1969 | if (object_list != NULL) { | 2645 | for (i = 0; i < pinned; i++) |
1970 | for (i = 0; i < pinned; i++) | 2646 | i915_gem_object_unpin(object_list[i]); |
1971 | i915_gem_object_unpin(object_list[i]); | 2647 | |
2648 | for (i = 0; i < args->buffer_count; i++) | ||
2649 | drm_gem_object_unreference(object_list[i]); | ||
1972 | 2650 | ||
1973 | for (i = 0; i < args->buffer_count; i++) | ||
1974 | drm_gem_object_unreference(object_list[i]); | ||
1975 | } | ||
1976 | mutex_unlock(&dev->struct_mutex); | 2651 | mutex_unlock(&dev->struct_mutex); |
1977 | 2652 | ||
1978 | pre_mutex_err: | 2653 | pre_mutex_err: |
@@ -1995,9 +2670,18 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
1995 | if (obj_priv->gtt_space == NULL) { | 2670 | if (obj_priv->gtt_space == NULL) { |
1996 | ret = i915_gem_object_bind_to_gtt(obj, alignment); | 2671 | ret = i915_gem_object_bind_to_gtt(obj, alignment); |
1997 | if (ret != 0) { | 2672 | if (ret != 0) { |
1998 | DRM_ERROR("Failure to bind: %d", ret); | 2673 | if (ret != -EBUSY && ret != -ERESTARTSYS) |
2674 | DRM_ERROR("Failure to bind: %d", ret); | ||
1999 | return ret; | 2675 | return ret; |
2000 | } | 2676 | } |
2677 | /* | ||
2678 | * Pre-965 chips need a fence register set up in order to | ||
2679 | * properly handle tiled surfaces. | ||
2680 | */ | ||
2681 | if (!IS_I965G(dev) && | ||
2682 | obj_priv->fence_reg == I915_FENCE_REG_NONE && | ||
2683 | obj_priv->tiling_mode != I915_TILING_NONE) | ||
2684 | i915_gem_object_get_fence_reg(obj, true); | ||
2001 | } | 2685 | } |
2002 | obj_priv->pin_count++; | 2686 | obj_priv->pin_count++; |
2003 | 2687 | ||
@@ -2066,21 +2750,28 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
2066 | } | 2750 | } |
2067 | obj_priv = obj->driver_private; | 2751 | obj_priv = obj->driver_private; |
2068 | 2752 | ||
2069 | ret = i915_gem_object_pin(obj, args->alignment); | 2753 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { |
2070 | if (ret != 0) { | 2754 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
2071 | drm_gem_object_unreference(obj); | 2755 | args->handle); |
2072 | mutex_unlock(&dev->struct_mutex); | 2756 | mutex_unlock(&dev->struct_mutex); |
2073 | return ret; | 2757 | return -EINVAL; |
2758 | } | ||
2759 | |||
2760 | obj_priv->user_pin_count++; | ||
2761 | obj_priv->pin_filp = file_priv; | ||
2762 | if (obj_priv->user_pin_count == 1) { | ||
2763 | ret = i915_gem_object_pin(obj, args->alignment); | ||
2764 | if (ret != 0) { | ||
2765 | drm_gem_object_unreference(obj); | ||
2766 | mutex_unlock(&dev->struct_mutex); | ||
2767 | return ret; | ||
2768 | } | ||
2074 | } | 2769 | } |
2075 | 2770 | ||
2076 | /* XXX - flush the CPU caches for pinned objects | 2771 | /* XXX - flush the CPU caches for pinned objects |
2077 | * as the X server doesn't manage domains yet | 2772 | * as the X server doesn't manage domains yet |
2078 | */ | 2773 | */ |
2079 | if (obj->write_domain & I915_GEM_DOMAIN_CPU) { | 2774 | i915_gem_object_flush_cpu_write_domain(obj); |
2080 | i915_gem_clflush_object(obj); | ||
2081 | drm_agp_chipset_flush(dev); | ||
2082 | obj->write_domain = 0; | ||
2083 | } | ||
2084 | args->offset = obj_priv->gtt_offset; | 2775 | args->offset = obj_priv->gtt_offset; |
2085 | drm_gem_object_unreference(obj); | 2776 | drm_gem_object_unreference(obj); |
2086 | mutex_unlock(&dev->struct_mutex); | 2777 | mutex_unlock(&dev->struct_mutex); |
@@ -2094,6 +2785,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
2094 | { | 2785 | { |
2095 | struct drm_i915_gem_pin *args = data; | 2786 | struct drm_i915_gem_pin *args = data; |
2096 | struct drm_gem_object *obj; | 2787 | struct drm_gem_object *obj; |
2788 | struct drm_i915_gem_object *obj_priv; | ||
2097 | 2789 | ||
2098 | mutex_lock(&dev->struct_mutex); | 2790 | mutex_lock(&dev->struct_mutex); |
2099 | 2791 | ||
@@ -2105,7 +2797,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
2105 | return -EBADF; | 2797 | return -EBADF; |
2106 | } | 2798 | } |
2107 | 2799 | ||
2108 | i915_gem_object_unpin(obj); | 2800 | obj_priv = obj->driver_private; |
2801 | if (obj_priv->pin_filp != file_priv) { | ||
2802 | DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n", | ||
2803 | args->handle); | ||
2804 | drm_gem_object_unreference(obj); | ||
2805 | mutex_unlock(&dev->struct_mutex); | ||
2806 | return -EINVAL; | ||
2807 | } | ||
2808 | obj_priv->user_pin_count--; | ||
2809 | if (obj_priv->user_pin_count == 0) { | ||
2810 | obj_priv->pin_filp = NULL; | ||
2811 | i915_gem_object_unpin(obj); | ||
2812 | } | ||
2109 | 2813 | ||
2110 | drm_gem_object_unreference(obj); | 2814 | drm_gem_object_unreference(obj); |
2111 | mutex_unlock(&dev->struct_mutex); | 2815 | mutex_unlock(&dev->struct_mutex); |
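The pin and unpin ioctls above now keep a per-object user_pin_count plus the owning file (pin_filp): the real GTT pin is taken only on the 0 to 1 transition, dropped only on 1 to 0, and a different client can neither double-account nor release somebody else's pin. The reference-counting skeleton on its own (types and helpers are invented; the driver returns -EINVAL where this returns -1):

    #include <stdio.h>

    struct mock_file { int id; };

    struct mock_obj {
        int user_pin_count;
        struct mock_file *pin_filp;     /* which client holds the user pin */
    };

    static void kernel_pin(void)   { printf("object pinned in GTT\n"); }
    static void kernel_unpin(void) { printf("object unpinned\n"); }

    /* 0 -> 1 takes the real pin; later calls from the same file just count. */
    static int user_pin(struct mock_obj *obj, struct mock_file *file)
    {
        if (obj->pin_filp != NULL && obj->pin_filp != file)
            return -1;                  /* already pinned by someone else */
        obj->user_pin_count++;
        obj->pin_filp = file;
        if (obj->user_pin_count == 1)
            kernel_pin();
        return 0;
    }

    /* Only the owning file may unpin; the real unpin happens at 1 -> 0. */
    static int user_unpin(struct mock_obj *obj, struct mock_file *file)
    {
        if (obj->pin_filp != file)
            return -1;
        if (--obj->user_pin_count == 0) {
            obj->pin_filp = NULL;
            kernel_unpin();
        }
        return 0;
    }

    int main(void)
    {
        struct mock_file xserver = { 1 }, other = { 2 };
        struct mock_obj obj = { 0, NULL };

        user_pin(&obj, &xserver);
        user_pin(&obj, &xserver);               /* counted, no second pin */
        printf("foreign unpin: %d\n", user_unpin(&obj, &other));
        user_unpin(&obj, &xserver);
        user_unpin(&obj, &xserver);             /* drops the kernel pin */
        return 0;
    }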
@@ -2130,7 +2834,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
2130 | } | 2834 | } |
2131 | 2835 | ||
2132 | obj_priv = obj->driver_private; | 2836 | obj_priv = obj->driver_private; |
2133 | args->busy = obj_priv->active; | 2837 | /* Don't count being on the flushing list against the object being |
2838 | * done. Otherwise, a buffer left on the flushing list but not getting | ||
2839 | * flushed (because nobody's flushing that domain) won't ever return | ||
2840 | * unbusy and get reused by libdrm's bo cache. The other expected | ||
2841 | * consumer of this interface, OpenGL's occlusion queries, also specs | ||
2842 | * that the objects get unbusy "eventually" without any interference. | ||
2843 | */ | ||
2844 | args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; | ||
2134 | 2845 | ||
2135 | drm_gem_object_unreference(obj); | 2846 | drm_gem_object_unreference(obj); |
2136 | mutex_unlock(&dev->struct_mutex); | 2847 | mutex_unlock(&dev->struct_mutex); |
@@ -2165,44 +2876,44 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
2165 | 2876 | ||
2166 | obj->driver_private = obj_priv; | 2877 | obj->driver_private = obj_priv; |
2167 | obj_priv->obj = obj; | 2878 | obj_priv->obj = obj; |
2879 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
2168 | INIT_LIST_HEAD(&obj_priv->list); | 2880 | INIT_LIST_HEAD(&obj_priv->list); |
2881 | |||
2169 | return 0; | 2882 | return 0; |
2170 | } | 2883 | } |
2171 | 2884 | ||
2172 | void i915_gem_free_object(struct drm_gem_object *obj) | 2885 | void i915_gem_free_object(struct drm_gem_object *obj) |
2173 | { | 2886 | { |
2887 | struct drm_device *dev = obj->dev; | ||
2888 | struct drm_gem_mm *mm = dev->mm_private; | ||
2889 | struct drm_map_list *list; | ||
2890 | struct drm_map *map; | ||
2174 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2891 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2175 | 2892 | ||
2176 | while (obj_priv->pin_count > 0) | 2893 | while (obj_priv->pin_count > 0) |
2177 | i915_gem_object_unpin(obj); | 2894 | i915_gem_object_unpin(obj); |
2178 | 2895 | ||
2179 | i915_gem_object_unbind(obj); | 2896 | if (obj_priv->phys_obj) |
2180 | 2897 | i915_gem_detach_phys_object(dev, obj); | |
2181 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); | ||
2182 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); | ||
2183 | } | ||
2184 | 2898 | ||
2185 | static int | 2899 | i915_gem_object_unbind(obj); |
2186 | i915_gem_set_domain(struct drm_gem_object *obj, | ||
2187 | struct drm_file *file_priv, | ||
2188 | uint32_t read_domains, | ||
2189 | uint32_t write_domain) | ||
2190 | { | ||
2191 | struct drm_device *dev = obj->dev; | ||
2192 | int ret; | ||
2193 | uint32_t flush_domains; | ||
2194 | 2900 | ||
2195 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 2901 | list = &obj->map_list; |
2902 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | ||
2196 | 2903 | ||
2197 | ret = i915_gem_object_set_domain(obj, read_domains, write_domain); | 2904 | if (list->file_offset_node) { |
2198 | if (ret) | 2905 | drm_mm_put_block(list->file_offset_node); |
2199 | return ret; | 2906 | list->file_offset_node = NULL; |
2200 | flush_domains = i915_gem_dev_set_domain(obj->dev); | 2907 | } |
2201 | 2908 | ||
2202 | if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) | 2909 | map = list->map; |
2203 | (void) i915_add_request(dev, flush_domains); | 2910 | if (map) { |
2911 | drm_free(map, sizeof(*map), DRM_MEM_DRIVER); | ||
2912 | list->map = NULL; | ||
2913 | } | ||
2204 | 2914 | ||
2205 | return 0; | 2915 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); |
2916 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); | ||
2206 | } | 2917 | } |
2207 | 2918 | ||
2208 | /** Unbinds all objects that are on the given buffer list. */ | 2919 | /** Unbinds all objects that are on the given buffer list. */ |
@@ -2269,8 +2980,7 @@ i915_gem_idle(struct drm_device *dev) | |||
2269 | */ | 2980 | */ |
2270 | i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), | 2981 | i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), |
2271 | ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); | 2982 | ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); |
2272 | seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | | 2983 | seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU); |
2273 | I915_GEM_DOMAIN_GTT)); | ||
2274 | 2984 | ||
2275 | if (seqno == 0) { | 2985 | if (seqno == 0) { |
2276 | mutex_unlock(&dev->struct_mutex); | 2986 | mutex_unlock(&dev->struct_mutex); |
@@ -2299,29 +3009,52 @@ i915_gem_idle(struct drm_device *dev) | |||
2299 | 3009 | ||
2300 | i915_gem_retire_requests(dev); | 3010 | i915_gem_retire_requests(dev); |
2301 | 3011 | ||
2302 | /* Active and flushing should now be empty as we've | 3012 | if (!dev_priv->mm.wedged) { |
2303 | * waited for a sequence higher than any pending execbuffer | 3013 | /* Active and flushing should now be empty as we've |
2304 | */ | 3014 | * waited for a sequence higher than any pending execbuffer |
2305 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 3015 | */ |
2306 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 3016 | WARN_ON(!list_empty(&dev_priv->mm.active_list)); |
3017 | WARN_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
3018 | /* Request should now be empty as we've also waited | ||
3019 | * for the last request in the list | ||
3020 | */ | ||
3021 | WARN_ON(!list_empty(&dev_priv->mm.request_list)); | ||
3022 | } | ||
2307 | 3023 | ||
2308 | /* Request should now be empty as we've also waited | 3024 | /* Empty the active and flushing lists to inactive. If there's |
2309 | * for the last request in the list | 3025 | * anything left at this point, it means that we're wedged and |
3026 | * nothing good's going to happen by leaving them there. So strip | ||
3027 | * the GPU domains and just stuff them onto inactive. | ||
2310 | */ | 3028 | */ |
2311 | BUG_ON(!list_empty(&dev_priv->mm.request_list)); | 3029 | while (!list_empty(&dev_priv->mm.active_list)) { |
3030 | struct drm_i915_gem_object *obj_priv; | ||
3031 | |||
3032 | obj_priv = list_first_entry(&dev_priv->mm.active_list, | ||
3033 | struct drm_i915_gem_object, | ||
3034 | list); | ||
3035 | obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
3036 | i915_gem_object_move_to_inactive(obj_priv->obj); | ||
3037 | } | ||
3038 | |||
3039 | while (!list_empty(&dev_priv->mm.flushing_list)) { | ||
3040 | struct drm_i915_gem_object *obj_priv; | ||
3041 | |||
3042 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | ||
3043 | struct drm_i915_gem_object, | ||
3044 | list); | ||
3045 | obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
3046 | i915_gem_object_move_to_inactive(obj_priv->obj); | ||
3047 | } | ||
3048 | |||
2312 | 3049 | ||
2313 | /* Move all buffers out of the GTT. */ | 3050 | /* Move all inactive buffers out of the GTT. */ |
2314 | ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); | 3051 | ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); |
3052 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); | ||
2315 | if (ret) { | 3053 | if (ret) { |
2316 | mutex_unlock(&dev->struct_mutex); | 3054 | mutex_unlock(&dev->struct_mutex); |
2317 | return ret; | 3055 | return ret; |
2318 | } | 3056 | } |
2319 | 3057 | ||
2320 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | ||
2321 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
2322 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | ||
2323 | BUG_ON(!list_empty(&dev_priv->mm.request_list)); | ||
2324 | |||
2325 | i915_gem_cleanup_ringbuffer(dev); | 3058 | i915_gem_cleanup_ringbuffer(dev); |
2326 | mutex_unlock(&dev->struct_mutex); | 3059 | mutex_unlock(&dev->struct_mutex); |
2327 | 3060 | ||
@@ -2374,12 +3107,13 @@ i915_gem_init_hws(struct drm_device *dev) | |||
2374 | return 0; | 3107 | return 0; |
2375 | } | 3108 | } |
2376 | 3109 | ||
2377 | static int | 3110 | int |
2378 | i915_gem_init_ringbuffer(struct drm_device *dev) | 3111 | i915_gem_init_ringbuffer(struct drm_device *dev) |
2379 | { | 3112 | { |
2380 | drm_i915_private_t *dev_priv = dev->dev_private; | 3113 | drm_i915_private_t *dev_priv = dev->dev_private; |
2381 | struct drm_gem_object *obj; | 3114 | struct drm_gem_object *obj; |
2382 | struct drm_i915_gem_object *obj_priv; | 3115 | struct drm_i915_gem_object *obj_priv; |
3116 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; | ||
2383 | int ret; | 3117 | int ret; |
2384 | u32 head; | 3118 | u32 head; |
2385 | 3119 | ||
@@ -2401,24 +3135,24 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
2401 | } | 3135 | } |
2402 | 3136 | ||
2403 | /* Set up the kernel mapping for the ring. */ | 3137 | /* Set up the kernel mapping for the ring. */ |
2404 | dev_priv->ring.Size = obj->size; | 3138 | ring->Size = obj->size; |
2405 | dev_priv->ring.tail_mask = obj->size - 1; | 3139 | ring->tail_mask = obj->size - 1; |
2406 | 3140 | ||
2407 | dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; | 3141 | ring->map.offset = dev->agp->base + obj_priv->gtt_offset; |
2408 | dev_priv->ring.map.size = obj->size; | 3142 | ring->map.size = obj->size; |
2409 | dev_priv->ring.map.type = 0; | 3143 | ring->map.type = 0; |
2410 | dev_priv->ring.map.flags = 0; | 3144 | ring->map.flags = 0; |
2411 | dev_priv->ring.map.mtrr = 0; | 3145 | ring->map.mtrr = 0; |
2412 | 3146 | ||
2413 | drm_core_ioremap_wc(&dev_priv->ring.map, dev); | 3147 | drm_core_ioremap_wc(&ring->map, dev); |
2414 | if (dev_priv->ring.map.handle == NULL) { | 3148 | if (ring->map.handle == NULL) { |
2415 | DRM_ERROR("Failed to map ringbuffer.\n"); | 3149 | DRM_ERROR("Failed to map ringbuffer.\n"); |
2416 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | 3150 | memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); |
2417 | drm_gem_object_unreference(obj); | 3151 | drm_gem_object_unreference(obj); |
2418 | return -EINVAL; | 3152 | return -EINVAL; |
2419 | } | 3153 | } |
2420 | dev_priv->ring.ring_obj = obj; | 3154 | ring->ring_obj = obj; |
2421 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 3155 | ring->virtual_start = ring->map.handle; |
2422 | 3156 | ||
2423 | /* Stop the ring if it's running. */ | 3157 | /* Stop the ring if it's running. */ |
2424 | I915_WRITE(PRB0_CTL, 0); | 3158 | I915_WRITE(PRB0_CTL, 0); |
@@ -2466,12 +3200,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
2466 | } | 3200 | } |
2467 | 3201 | ||
2468 | /* Update our cache of the ring state */ | 3202 | /* Update our cache of the ring state */ |
2469 | i915_kernel_lost_context(dev); | 3203 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
3204 | i915_kernel_lost_context(dev); | ||
3205 | else { | ||
3206 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
3207 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | ||
3208 | ring->space = ring->head - (ring->tail + 8); | ||
3209 | if (ring->space < 0) | ||
3210 | ring->space += ring->Size; | ||
3211 | } | ||
2470 | 3212 | ||
2471 | return 0; | 3213 | return 0; |
2472 | } | 3214 | } |
2473 | 3215 | ||
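In the KMS case, i915_gem_init_ringbuffer() above recomputes the free ring space straight from the hardware head and tail pointers rather than calling i915_kernel_lost_context(): space is head minus (tail + 8), wrapped by the ring size when it goes negative. The wrap-around arithmetic alone, with made-up register values:

    #include <stdio.h>

    /* Same computation as the KMS branch above: distance from the producer
     * (tail) around to the consumer (head), minus an 8-byte guard, wrapped
     * into the ring when negative. */
    static unsigned int ring_space(unsigned int head, unsigned int tail,
                                   unsigned int size)
    {
        int space = (int)head - (int)(tail + 8);

        if (space < 0)
            space += (int)size;
        return (unsigned int)space;
    }

    int main(void)
    {
        /* Tail just past head: nearly the whole 64 KiB ring is free. */
        printf("%u\n", ring_space(0x100, 0x180, 64 * 1024));
        /* Head well ahead of tail: the gap minus the guard is free. */
        printf("%u\n", ring_space(0x8000, 0x100, 64 * 1024));
        return 0;
    }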
2474 | static void | 3216 | void |
2475 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) | 3217 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) |
2476 | { | 3218 | { |
2477 | drm_i915_private_t *dev_priv = dev->dev_private; | 3219 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -2509,25 +3251,25 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
2509 | drm_i915_private_t *dev_priv = dev->dev_private; | 3251 | drm_i915_private_t *dev_priv = dev->dev_private; |
2510 | int ret; | 3252 | int ret; |
2511 | 3253 | ||
3254 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
3255 | return 0; | ||
3256 | |||
2512 | if (dev_priv->mm.wedged) { | 3257 | if (dev_priv->mm.wedged) { |
2513 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | 3258 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); |
2514 | dev_priv->mm.wedged = 0; | 3259 | dev_priv->mm.wedged = 0; |
2515 | } | 3260 | } |
2516 | 3261 | ||
3262 | mutex_lock(&dev->struct_mutex); | ||
3263 | dev_priv->mm.suspended = 0; | ||
3264 | |||
2517 | ret = i915_gem_init_ringbuffer(dev); | 3265 | ret = i915_gem_init_ringbuffer(dev); |
2518 | if (ret != 0) | 3266 | if (ret != 0) |
2519 | return ret; | 3267 | return ret; |
2520 | 3268 | ||
2521 | dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base, | ||
2522 | dev->agp->agp_info.aper_size | ||
2523 | * 1024 * 1024); | ||
2524 | |||
2525 | mutex_lock(&dev->struct_mutex); | ||
2526 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 3269 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); |
2527 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 3270 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
2528 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 3271 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
2529 | BUG_ON(!list_empty(&dev_priv->mm.request_list)); | 3272 | BUG_ON(!list_empty(&dev_priv->mm.request_list)); |
2530 | dev_priv->mm.suspended = 0; | ||
2531 | mutex_unlock(&dev->struct_mutex); | 3273 | mutex_unlock(&dev->struct_mutex); |
2532 | 3274 | ||
2533 | drm_irq_install(dev); | 3275 | drm_irq_install(dev); |
@@ -2539,13 +3281,14 @@ int | |||
2539 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 3281 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
2540 | struct drm_file *file_priv) | 3282 | struct drm_file *file_priv) |
2541 | { | 3283 | { |
2542 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2543 | int ret; | 3284 | int ret; |
2544 | 3285 | ||
3286 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
3287 | return 0; | ||
3288 | |||
2545 | ret = i915_gem_idle(dev); | 3289 | ret = i915_gem_idle(dev); |
2546 | drm_irq_uninstall(dev); | 3290 | drm_irq_uninstall(dev); |
2547 | 3291 | ||
2548 | io_mapping_free(dev_priv->mm.gtt_mapping); | ||
2549 | return ret; | 3292 | return ret; |
2550 | } | 3293 | } |
2551 | 3294 | ||
@@ -2554,6 +3297,9 @@ i915_gem_lastclose(struct drm_device *dev) | |||
2554 | { | 3297 | { |
2555 | int ret; | 3298 | int ret; |
2556 | 3299 | ||
3300 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
3301 | return; | ||
3302 | |||
2557 | ret = i915_gem_idle(dev); | 3303 | ret = i915_gem_idle(dev); |
2558 | if (ret) | 3304 | if (ret) |
2559 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 3305 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
@@ -2572,5 +3318,190 @@ i915_gem_load(struct drm_device *dev) | |||
2572 | i915_gem_retire_work_handler); | 3318 | i915_gem_retire_work_handler); |
2573 | dev_priv->mm.next_gem_seqno = 1; | 3319 | dev_priv->mm.next_gem_seqno = 1; |
2574 | 3320 | ||
3321 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | ||
3322 | dev_priv->fence_reg_start = 3; | ||
3323 | |||
3324 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
3325 | dev_priv->num_fence_regs = 16; | ||
3326 | else | ||
3327 | dev_priv->num_fence_regs = 8; | ||
3328 | |||
2575 | i915_gem_detect_bit_6_swizzle(dev); | 3329 | i915_gem_detect_bit_6_swizzle(dev); |
2576 | } | 3330 | } |
3331 | |||
3332 | /* | ||
3333 | * Create a physically contiguous memory object for this object | ||
3334 | * e.g. for cursor + overlay regs | ||
3335 | */ | ||
3336 | int i915_gem_init_phys_object(struct drm_device *dev, | ||
3337 | int id, int size) | ||
3338 | { | ||
3339 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3340 | struct drm_i915_gem_phys_object *phys_obj; | ||
3341 | int ret; | ||
3342 | |||
3343 | if (dev_priv->mm.phys_objs[id - 1] || !size) | ||
3344 | return 0; | ||
3345 | |||
3346 | phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); | ||
3347 | if (!phys_obj) | ||
3348 | return -ENOMEM; | ||
3349 | |||
3350 | phys_obj->id = id; | ||
3351 | |||
3352 | phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); | ||
3353 | if (!phys_obj->handle) { | ||
3354 | ret = -ENOMEM; | ||
3355 | goto kfree_obj; | ||
3356 | } | ||
3357 | #ifdef CONFIG_X86 | ||
3358 | set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | ||
3359 | #endif | ||
3360 | |||
3361 | dev_priv->mm.phys_objs[id - 1] = phys_obj; | ||
3362 | |||
3363 | return 0; | ||
3364 | kfree_obj: | ||
3365 | drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); | ||
3366 | return ret; | ||
3367 | } | ||
3368 | |||
3369 | void i915_gem_free_phys_object(struct drm_device *dev, int id) | ||
3370 | { | ||
3371 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3372 | struct drm_i915_gem_phys_object *phys_obj; | ||
3373 | |||
3374 | if (!dev_priv->mm.phys_objs[id - 1]) | ||
3375 | return; | ||
3376 | |||
3377 | phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
3378 | if (phys_obj->cur_obj) { | ||
3379 | i915_gem_detach_phys_object(dev, phys_obj->cur_obj); | ||
3380 | } | ||
3381 | |||
3382 | #ifdef CONFIG_X86 | ||
3383 | set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | ||
3384 | #endif | ||
3385 | drm_pci_free(dev, phys_obj->handle); | ||
3386 | kfree(phys_obj); | ||
3387 | dev_priv->mm.phys_objs[id - 1] = NULL; | ||
3388 | } | ||
3389 | |||
3390 | void i915_gem_free_all_phys_object(struct drm_device *dev) | ||
3391 | { | ||
3392 | int i; | ||
3393 | |||
3394 | for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) | ||
3395 | i915_gem_free_phys_object(dev, i); | ||
3396 | } | ||
3397 | |||
3398 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
3399 | struct drm_gem_object *obj) | ||
3400 | { | ||
3401 | struct drm_i915_gem_object *obj_priv; | ||
3402 | int i; | ||
3403 | int ret; | ||
3404 | int page_count; | ||
3405 | |||
3406 | obj_priv = obj->driver_private; | ||
3407 | if (!obj_priv->phys_obj) | ||
3408 | return; | ||
3409 | |||
3410 | ret = i915_gem_object_get_page_list(obj); | ||
3411 | if (ret) | ||
3412 | goto out; | ||
3413 | |||
3414 | page_count = obj->size / PAGE_SIZE; | ||
3415 | |||
3416 | for (i = 0; i < page_count; i++) { | ||
3417 | char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); | ||
3418 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
3419 | |||
3420 | memcpy(dst, src, PAGE_SIZE); | ||
3421 | kunmap_atomic(dst, KM_USER0); | ||
3422 | } | ||
3423 | drm_clflush_pages(obj_priv->page_list, page_count); | ||
3424 | drm_agp_chipset_flush(dev); | ||
3425 | out: | ||
3426 | obj_priv->phys_obj->cur_obj = NULL; | ||
3427 | obj_priv->phys_obj = NULL; | ||
3428 | } | ||
3429 | |||
3430 | int | ||
3431 | i915_gem_attach_phys_object(struct drm_device *dev, | ||
3432 | struct drm_gem_object *obj, int id) | ||
3433 | { | ||
3434 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3435 | struct drm_i915_gem_object *obj_priv; | ||
3436 | int ret = 0; | ||
3437 | int page_count; | ||
3438 | int i; | ||
3439 | |||
3440 | if (id > I915_MAX_PHYS_OBJECT) | ||
3441 | return -EINVAL; | ||
3442 | |||
3443 | obj_priv = obj->driver_private; | ||
3444 | |||
3445 | if (obj_priv->phys_obj) { | ||
3446 | if (obj_priv->phys_obj->id == id) | ||
3447 | return 0; | ||
3448 | i915_gem_detach_phys_object(dev, obj); | ||
3449 | } | ||
3450 | |||
3451 | |||
3452 | /* create a new object */ | ||
3453 | if (!dev_priv->mm.phys_objs[id - 1]) { | ||
3454 | ret = i915_gem_init_phys_object(dev, id, | ||
3455 | obj->size); | ||
3456 | if (ret) { | ||
3457 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | ||
3458 | goto out; | ||
3459 | } | ||
3460 | } | ||
3461 | |||
3462 | /* bind to the object */ | ||
3463 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
3464 | obj_priv->phys_obj->cur_obj = obj; | ||
3465 | |||
3466 | ret = i915_gem_object_get_page_list(obj); | ||
3467 | if (ret) { | ||
3468 | DRM_ERROR("failed to get page list\n"); | ||
3469 | goto out; | ||
3470 | } | ||
3471 | |||
3472 | page_count = obj->size / PAGE_SIZE; | ||
3473 | |||
3474 | for (i = 0; i < page_count; i++) { | ||
3475 | char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); | ||
3476 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
3477 | |||
3478 | memcpy(dst, src, PAGE_SIZE); | ||
3479 | kunmap_atomic(src, KM_USER0); | ||
3480 | } | ||
3481 | |||
3482 | return 0; | ||
3483 | out: | ||
3484 | return ret; | ||
3485 | } | ||
3486 | |||
3487 | static int | ||
3488 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
3489 | struct drm_i915_gem_pwrite *args, | ||
3490 | struct drm_file *file_priv) | ||
3491 | { | ||
3492 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3493 | void *obj_addr; | ||
3494 | int ret; | ||
3495 | char __user *user_data; | ||
3496 | |||
3497 | user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
3498 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
3499 | |||
3500 | DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size); | ||
3501 | ret = copy_from_user(obj_addr, user_data, args->size); | ||
3502 | if (ret) | ||
3503 | return -EFAULT; | ||
3504 | |||
3505 | drm_agp_chipset_flush(dev); | ||
3506 | return 0; | ||
3507 | } | ||
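The phys-object paths above move data page by page between the GEM object's page list and the contiguous buffer obtained from drm_pci_alloc(). A minimal userspace sketch of that copy pattern, with malloc'ed buffers standing in for the kmap_atomic'd pages and the DMA handle (all names here are hypothetical, not driver API):

#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE 4096

/* Copy a "scattered" object (an array of page pointers) into one
 * contiguous buffer, the same per-page memcpy loop the attach path
 * uses; swapping src and dst gives the detach direction. */
static void copy_pages_to_contig(char **pages, int page_count, char *contig)
{
	int i;

	for (i = 0; i < page_count; i++)
		memcpy(contig + i * FAKE_PAGE_SIZE, pages[i], FAKE_PAGE_SIZE);
}

int main(void)
{
	char *pages[2];
	char *contig = malloc(2 * FAKE_PAGE_SIZE);
	int i;

	for (i = 0; i < 2; i++) {
		pages[i] = malloc(FAKE_PAGE_SIZE);
		memset(pages[i], 'A' + i, FAKE_PAGE_SIZE);
	}

	copy_pages_to_contig(pages, 2, contig);

	free(contig);
	for (i = 0; i < 2; i++)
		free(pages[i]);
	return 0;
}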
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c index 93de15b4c9a7..4d1b9de0cd8b 100644 --- a/drivers/gpu/drm/i915/i915_gem_proc.c +++ b/drivers/gpu/drm/i915/i915_gem_proc.c | |||
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset, | |||
166 | list_for_each_entry(gem_request, &dev_priv->mm.request_list, | 166 | list_for_each_entry(gem_request, &dev_priv->mm.request_list, |
167 | list) | 167 | list) |
168 | { | 168 | { |
169 | DRM_PROC_PRINT(" %d @ %d %08x\n", | 169 | DRM_PROC_PRINT(" %d @ %d\n", |
170 | gem_request->seqno, | 170 | gem_request->seqno, |
171 | (int) (jiffies - gem_request->emitted_jiffies), | 171 | (int) (jiffies - gem_request->emitted_jiffies)); |
172 | gem_request->flush_domains); | ||
173 | } | 172 | } |
174 | if (len > request + offset) | 173 | if (len > request + offset) |
175 | return request; | 174 | return request; |
@@ -251,6 +250,39 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset, | |||
251 | return len - offset; | 250 | return len - offset; |
252 | } | 251 | } |
253 | 252 | ||
253 | static int i915_hws_info(char *buf, char **start, off_t offset, | ||
254 | int request, int *eof, void *data) | ||
255 | { | ||
256 | struct drm_minor *minor = (struct drm_minor *) data; | ||
257 | struct drm_device *dev = minor->dev; | ||
258 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
259 | int len = 0, i; | ||
260 | volatile u32 *hws; | ||
261 | |||
262 | if (offset > DRM_PROC_LIMIT) { | ||
263 | *eof = 1; | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | hws = (volatile u32 *)dev_priv->hw_status_page; | ||
268 | if (hws == NULL) { | ||
269 | *eof = 1; | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | *start = &buf[offset]; | ||
274 | *eof = 0; | ||
275 | for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { | ||
276 | DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
277 | i * 4, | ||
278 | hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); | ||
279 | } | ||
280 | if (len > request + offset) | ||
281 | return request; | ||
282 | *eof = 1; | ||
283 | return len - offset; | ||
284 | } | ||
285 | |||
254 | static struct drm_proc_list { | 286 | static struct drm_proc_list { |
255 | /** file name */ | 287 | /** file name */ |
256 | const char *name; | 288 | const char *name; |
@@ -263,6 +295,7 @@ static struct drm_proc_list { | |||
263 | {"i915_gem_request", i915_gem_request_info}, | 295 | {"i915_gem_request", i915_gem_request_info}, |
264 | {"i915_gem_seqno", i915_gem_seqno_info}, | 296 | {"i915_gem_seqno", i915_gem_seqno_info}, |
265 | {"i915_gem_interrupt", i915_interrupt_info}, | 297 | {"i915_gem_interrupt", i915_interrupt_info}, |
298 | {"i915_gem_hws", i915_hws_info}, | ||
266 | }; | 299 | }; |
267 | 300 | ||
268 | #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) | 301 | #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) |
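The new i915_hws_info() entry dumps the hardware status page as rows of four dwords prefixed by their byte offset; as written, the loop bound (4096 / sizeof(u32) / 4, stepping by 4) covers the first 256 dwords, i.e. the first 1KB of the 4KB page. A small userspace sketch of the same row formatting over a stack buffer (hypothetical names, plain printf instead of DRM_PROC_PRINT):

#include <stdint.h>
#include <stdio.h>

/* Print a buffer as lines of four 32-bit words, each line prefixed
 * with its byte offset, matching the layout the proc entry emits. */
static void dump_dwords(const uint32_t *buf, int count)
{
	int i;

	for (i = 0; i + 3 < count; i += 4)
		printf("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
		       i * 4, buf[i], buf[i + 1], buf[i + 2], buf[i + 3]);
}

int main(void)
{
	uint32_t hws[16];
	int i;

	for (i = 0; i < 16; i++)
		hws[i] = 0xdead0000u + i;

	dump_dwords(hws, 16);
	return 0;
}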
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index e8b85ac4ca04..fa1685cba840 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
119 | dcc & DCC_CHANNEL_XOR_DISABLE) { | 119 | dcc & DCC_CHANNEL_XOR_DISABLE) { |
120 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 120 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
121 | swizzle_y = I915_BIT_6_SWIZZLE_9; | 121 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
122 | } else if (IS_I965GM(dev) || IS_GM45(dev)) { | 122 | } else if ((IS_I965GM(dev) || IS_GM45(dev)) && |
123 | /* GM965 only does bit 11-based channel | 123 | (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
124 | * randomization | 124 | /* GM965/GM45 does either bit 11 or bit 17 |
125 | * swizzling. | ||
125 | */ | 126 | */ |
126 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; | 127 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; |
127 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; | 128 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
@@ -172,6 +173,73 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
172 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; | 173 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
173 | } | 174 | } |
174 | 175 | ||
176 | |||
177 | /** | ||
178 | * Returns the size of the fence for a tiled object of the given size. | ||
179 | */ | ||
180 | static int | ||
181 | i915_get_fence_size(struct drm_device *dev, int size) | ||
182 | { | ||
183 | int i; | ||
184 | int start; | ||
185 | |||
186 | if (IS_I965G(dev)) { | ||
187 | /* The 965 can have fences at any page boundary. */ | ||
188 | return ALIGN(size, 4096); | ||
189 | } else { | ||
190 | /* Round the size up to the next power of two, subject to the | ||
191 | * minimum fence size. | ||
192 | */ | ||
193 | if (IS_I9XX(dev)) | ||
194 | start = 1024 * 1024; | ||
195 | else | ||
196 | start = 512 * 1024; | ||
197 | |||
198 | for (i = start; i < size; i <<= 1) | ||
199 | ; | ||
200 | |||
201 | return i; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | /* Check pitch constraints for all chips & tiling formats */ | ||
206 | static bool | ||
207 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | ||
208 | { | ||
209 | int tile_width; | ||
210 | |||
211 | /* Linear is always fine */ | ||
212 | if (tiling_mode == I915_TILING_NONE) | ||
213 | return true; | ||
214 | |||
215 | if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) | ||
216 | tile_width = 128; | ||
217 | else | ||
218 | tile_width = 512; | ||
219 | |||
220 | /* 965+ just needs multiples of tile width */ | ||
221 | if (IS_I965G(dev)) { | ||
222 | if (stride & (tile_width - 1)) | ||
223 | return false; | ||
224 | return true; | ||
225 | } | ||
226 | |||
227 | /* Pre-965 needs power of two tile widths */ | ||
228 | if (stride < tile_width) | ||
229 | return false; | ||
230 | |||
231 | if (stride & (stride - 1)) | ||
232 | return false; | ||
233 | |||
234 | /* We don't handle the aperture area covered by the fence being bigger | ||
235 | * than the object size. | ||
236 | */ | ||
237 | if (i915_get_fence_size(dev, size) != size) | ||
238 | return false; | ||
239 | |||
240 | return true; | ||
241 | } | ||
242 | |||
175 | /** | 243 | /** |
176 | * Sets the tiling mode of an object, returning the required swizzling of | 244 | * Sets the tiling mode of an object, returning the required swizzling of |
177 | * bit 6 of addresses in the object. | 245 | * bit 6 of addresses in the object. |
@@ -190,6 +258,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
190 | return -EINVAL; | 258 | return -EINVAL; |
191 | obj_priv = obj->driver_private; | 259 | obj_priv = obj->driver_private; |
192 | 260 | ||
261 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | ||
262 | drm_gem_object_unreference(obj); | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | |||
193 | mutex_lock(&dev->struct_mutex); | 266 | mutex_lock(&dev->struct_mutex); |
194 | 267 | ||
195 | if (args->tiling_mode == I915_TILING_NONE) { | 268 | if (args->tiling_mode == I915_TILING_NONE) { |
@@ -206,7 +279,25 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
206 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; | 279 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
207 | } | 280 | } |
208 | } | 281 | } |
209 | obj_priv->tiling_mode = args->tiling_mode; | 282 | if (args->tiling_mode != obj_priv->tiling_mode) { |
283 | int ret; | ||
284 | |||
285 | /* Unbind the object, as switching tiling means we're | ||
286 | * switching the cache organization due to fencing, probably. | ||
287 | */ | ||
288 | ret = i915_gem_object_unbind(obj); | ||
289 | if (ret != 0) { | ||
290 | WARN(ret != -ERESTARTSYS, | ||
291 | "failed to unbind object for tiling switch"); | ||
292 | args->tiling_mode = obj_priv->tiling_mode; | ||
293 | mutex_unlock(&dev->struct_mutex); | ||
294 | drm_gem_object_unreference(obj); | ||
295 | |||
296 | return ret; | ||
297 | } | ||
298 | obj_priv->tiling_mode = args->tiling_mode; | ||
299 | } | ||
300 | obj_priv->stride = args->stride; | ||
210 | 301 | ||
211 | mutex_unlock(&dev->struct_mutex); | 302 | mutex_unlock(&dev->struct_mutex); |
212 | 303 | ||
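The tiling checks added above boil down to simple arithmetic: pre-965 fences must be a power of two no smaller than 512KB (1MB on 9xx parts) and must exactly cover the object, and pre-965 strides must be a power of two of at least one tile width. A standalone userspace sketch of that rounding and the power-of-two test (hypothetical helper names, not the driver's functions):

#include <stdbool.h>
#include <stdio.h>

/* Smallest power of two >= size, but no smaller than min_fence,
 * the same rounding the pre-965 fence-size path performs. */
static unsigned int fence_size(unsigned int size, unsigned int min_fence)
{
	unsigned int f;

	for (f = min_fence; f < size; f <<= 1)
		;
	return f;
}

/* Pre-965 pitch rule: at least one tile width and a power of two. */
static bool pitch_ok(unsigned int stride, unsigned int tile_width)
{
	if (stride < tile_width)
		return false;
	return (stride & (stride - 1)) == 0;
}

int main(void)
{
	/* A 300KB object with a 512KB minimum rounds to a 512KB fence;
	 * a 1.5MB object with a 1MB minimum rounds to a 2MB fence. */
	printf("%u %u\n", fence_size(300 * 1024, 512 * 1024),
	       fence_size(1536 * 1024, 1024 * 1024));
	printf("%d %d\n", pitch_ok(512, 512), pitch_ok(768, 512));
	return 0;
}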
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 82752d6177a4..548ff2c66431 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -30,14 +30,36 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "i915_drm.h" | 31 | #include "i915_drm.h" |
32 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
33 | #include "intel_drv.h" | ||
33 | 34 | ||
34 | #define MAX_NOPID ((u32)~0) | 35 | #define MAX_NOPID ((u32)~0) |
35 | 36 | ||
36 | /** These are the interrupts used by the driver */ | 37 | /** |
37 | #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ | 38 | * Interrupts that are always left unmasked. |
38 | I915_ASLE_INTERRUPT | \ | 39 | * |
39 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ | 40 | * Since pipe events are edge-triggered from the PIPESTAT register to IIR, |
40 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) | 41 | * we leave them always unmasked in IMR and then control enabling them through |
42 | * PIPESTAT alone. | ||
43 | */ | ||
44 | #define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ | ||
45 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ | ||
46 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) | ||
47 | |||
48 | /** Interrupts that we mask and unmask at runtime. */ | ||
49 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) | ||
50 | |||
51 | /** These are all of the interrupts used by the driver */ | ||
52 | #define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \ | ||
53 | I915_INTERRUPT_ENABLE_VAR) | ||
54 | |||
55 | #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ | ||
56 | PIPE_VBLANK_INTERRUPT_STATUS) | ||
57 | |||
58 | #define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\ | ||
59 | PIPE_VBLANK_INTERRUPT_ENABLE) | ||
60 | |||
61 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ | ||
62 | DRM_I915_VBLANK_PIPE_B) | ||
41 | 63 | ||
42 | void | 64 | void |
43 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | 65 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) |
@@ -59,6 +81,41 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
59 | } | 81 | } |
60 | } | 82 | } |
61 | 83 | ||
84 | static inline u32 | ||
85 | i915_pipestat(int pipe) | ||
86 | { | ||
87 | if (pipe == 0) | ||
88 | return PIPEASTAT; | ||
89 | if (pipe == 1) | ||
90 | return PIPEBSTAT; | ||
91 | BUG(); | ||
92 | } | ||
93 | |||
94 | void | ||
95 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | ||
96 | { | ||
97 | if ((dev_priv->pipestat[pipe] & mask) != mask) { | ||
98 | u32 reg = i915_pipestat(pipe); | ||
99 | |||
100 | dev_priv->pipestat[pipe] |= mask; | ||
101 | /* Enable the interrupt, clear any pending status */ | ||
102 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); | ||
103 | (void) I915_READ(reg); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | void | ||
108 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | ||
109 | { | ||
110 | if ((dev_priv->pipestat[pipe] & mask) != 0) { | ||
111 | u32 reg = i915_pipestat(pipe); | ||
112 | |||
113 | dev_priv->pipestat[pipe] &= ~mask; | ||
114 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | ||
115 | (void) I915_READ(reg); | ||
116 | } | ||
117 | } | ||
118 | |||
62 | /** | 119 | /** |
63 | * i915_pipe_enabled - check if a pipe is enabled | 120 | * i915_pipe_enabled - check if a pipe is enabled |
64 | * @dev: DRM device | 121 | * @dev: DRM device |
@@ -117,89 +174,130 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
117 | return count; | 174 | return count; |
118 | } | 175 | } |
119 | 176 | ||
177 | u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | ||
178 | { | ||
179 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
180 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | ||
181 | |||
182 | if (!i915_pipe_enabled(dev, pipe)) { | ||
183 | DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | return I915_READ(reg); | ||
188 | } | ||
189 | |||
120 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 190 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
121 | { | 191 | { |
122 | struct drm_device *dev = (struct drm_device *) arg; | 192 | struct drm_device *dev = (struct drm_device *) arg; |
123 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 193 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
124 | u32 iir; | 194 | struct drm_i915_master_private *master_priv; |
195 | u32 iir, new_iir; | ||
125 | u32 pipea_stats, pipeb_stats; | 196 | u32 pipea_stats, pipeb_stats; |
197 | u32 vblank_status; | ||
198 | u32 vblank_enable; | ||
126 | int vblank = 0; | 199 | int vblank = 0; |
200 | unsigned long irqflags; | ||
201 | int irq_received; | ||
202 | int ret = IRQ_NONE; | ||
127 | 203 | ||
128 | atomic_inc(&dev_priv->irq_received); | 204 | atomic_inc(&dev_priv->irq_received); |
129 | 205 | ||
130 | if (dev->pdev->msi_enabled) | ||
131 | I915_WRITE(IMR, ~0); | ||
132 | iir = I915_READ(IIR); | 206 | iir = I915_READ(IIR); |
133 | 207 | ||
134 | if (iir == 0) { | 208 | if (IS_I965G(dev)) { |
135 | if (dev->pdev->msi_enabled) { | 209 | vblank_status = I915_START_VBLANK_INTERRUPT_STATUS; |
136 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 210 | vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE; |
137 | (void) I915_READ(IMR); | 211 | } else { |
138 | } | 212 | vblank_status = I915_VBLANK_INTERRUPT_STATUS; |
139 | return IRQ_NONE; | 213 | vblank_enable = I915_VBLANK_INTERRUPT_ENABLE; |
140 | } | 214 | } |
141 | 215 | ||
142 | /* | 216 | for (;;) { |
143 | * Clear the PIPE(A|B)STAT regs before the IIR otherwise | 217 | irq_received = iir != 0; |
144 | * we may get extra interrupts. | 218 | |
145 | */ | 219 | /* Can't rely on pipestat interrupt bit in iir as it might |
146 | if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { | 220 | * have been cleared after the pipestat interrupt was received. |
221 | * It doesn't set the bit in iir again, but it still produces | ||
222 | * interrupts (for non-MSI). | ||
223 | */ | ||
224 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
147 | pipea_stats = I915_READ(PIPEASTAT); | 225 | pipea_stats = I915_READ(PIPEASTAT); |
148 | if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)) | 226 | pipeb_stats = I915_READ(PIPEBSTAT); |
149 | pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | | 227 | |
150 | PIPE_VBLANK_INTERRUPT_ENABLE); | 228 | /* |
151 | else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| | 229 | * Clear the PIPE(A|B)STAT regs before the IIR |
152 | PIPE_VBLANK_INTERRUPT_STATUS)) { | 230 | */ |
231 | if (pipea_stats & 0x8000ffff) { | ||
232 | I915_WRITE(PIPEASTAT, pipea_stats); | ||
233 | irq_received = 1; | ||
234 | } | ||
235 | |||
236 | if (pipeb_stats & 0x8000ffff) { | ||
237 | I915_WRITE(PIPEBSTAT, pipeb_stats); | ||
238 | irq_received = 1; | ||
239 | } | ||
240 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
241 | |||
242 | if (!irq_received) | ||
243 | break; | ||
244 | |||
245 | ret = IRQ_HANDLED; | ||
246 | |||
247 | I915_WRITE(IIR, iir); | ||
248 | new_iir = I915_READ(IIR); /* Flush posted writes */ | ||
249 | |||
250 | if (dev->primary->master) { | ||
251 | master_priv = dev->primary->master->driver_priv; | ||
252 | if (master_priv->sarea_priv) | ||
253 | master_priv->sarea_priv->last_dispatch = | ||
254 | READ_BREADCRUMB(dev_priv); | ||
255 | } | ||
256 | |||
257 | if (iir & I915_USER_INTERRUPT) { | ||
258 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | ||
259 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
260 | } | ||
261 | |||
262 | if (pipea_stats & vblank_status) { | ||
153 | vblank++; | 263 | vblank++; |
154 | drm_handle_vblank(dev, 0); | 264 | drm_handle_vblank(dev, 0); |
155 | } | 265 | } |
156 | 266 | ||
157 | I915_WRITE(PIPEASTAT, pipea_stats); | 267 | if (pipeb_stats & vblank_status) { |
158 | } | ||
159 | if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { | ||
160 | pipeb_stats = I915_READ(PIPEBSTAT); | ||
161 | /* Ack the event */ | ||
162 | I915_WRITE(PIPEBSTAT, pipeb_stats); | ||
163 | |||
164 | /* The vblank interrupt gets enabled even if we didn't ask for | ||
165 | it, so make sure it's shut down again */ | ||
166 | if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) | ||
167 | pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | | ||
168 | PIPE_VBLANK_INTERRUPT_ENABLE); | ||
169 | else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| | ||
170 | PIPE_VBLANK_INTERRUPT_STATUS)) { | ||
171 | vblank++; | 268 | vblank++; |
172 | drm_handle_vblank(dev, 1); | 269 | drm_handle_vblank(dev, 1); |
173 | } | 270 | } |
174 | 271 | ||
175 | if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) | 272 | if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || |
273 | (iir & I915_ASLE_INTERRUPT)) | ||
176 | opregion_asle_intr(dev); | 274 | opregion_asle_intr(dev); |
177 | I915_WRITE(PIPEBSTAT, pipeb_stats); | ||
178 | } | ||
179 | |||
180 | I915_WRITE(IIR, iir); | ||
181 | if (dev->pdev->msi_enabled) | ||
182 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
183 | (void) I915_READ(IIR); /* Flush posted writes */ | ||
184 | |||
185 | if (dev_priv->sarea_priv) | ||
186 | dev_priv->sarea_priv->last_dispatch = | ||
187 | READ_BREADCRUMB(dev_priv); | ||
188 | 275 | ||
189 | if (iir & I915_USER_INTERRUPT) { | 276 | /* With MSI, interrupts are only generated when iir |
190 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | 277 | * transitions from zero to nonzero. If another bit got |
191 | DRM_WAKEUP(&dev_priv->irq_queue); | 278 | * set while we were handling the existing iir bits, then |
279 | * we would never get another interrupt. | ||
280 | * | ||
281 | * This is fine on non-MSI as well, as if we hit this path | ||
282 | * we avoid exiting the interrupt handler only to generate | ||
283 | * another one. | ||
284 | * | ||
285 | * Note that for MSI this could cause a stray interrupt report | ||
286 | * if an interrupt landed in the time between writing IIR and | ||
287 | * the posting read. This should be rare enough to never | ||
288 | * trigger the 99% of 100,000 interrupts test for disabling | ||
289 | * stray interrupts. | ||
290 | */ | ||
291 | iir = new_iir; | ||
192 | } | 292 | } |
193 | 293 | ||
194 | if (iir & I915_ASLE_INTERRUPT) | 294 | return ret; |
195 | opregion_asle_intr(dev); | ||
196 | |||
197 | return IRQ_HANDLED; | ||
198 | } | 295 | } |
199 | 296 | ||
200 | static int i915_emit_irq(struct drm_device * dev) | 297 | static int i915_emit_irq(struct drm_device * dev) |
201 | { | 298 | { |
202 | drm_i915_private_t *dev_priv = dev->dev_private; | 299 | drm_i915_private_t *dev_priv = dev->dev_private; |
300 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
203 | RING_LOCALS; | 301 | RING_LOCALS; |
204 | 302 | ||
205 | i915_kernel_lost_context(dev); | 303 | i915_kernel_lost_context(dev); |
@@ -209,8 +307,8 @@ static int i915_emit_irq(struct drm_device * dev) | |||
209 | dev_priv->counter++; | 307 | dev_priv->counter++; |
210 | if (dev_priv->counter > 0x7FFFFFFFUL) | 308 | if (dev_priv->counter > 0x7FFFFFFFUL) |
211 | dev_priv->counter = 1; | 309 | dev_priv->counter = 1; |
212 | if (dev_priv->sarea_priv) | 310 | if (master_priv->sarea_priv) |
213 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; | 311 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
214 | 312 | ||
215 | BEGIN_LP_RING(4); | 313 | BEGIN_LP_RING(4); |
216 | OUT_RING(MI_STORE_DWORD_INDEX); | 314 | OUT_RING(MI_STORE_DWORD_INDEX); |
@@ -248,21 +346,20 @@ void i915_user_irq_put(struct drm_device *dev) | |||
248 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 346 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
249 | { | 347 | { |
250 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 348 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
349 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
251 | int ret = 0; | 350 | int ret = 0; |
252 | 351 | ||
253 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, | 352 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, |
254 | READ_BREADCRUMB(dev_priv)); | 353 | READ_BREADCRUMB(dev_priv)); |
255 | 354 | ||
256 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { | 355 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { |
257 | if (dev_priv->sarea_priv) { | 356 | if (master_priv->sarea_priv) |
258 | dev_priv->sarea_priv->last_dispatch = | 357 | master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
259 | READ_BREADCRUMB(dev_priv); | ||
260 | } | ||
261 | return 0; | 358 | return 0; |
262 | } | 359 | } |
263 | 360 | ||
264 | if (dev_priv->sarea_priv) | 361 | if (master_priv->sarea_priv) |
265 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 362 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
266 | 363 | ||
267 | i915_user_irq_get(dev); | 364 | i915_user_irq_get(dev); |
268 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, | 365 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, |
@@ -274,10 +371,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
274 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); | 371 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); |
275 | } | 372 | } |
276 | 373 | ||
277 | if (dev_priv->sarea_priv) | ||
278 | dev_priv->sarea_priv->last_dispatch = | ||
279 | READ_BREADCRUMB(dev_priv); | ||
280 | |||
281 | return ret; | 374 | return ret; |
282 | } | 375 | } |
283 | 376 | ||
@@ -330,48 +423,22 @@ int i915_irq_wait(struct drm_device *dev, void *data, | |||
330 | int i915_enable_vblank(struct drm_device *dev, int pipe) | 423 | int i915_enable_vblank(struct drm_device *dev, int pipe) |
331 | { | 424 | { |
332 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 425 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
333 | u32 pipestat_reg = 0; | ||
334 | u32 pipestat; | ||
335 | u32 interrupt = 0; | ||
336 | unsigned long irqflags; | 426 | unsigned long irqflags; |
427 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
428 | u32 pipeconf; | ||
337 | 429 | ||
338 | switch (pipe) { | 430 | pipeconf = I915_READ(pipeconf_reg); |
339 | case 0: | 431 | if (!(pipeconf & PIPEACONF_ENABLE)) |
340 | pipestat_reg = PIPEASTAT; | 432 | return -EINVAL; |
341 | interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | ||
342 | break; | ||
343 | case 1: | ||
344 | pipestat_reg = PIPEBSTAT; | ||
345 | interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
346 | break; | ||
347 | default: | ||
348 | DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", | ||
349 | pipe); | ||
350 | return 0; | ||
351 | } | ||
352 | 433 | ||
353 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 434 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
354 | /* Enabling vblank events in IMR comes before PIPESTAT write, or | ||
355 | * there's a race where the PIPESTAT vblank bit gets set to 1, so | ||
356 | * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in | ||
357 | * ISR flashes to 1, but the IIR bit doesn't get set to 1 because | ||
358 | * IMR masks it. It doesn't ever get set after we clear the masking | ||
359 | * in IMR because the ISR bit is edge, not level-triggered, on the | ||
360 | * OR of PIPESTAT bits. | ||
361 | */ | ||
362 | i915_enable_irq(dev_priv, interrupt); | ||
363 | pipestat = I915_READ(pipestat_reg); | ||
364 | if (IS_I965G(dev)) | 435 | if (IS_I965G(dev)) |
365 | pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; | 436 | i915_enable_pipestat(dev_priv, pipe, |
437 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||
366 | else | 438 | else |
367 | pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; | 439 | i915_enable_pipestat(dev_priv, pipe, |
368 | /* Clear any stale interrupt status */ | 440 | PIPE_VBLANK_INTERRUPT_ENABLE); |
369 | pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | | ||
370 | PIPE_VBLANK_INTERRUPT_STATUS); | ||
371 | I915_WRITE(pipestat_reg, pipestat); | ||
372 | (void) I915_READ(pipestat_reg); /* Posting read */ | ||
373 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 441 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
374 | |||
375 | return 0; | 442 | return 0; |
376 | } | 443 | } |
377 | 444 | ||
@@ -381,40 +448,23 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
381 | void i915_disable_vblank(struct drm_device *dev, int pipe) | 448 | void i915_disable_vblank(struct drm_device *dev, int pipe) |
382 | { | 449 | { |
383 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 450 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
384 | u32 pipestat_reg = 0; | ||
385 | u32 pipestat; | ||
386 | u32 interrupt = 0; | ||
387 | unsigned long irqflags; | 451 | unsigned long irqflags; |
388 | 452 | ||
389 | switch (pipe) { | ||
390 | case 0: | ||
391 | pipestat_reg = PIPEASTAT; | ||
392 | interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | ||
393 | break; | ||
394 | case 1: | ||
395 | pipestat_reg = PIPEBSTAT; | ||
396 | interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
397 | break; | ||
398 | default: | ||
399 | DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", | ||
400 | pipe); | ||
401 | return; | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 453 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
406 | i915_disable_irq(dev_priv, interrupt); | 454 | i915_disable_pipestat(dev_priv, pipe, |
407 | pipestat = I915_READ(pipestat_reg); | 455 | PIPE_VBLANK_INTERRUPT_ENABLE | |
408 | pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | | 456 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
409 | PIPE_VBLANK_INTERRUPT_ENABLE); | ||
410 | /* Clear any stale interrupt status */ | ||
411 | pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | | ||
412 | PIPE_VBLANK_INTERRUPT_STATUS); | ||
413 | I915_WRITE(pipestat_reg, pipestat); | ||
414 | (void) I915_READ(pipestat_reg); /* Posting read */ | ||
415 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 457 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
416 | } | 458 | } |
417 | 459 | ||
460 | void i915_enable_interrupt (struct drm_device *dev) | ||
461 | { | ||
462 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
463 | opregion_enable_asle(dev); | ||
464 | dev_priv->irq_enabled = 1; | ||
465 | } | ||
466 | |||
467 | |||
418 | /* Set the vblank monitor pipe | 468 | /* Set the vblank monitor pipe |
419 | */ | 469 | */ |
420 | int i915_vblank_pipe_set(struct drm_device *dev, void *data, | 470 | int i915_vblank_pipe_set(struct drm_device *dev, void *data, |
@@ -475,33 +525,38 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
475 | { | 525 | { |
476 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 526 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
477 | 527 | ||
528 | atomic_set(&dev_priv->irq_received, 0); | ||
529 | |||
478 | I915_WRITE(HWSTAM, 0xeffe); | 530 | I915_WRITE(HWSTAM, 0xeffe); |
531 | I915_WRITE(PIPEASTAT, 0); | ||
532 | I915_WRITE(PIPEBSTAT, 0); | ||
479 | I915_WRITE(IMR, 0xffffffff); | 533 | I915_WRITE(IMR, 0xffffffff); |
480 | I915_WRITE(IER, 0x0); | 534 | I915_WRITE(IER, 0x0); |
535 | (void) I915_READ(IER); | ||
481 | } | 536 | } |
482 | 537 | ||
483 | int i915_driver_irq_postinstall(struct drm_device *dev) | 538 | int i915_driver_irq_postinstall(struct drm_device *dev) |
484 | { | 539 | { |
485 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 540 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
486 | int ret, num_pipes = 2; | ||
487 | |||
488 | /* Set initial unmasked IRQs to just the selected vblank pipes. */ | ||
489 | dev_priv->irq_mask_reg = ~0; | ||
490 | |||
491 | ret = drm_vblank_init(dev, num_pipes); | ||
492 | if (ret) | ||
493 | return ret; | ||
494 | 541 | ||
495 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 542 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
496 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | ||
497 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
498 | 543 | ||
499 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 544 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
500 | 545 | ||
501 | dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; | 546 | /* Unmask the interrupts that we always want on. */ |
547 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; | ||
548 | |||
549 | dev_priv->pipestat[0] = 0; | ||
550 | dev_priv->pipestat[1] = 0; | ||
551 | |||
552 | /* Disable pipe interrupt enables, clear pending pipe status */ | ||
553 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | ||
554 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | ||
555 | /* Clear pending interrupt status */ | ||
556 | I915_WRITE(IIR, I915_READ(IIR)); | ||
502 | 557 | ||
503 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
504 | I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); | 558 | I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); |
559 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
505 | (void) I915_READ(IER); | 560 | (void) I915_READ(IER); |
506 | 561 | ||
507 | opregion_enable_asle(dev); | 562 | opregion_enable_asle(dev); |
@@ -513,7 +568,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
513 | void i915_driver_irq_uninstall(struct drm_device * dev) | 568 | void i915_driver_irq_uninstall(struct drm_device * dev) |
514 | { | 569 | { |
515 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 570 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
516 | u32 temp; | ||
517 | 571 | ||
518 | if (!dev_priv) | 572 | if (!dev_priv) |
519 | return; | 573 | return; |
@@ -521,13 +575,12 @@ void i915_driver_irq_uninstall(struct drm_device * dev) | |||
521 | dev_priv->vblank_pipe = 0; | 575 | dev_priv->vblank_pipe = 0; |
522 | 576 | ||
523 | I915_WRITE(HWSTAM, 0xffffffff); | 577 | I915_WRITE(HWSTAM, 0xffffffff); |
578 | I915_WRITE(PIPEASTAT, 0); | ||
579 | I915_WRITE(PIPEBSTAT, 0); | ||
524 | I915_WRITE(IMR, 0xffffffff); | 580 | I915_WRITE(IMR, 0xffffffff); |
525 | I915_WRITE(IER, 0x0); | 581 | I915_WRITE(IER, 0x0); |
526 | 582 | ||
527 | temp = I915_READ(PIPEASTAT); | 583 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); |
528 | I915_WRITE(PIPEASTAT, temp); | 584 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); |
529 | temp = I915_READ(PIPEBSTAT); | 585 | I915_WRITE(IIR, I915_READ(IIR)); |
530 | I915_WRITE(PIPEBSTAT, temp); | ||
531 | temp = I915_READ(IIR); | ||
532 | I915_WRITE(IIR, temp); | ||
533 | } | 586 | } |
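The reworked handler copes with MSI edge semantics: an MSI is raised only when IIR goes from zero to nonzero, so the handler acks IIR, immediately re-reads it, and keeps looping until the re-read comes back empty instead of returning while bits are still pending. A toy userspace model of that loop, with a canned sequence of register reads standing in for IIR (everything here is a simulation, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* Successive values a real handler would see when reading IIR:
 * a pending bit, then another bit that arrived during handling,
 * then nothing left. */
static uint32_t fake_iir_source[] = { 0x2, 0x1, 0x0 };
static int read_idx;

static uint32_t read_iir(void)
{
	return fake_iir_source[read_idx++];
}

int main(void)
{
	uint32_t iir = read_iir();
	int handled = 0;

	for (;;) {
		uint32_t new_iir;

		if (!iir)
			break;
		new_iir = read_iir();	/* "ack, then re-read" step */
		printf("servicing bits 0x%x\n", (unsigned)iir);
		handled = 1;
		iir = new_iir;		/* loop again if more arrived */
	}
	printf("handled=%d\n", handled);
	return 0;
}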
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c index 6126a60dc9cb..96e271986d2a 100644 --- a/drivers/gpu/drm/i915/i915_mem.c +++ b/drivers/gpu/drm/i915/i915_mem.c | |||
@@ -46,7 +46,8 @@ | |||
46 | static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) | 46 | static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) |
47 | { | 47 | { |
48 | drm_i915_private_t *dev_priv = dev->dev_private; | 48 | drm_i915_private_t *dev_priv = dev->dev_private; |
49 | drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; | 49 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
50 | drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
50 | struct drm_tex_region *list; | 51 | struct drm_tex_region *list; |
51 | unsigned shift, nr; | 52 | unsigned shift, nr; |
52 | unsigned start; | 53 | unsigned start; |
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 1787a0c7e3ab..ff012835a386 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -235,17 +235,15 @@ void opregion_enable_asle(struct drm_device *dev) | |||
235 | struct opregion_asle *asle = dev_priv->opregion.asle; | 235 | struct opregion_asle *asle = dev_priv->opregion.asle; |
236 | 236 | ||
237 | if (asle) { | 237 | if (asle) { |
238 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | ||
239 | if (IS_MOBILE(dev)) { | 238 | if (IS_MOBILE(dev)) { |
240 | /* Many devices trigger events with a write to the | 239 | unsigned long irqflags; |
241 | legacy backlight controller, so we need to ensure | 240 | |
242 | that it's able to generate interrupts */ | 241 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
243 | I915_WRITE(PIPEBSTAT, pipeb_stats |= | 242 | i915_enable_pipestat(dev_priv, 1, |
244 | I915_LEGACY_BLC_EVENT_ENABLE); | 243 | I915_LEGACY_BLC_EVENT_ENABLE); |
245 | i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT | | 244 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, |
246 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); | 245 | irqflags); |
247 | } else | 246 | } |
248 | i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT); | ||
249 | 247 | ||
250 | asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | | 248 | asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | |
251 | ASLE_PFMB_EN; | 249 | ASLE_PFMB_EN; |
@@ -259,8 +257,8 @@ void opregion_enable_asle(struct drm_device *dev) | |||
259 | 257 | ||
260 | static struct intel_opregion *system_opregion; | 258 | static struct intel_opregion *system_opregion; |
261 | 259 | ||
262 | int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, | 260 | static int intel_opregion_video_event(struct notifier_block *nb, |
263 | void *data) | 261 | unsigned long val, void *data) |
264 | { | 262 | { |
265 | /* The only video events relevant to opregion are 0x80. These indicate | 263 | /* The only video events relevant to opregion are 0x80. These indicate |
266 | either a docking event, lid switch or display switch request. In | 264 | either a docking event, lid switch or display switch request. In |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0e476eba36e6..9d6539a868b3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #define INTEL_GMCH_MEM_64M 0x1 | 35 | #define INTEL_GMCH_MEM_64M 0x1 |
36 | #define INTEL_GMCH_MEM_128M 0 | 36 | #define INTEL_GMCH_MEM_128M 0 |
37 | 37 | ||
38 | #define INTEL_855_GMCH_GMS_MASK (0x7 << 4) | 38 | #define INTEL_GMCH_GMS_MASK (0xf << 4) |
39 | #define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) | 39 | #define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) |
40 | #define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) | 40 | #define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) |
41 | #define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) | 41 | #define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) |
@@ -45,6 +45,12 @@ | |||
45 | 45 | ||
46 | #define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) | 46 | #define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) |
47 | #define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) | 47 | #define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) |
48 | #define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
49 | #define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
50 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
51 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
52 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
53 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
48 | 54 | ||
49 | /* PCI config space */ | 55 | /* PCI config space */ |
50 | 56 | ||
@@ -175,9 +181,26 @@ | |||
175 | #define DISPLAY_PLANE_B (1<<20) | 181 | #define DISPLAY_PLANE_B (1<<20) |
176 | 182 | ||
177 | /* | 183 | /* |
178 | * Instruction and interrupt control regs | 184 | * Fence registers |
179 | */ | 185 | */ |
186 | #define FENCE_REG_830_0 0x2000 | ||
187 | #define I830_FENCE_START_MASK 0x07f80000 | ||
188 | #define I830_FENCE_TILING_Y_SHIFT 12 | ||
189 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) | ||
190 | #define I830_FENCE_PITCH_SHIFT 4 | ||
191 | #define I830_FENCE_REG_VALID (1<<0) | ||
192 | |||
193 | #define I915_FENCE_START_MASK 0x0ff00000 | ||
194 | #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) | ||
195 | |||
196 | #define FENCE_REG_965_0 0x03000 | ||
197 | #define I965_FENCE_PITCH_SHIFT 2 | ||
198 | #define I965_FENCE_TILING_Y_SHIFT 1 | ||
199 | #define I965_FENCE_REG_VALID (1<<0) | ||
180 | 200 | ||
201 | /* | ||
202 | * Instruction and interrupt control regs | ||
203 | */ | ||
181 | #define PRB0_TAIL 0x02030 | 204 | #define PRB0_TAIL 0x02030 |
182 | #define PRB0_HEAD 0x02034 | 205 | #define PRB0_HEAD 0x02034 |
183 | #define PRB0_START 0x02038 | 206 | #define PRB0_START 0x02038 |
@@ -245,6 +268,7 @@ | |||
245 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 268 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
246 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 269 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
247 | 270 | ||
271 | |||
248 | /* | 272 | /* |
249 | * Framebuffer compression (915+ only) | 273 | * Framebuffer compression (915+ only) |
250 | */ | 274 | */ |
@@ -522,6 +546,7 @@ | |||
522 | #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) | 546 | #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) |
523 | #define DCC_ADDRESSING_MODE_MASK (3 << 0) | 547 | #define DCC_ADDRESSING_MODE_MASK (3 << 0) |
524 | #define DCC_CHANNEL_XOR_DISABLE (1 << 10) | 548 | #define DCC_CHANNEL_XOR_DISABLE (1 << 10) |
549 | #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) | ||
525 | 550 | ||
526 | /** 965 MCH register controlling DRAM channel configuration */ | 551 | /** 965 MCH register controlling DRAM channel configuration */ |
527 | #define C0DRB3 0x10206 | 552 | #define C0DRB3 0x10206 |
@@ -530,6 +555,8 @@ | |||
530 | /** GM965 GM45 render standby register */ | 555 | /** GM965 GM45 render standby register */ |
531 | #define MCHBAR_RENDER_STANDBY 0x111B8 | 556 | #define MCHBAR_RENDER_STANDBY 0x111B8 |
532 | 557 | ||
558 | #define PEG_BAND_GAP_DATA 0x14d68 | ||
559 | |||
533 | /* | 560 | /* |
534 | * Overlay regs | 561 | * Overlay regs |
535 | */ | 562 | */ |
@@ -593,6 +620,9 @@ | |||
593 | 620 | ||
594 | /* Hotplug control (945+ only) */ | 621 | /* Hotplug control (945+ only) */ |
595 | #define PORT_HOTPLUG_EN 0x61110 | 622 | #define PORT_HOTPLUG_EN 0x61110 |
623 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) | ||
624 | #define HDMIC_HOTPLUG_INT_EN (1 << 28) | ||
625 | #define HDMID_HOTPLUG_INT_EN (1 << 27) | ||
596 | #define SDVOB_HOTPLUG_INT_EN (1 << 26) | 626 | #define SDVOB_HOTPLUG_INT_EN (1 << 26) |
597 | #define SDVOC_HOTPLUG_INT_EN (1 << 25) | 627 | #define SDVOC_HOTPLUG_INT_EN (1 << 25) |
598 | #define TV_HOTPLUG_INT_EN (1 << 18) | 628 | #define TV_HOTPLUG_INT_EN (1 << 18) |
@@ -600,6 +630,9 @@ | |||
600 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) | 630 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) |
601 | 631 | ||
602 | #define PORT_HOTPLUG_STAT 0x61114 | 632 | #define PORT_HOTPLUG_STAT 0x61114 |
633 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | ||
634 | #define HDMIC_HOTPLUG_INT_STATUS (1 << 28) | ||
635 | #define HDMID_HOTPLUG_INT_STATUS (1 << 27) | ||
603 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) | 636 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) |
604 | #define TV_HOTPLUG_INT_STATUS (1 << 10) | 637 | #define TV_HOTPLUG_INT_STATUS (1 << 10) |
605 | #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) | 638 | #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) |
@@ -629,7 +662,16 @@ | |||
629 | #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) | 662 | #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) |
630 | #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) | 663 | #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) |
631 | #define SDVOC_GANG_MODE (1 << 16) | 664 | #define SDVOC_GANG_MODE (1 << 16) |
665 | #define SDVO_ENCODING_SDVO (0x0 << 10) | ||
666 | #define SDVO_ENCODING_HDMI (0x2 << 10) | ||
667 | /** Required for HDMI operation */ | ||
668 | #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) | ||
632 | #define SDVO_BORDER_ENABLE (1 << 7) | 669 | #define SDVO_BORDER_ENABLE (1 << 7) |
670 | #define SDVO_AUDIO_ENABLE (1 << 6) | ||
671 | /** New with 965, default is to be set */ | ||
672 | #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) | ||
673 | /** New with 965, default is to be set */ | ||
674 | #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) | ||
633 | #define SDVOB_PCIE_CONCURRENCY (1 << 3) | 675 | #define SDVOB_PCIE_CONCURRENCY (1 << 3) |
634 | #define SDVO_DETECTED (1 << 2) | 676 | #define SDVO_DETECTED (1 << 2) |
635 | /* Bits to be preserved when writing */ | 677 | /* Bits to be preserved when writing */ |
@@ -1329,6 +1371,9 @@ | |||
1329 | #define PIPE_FRAME_LOW_SHIFT 24 | 1371 | #define PIPE_FRAME_LOW_SHIFT 24 |
1330 | #define PIPE_PIXEL_MASK 0x00ffffff | 1372 | #define PIPE_PIXEL_MASK 0x00ffffff |
1331 | #define PIPE_PIXEL_SHIFT 0 | 1373 | #define PIPE_PIXEL_SHIFT 0 |
1374 | /* GM45+ just has to be different */ | ||
1375 | #define PIPEA_FRMCOUNT_GM45 0x70040 | ||
1376 | #define PIPEA_FLIPCOUNT_GM45 0x70044 | ||
1332 | 1377 | ||
1333 | /* Cursor A & B regs */ | 1378 | /* Cursor A & B regs */ |
1334 | #define CURACNTR 0x70080 | 1379 | #define CURACNTR 0x70080 |
@@ -1397,6 +1442,9 @@ | |||
1397 | #define PIPEBSTAT 0x71024 | 1442 | #define PIPEBSTAT 0x71024 |
1398 | #define PIPEBFRAMEHIGH 0x71040 | 1443 | #define PIPEBFRAMEHIGH 0x71040 |
1399 | #define PIPEBFRAMEPIXEL 0x71044 | 1444 | #define PIPEBFRAMEPIXEL 0x71044 |
1445 | #define PIPEB_FRMCOUNT_GM45 0x71040 | ||
1446 | #define PIPEB_FLIPCOUNT_GM45 0x71044 | ||
1447 | |||
1400 | 1448 | ||
1401 | /* Display B control */ | 1449 | /* Display B control */ |
1402 | #define DSPBCNTR 0x71180 | 1450 | #define DSPBCNTR 0x71180 |
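The new fence-register macros encode the fenced size as a small field: ffs() of the size in the chip's granularity (1MB units on 9xx, 512KB units on 8xx) minus one, shifted to bit 8, which is just log2 of the size in those units. A quick userspace check with the macro bodies copied from the hunk above (ffs() here is the POSIX one from <strings.h>):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same expressions as the new fence macros, evaluated in userspace. */
#define I915_FENCE_SIZE_BITS(size)	((ffs((size) >> 20) - 1) << 8)
#define I830_FENCE_SIZE_BITS(size)	((ffs((size) >> 19) - 1) << 8)

int main(void)
{
	/* 9xx fences are sized in MB: 1MB -> 0x0, 2MB -> 0x100, 8MB -> 0x300. */
	printf("0x%x 0x%x 0x%x\n",
	       I915_FENCE_SIZE_BITS(1 << 20),
	       I915_FENCE_SIZE_BITS(2 << 20),
	       I915_FENCE_SIZE_BITS(8 << 20));
	/* 8xx fences are sized in 512KB units: 512KB -> 0x0, 1MB -> 0x100. */
	printf("0x%x 0x%x\n",
	       I830_FENCE_SIZE_BITS(512 << 10),
	       I830_FENCE_SIZE_BITS(1 << 20));
	return 0;
}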
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 5ddc6e595c0c..5d84027ee8f3 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -244,6 +244,9 @@ int i915_save_state(struct drm_device *dev) | |||
244 | if (IS_I965G(dev) && IS_MOBILE(dev)) | 244 | if (IS_I965G(dev) && IS_MOBILE(dev)) |
245 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | 245 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); |
246 | 246 | ||
247 | /* Hardware status page */ | ||
248 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
249 | |||
247 | /* Display arbitration control */ | 250 | /* Display arbitration control */ |
248 | dev_priv->saveDSPARB = I915_READ(DSPARB); | 251 | dev_priv->saveDSPARB = I915_READ(DSPARB); |
249 | 252 | ||
@@ -373,6 +376,9 @@ int i915_restore_state(struct drm_device *dev) | |||
373 | if (IS_I965G(dev) && IS_MOBILE(dev)) | 376 | if (IS_I965G(dev) && IS_MOBILE(dev)) |
374 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | 377 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); |
375 | 378 | ||
379 | /* Hardware status page */ | ||
380 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | ||
381 | |||
376 | /* Display arbitration */ | 382 | /* Display arbitration */ |
377 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); | 383 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); |
378 | 384 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c new file mode 100644 index 000000000000..4ca82a025525 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
21 | * SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * | ||
26 | */ | ||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "i915_drm.h" | ||
30 | #include "i915_drv.h" | ||
31 | #include "intel_bios.h" | ||
32 | |||
33 | |||
34 | static void * | ||
35 | find_section(struct bdb_header *bdb, int section_id) | ||
36 | { | ||
37 | u8 *base = (u8 *)bdb; | ||
38 | int index = 0; | ||
39 | u16 total, current_size; | ||
40 | u8 current_id; | ||
41 | |||
42 | /* skip to first section */ | ||
43 | index += bdb->header_size; | ||
44 | total = bdb->bdb_size; | ||
45 | |||
46 | /* walk the sections looking for section_id */ | ||
47 | while (index < total) { | ||
48 | current_id = *(base + index); | ||
49 | index++; | ||
50 | current_size = *((u16 *)(base + index)); | ||
51 | index += 2; | ||
52 | if (current_id == section_id) | ||
53 | return base + index; | ||
54 | index += current_size; | ||
55 | } | ||
56 | |||
57 | return NULL; | ||
58 | } | ||
59 | |||
60 | /* Try to find panel data */ | ||
61 | static void | ||
62 | parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | ||
63 | { | ||
64 | struct bdb_lvds_options *lvds_options; | ||
65 | struct bdb_lvds_lfp_data *lvds_lfp_data; | ||
66 | struct bdb_lvds_lfp_data_entry *entry; | ||
67 | struct lvds_dvo_timing *dvo_timing; | ||
68 | struct drm_display_mode *panel_fixed_mode; | ||
69 | |||
70 | /* Defaults if we can't find VBT info */ | ||
71 | dev_priv->lvds_dither = 0; | ||
72 | dev_priv->lvds_vbt = 0; | ||
73 | |||
74 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); | ||
75 | if (!lvds_options) | ||
76 | return; | ||
77 | |||
78 | dev_priv->lvds_dither = lvds_options->pixel_dither; | ||
79 | if (lvds_options->panel_type == 0xff) | ||
80 | return; | ||
81 | |||
82 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); | ||
83 | if (!lvds_lfp_data) | ||
84 | return; | ||
85 | |||
86 | dev_priv->lvds_vbt = 1; | ||
87 | |||
88 | entry = &lvds_lfp_data->data[lvds_options->panel_type]; | ||
89 | dvo_timing = &entry->dvo_timing; | ||
90 | |||
91 | panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), | ||
92 | DRM_MEM_DRIVER); | ||
93 | |||
94 | panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | | ||
95 | dvo_timing->hactive_lo; | ||
96 | panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + | ||
97 | ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); | ||
98 | panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + | ||
99 | dvo_timing->hsync_pulse_width; | ||
100 | panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + | ||
101 | ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); | ||
102 | |||
103 | panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | | ||
104 | dvo_timing->vactive_lo; | ||
105 | panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + | ||
106 | dvo_timing->vsync_off; | ||
107 | panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + | ||
108 | dvo_timing->vsync_pulse_width; | ||
109 | panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + | ||
110 | ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); | ||
111 | panel_fixed_mode->clock = dvo_timing->clock * 10; | ||
112 | panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; | ||
113 | |||
114 | drm_mode_set_name(panel_fixed_mode); | ||
115 | |||
116 | dev_priv->vbt_mode = panel_fixed_mode; | ||
117 | |||
118 | DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); | ||
119 | drm_mode_debug_printmodeline(panel_fixed_mode); | ||
120 | |||
121 | return; | ||
122 | } | ||
123 | |||
124 | static void | ||
125 | parse_general_features(struct drm_i915_private *dev_priv, | ||
126 | struct bdb_header *bdb) | ||
127 | { | ||
128 | struct bdb_general_features *general; | ||
129 | |||
130 | /* Set sensible defaults in case we can't find the general block */ | ||
131 | dev_priv->int_tv_support = 1; | ||
132 | dev_priv->int_crt_support = 1; | ||
133 | |||
134 | general = find_section(bdb, BDB_GENERAL_FEATURES); | ||
135 | if (general) { | ||
136 | dev_priv->int_tv_support = general->int_tv_support; | ||
137 | dev_priv->int_crt_support = general->int_crt_support; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | /** | ||
142 | * intel_init_bios - initialize VBIOS settings & find VBT | ||
143 | * @dev: DRM device | ||
144 | * | ||
145 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers | ||
146 | * to appropriate values. | ||
147 | * | ||
148 | * VBT existence is a sanity check that is relied on by other i830_bios.c code. | ||
149 | * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may | ||
150 | * feed an updated VBT back through that, compared to what we'll fetch using | ||
151 | * this method of groping around in the BIOS data. | ||
152 | * | ||
153 | * Returns 0 on success, nonzero on failure. | ||
154 | */ | ||
155 | bool | ||
156 | intel_init_bios(struct drm_device *dev) | ||
157 | { | ||
158 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
159 | struct pci_dev *pdev = dev->pdev; | ||
160 | struct vbt_header *vbt = NULL; | ||
161 | struct bdb_header *bdb; | ||
162 | u8 __iomem *bios; | ||
163 | size_t size; | ||
164 | int i; | ||
165 | |||
166 | bios = pci_map_rom(pdev, &size); | ||
167 | if (!bios) | ||
168 | return -1; | ||
169 | |||
170 | /* Scour memory looking for the VBT signature */ | ||
171 | for (i = 0; i + 4 < size; i++) { | ||
172 | if (!memcmp(bios + i, "$VBT", 4)) { | ||
173 | vbt = (struct vbt_header *)(bios + i); | ||
174 | break; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | if (!vbt) { | ||
179 | DRM_ERROR("VBT signature missing\n"); | ||
180 | pci_unmap_rom(pdev, bios); | ||
181 | return -1; | ||
182 | } | ||
183 | |||
184 | bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); | ||
185 | |||
186 | /* Grab useful general definitions */ | ||
187 | parse_general_features(dev_priv, bdb); | ||
188 | parse_panel_data(dev_priv, bdb); | ||
189 | |||
190 | pci_unmap_rom(pdev, bios); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
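find_section() treats the BDB body as a sequence of records, a one-byte id followed by a two-byte size and then the payload, and scans them linearly after skipping the header. A standalone sketch of the same walk over a synthetic buffer (the header skip is omitted, the record ids are made up, and host little-endian is assumed, as in the pointer cast above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk [id][u16 size][payload] records and return a pointer to the
 * payload of the first record whose id matches. */
static const uint8_t *find_record(const uint8_t *buf, int len, uint8_t want_id)
{
	int index = 0;

	while (index + 3 <= len) {
		uint8_t id = buf[index++];
		uint16_t size;

		memcpy(&size, buf + index, sizeof(size)); /* host-endian, like the cast */
		index += 2;
		if (id == want_id)
			return buf + index;
		index += size;	/* skip this record's payload */
	}
	return NULL;
}

int main(void)
{
	/* Two records: id 1 with a 2-byte payload "hi", id 40 with one byte 0x7f. */
	static const uint8_t bdb[] = { 1, 2, 0, 'h', 'i', 40, 1, 0, 0x7f };
	const uint8_t *p = find_record(bdb, sizeof(bdb), 40);

	printf("%s\n", p && *p == 0x7f ? "found record 40" : "missing");
	return 0;
}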
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h new file mode 100644 index 000000000000..5ea715ace3a0 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -0,0 +1,405 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
21 | * SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #ifndef _I830_BIOS_H_ | ||
29 | #define _I830_BIOS_H_ | ||
30 | |||
31 | #include "drmP.h" | ||
32 | |||
33 | struct vbt_header { | ||
34 | u8 signature[20]; /**< Always starts with '$VBT' */ | ||
35 | u16 version; /**< decimal */ | ||
36 | u16 header_size; /**< in bytes */ | ||
37 | u16 vbt_size; /**< in bytes */ | ||
38 | u8 vbt_checksum; | ||
39 | u8 reserved0; | ||
40 | u32 bdb_offset; /**< from beginning of VBT */ | ||
41 | u32 aim_offset[4]; /**< from beginning of VBT */ | ||
42 | } __attribute__((packed)); | ||
43 | |||
44 | struct bdb_header { | ||
45 | u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ | ||
46 | u16 version; /**< decimal */ | ||
47 | u16 header_size; /**< in bytes */ | ||
48 | u16 bdb_size; /**< in bytes */ | ||
49 | }; | ||
50 | |||
51 | /* strictly speaking, this is a "skip" block, but it has interesting info */ | ||
52 | struct vbios_data { | ||
53 | u8 type; /* 0 == desktop, 1 == mobile */ | ||
54 | u8 relstage; | ||
55 | u8 chipset; | ||
56 | u8 lvds_present:1; | ||
57 | u8 tv_present:1; | ||
58 | u8 rsvd2:6; /* finish byte */ | ||
59 | u8 rsvd3[4]; | ||
60 | u8 signon[155]; | ||
61 | u8 copyright[61]; | ||
62 | u16 code_segment; | ||
63 | u8 dos_boot_mode; | ||
64 | u8 bandwidth_percent; | ||
65 | u8 rsvd4; /* popup memory size */ | ||
66 | u8 resize_pci_bios; | ||
67 | u8 rsvd5; /* is crt already on ddc2 */ | ||
68 | } __attribute__((packed)); | ||
69 | |||
70 | /* | ||
71 | * There are several types of BIOS data blocks (BDBs), each block has | ||
72 | * an ID and size in the first 3 bytes (ID in first, size in next 2). | ||
73 | * Known types are listed below. | ||
74 | */ | ||
75 | #define BDB_GENERAL_FEATURES 1 | ||
76 | #define BDB_GENERAL_DEFINITIONS 2 | ||
77 | #define BDB_OLD_TOGGLE_LIST 3 | ||
78 | #define BDB_MODE_SUPPORT_LIST 4 | ||
79 | #define BDB_GENERIC_MODE_TABLE 5 | ||
80 | #define BDB_EXT_MMIO_REGS 6 | ||
81 | #define BDB_SWF_IO 7 | ||
82 | #define BDB_SWF_MMIO 8 | ||
83 | #define BDB_DOT_CLOCK_TABLE 9 | ||
84 | #define BDB_MODE_REMOVAL_TABLE 10 | ||
85 | #define BDB_CHILD_DEVICE_TABLE 11 | ||
86 | #define BDB_DRIVER_FEATURES 12 | ||
87 | #define BDB_DRIVER_PERSISTENCE 13 | ||
88 | #define BDB_EXT_TABLE_PTRS 14 | ||
89 | #define BDB_DOT_CLOCK_OVERRIDE 15 | ||
90 | #define BDB_DISPLAY_SELECT 16 | ||
91 | /* 17 rsvd */ | ||
92 | #define BDB_DRIVER_ROTATION 18 | ||
93 | #define BDB_DISPLAY_REMOVE 19 | ||
94 | #define BDB_OEM_CUSTOM 20 | ||
95 | #define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */ | ||
96 | #define BDB_SDVO_LVDS_OPTIONS 22 | ||
97 | #define BDB_SDVO_PANEL_DTDS 23 | ||
98 | #define BDB_SDVO_LVDS_PNP_IDS 24 | ||
99 | #define BDB_SDVO_LVDS_POWER_SEQ 25 | ||
100 | #define BDB_TV_OPTIONS 26 | ||
101 | #define BDB_LVDS_OPTIONS 40 | ||
102 | #define BDB_LVDS_LFP_DATA_PTRS 41 | ||
103 | #define BDB_LVDS_LFP_DATA 42 | ||
104 | #define BDB_LVDS_BACKLIGHT 43 | ||
105 | #define BDB_LVDS_POWER 44 | ||
106 | #define BDB_SKIP 254 /* VBIOS private block, ignore */ | ||
107 | |||
108 | struct bdb_general_features { | ||
109 | /* bits 1 */ | ||
110 | u8 panel_fitting:2; | ||
111 | u8 flexaim:1; | ||
112 | u8 msg_enable:1; | ||
113 | u8 clear_screen:3; | ||
114 | u8 color_flip:1; | ||
115 | |||
116 | /* bits 2 */ | ||
117 | u8 download_ext_vbt:1; | ||
118 | u8 enable_ssc:1; | ||
119 | u8 ssc_freq:1; | ||
120 | u8 enable_lfp_on_override:1; | ||
121 | u8 disable_ssc_ddt:1; | ||
122 | u8 rsvd8:3; /* finish byte */ | ||
123 | |||
124 | /* bits 3 */ | ||
125 | u8 disable_smooth_vision:1; | ||
126 | u8 single_dvi:1; | ||
127 | u8 rsvd9:6; /* finish byte */ | ||
128 | |||
129 | /* bits 4 */ | ||
130 | u8 legacy_monitor_detect; | ||
131 | |||
132 | /* bits 5 */ | ||
133 | u8 int_crt_support:1; | ||
134 | u8 int_tv_support:1; | ||
135 | u8 rsvd11:6; /* finish byte */ | ||
136 | } __attribute__((packed)); | ||
137 | |||
138 | struct bdb_general_definitions { | ||
139 | /* DDC GPIO */ | ||
140 | u8 crt_ddc_gmbus_pin; | ||
141 | |||
142 | /* DPMS bits */ | ||
143 | u8 dpms_acpi:1; | ||
144 | u8 skip_boot_crt_detect:1; | ||
145 | u8 dpms_aim:1; | ||
146 | u8 rsvd1:5; /* finish byte */ | ||
147 | |||
148 | /* boot device bits */ | ||
149 | u8 boot_display[2]; | ||
150 | u8 child_dev_size; | ||
151 | |||
152 | /* device info */ | ||
153 | u8 tv_or_lvds_info[33]; | ||
154 | u8 dev1[33]; | ||
155 | u8 dev2[33]; | ||
156 | u8 dev3[33]; | ||
157 | u8 dev4[33]; | ||
158 | /* may be another device block here on some platforms */ | ||
159 | }; | ||
160 | |||
161 | struct bdb_lvds_options { | ||
162 | u8 panel_type; | ||
163 | u8 rsvd1; | ||
164 | /* LVDS capabilities, stored in a dword */ | ||
165 | u8 rsvd2:1; | ||
166 | u8 lvds_edid:1; | ||
167 | u8 pixel_dither:1; | ||
168 | u8 pfit_ratio_auto:1; | ||
169 | u8 pfit_gfx_mode_enhanced:1; | ||
170 | u8 pfit_text_mode_enhanced:1; | ||
171 | u8 pfit_mode:2; | ||
172 | u8 rsvd4; | ||
173 | } __attribute__((packed)); | ||
174 | |||
175 | /* LFP pointer table contains entries to the struct below */ | ||
176 | struct bdb_lvds_lfp_data_ptr { | ||
177 | u16 fp_timing_offset; /* offsets are from start of bdb */ | ||
178 | u8 fp_table_size; | ||
179 | u16 dvo_timing_offset; | ||
180 | u8 dvo_table_size; | ||
181 | u16 panel_pnp_id_offset; | ||
182 | u8 pnp_table_size; | ||
183 | } __attribute__((packed)); | ||
184 | |||
185 | struct bdb_lvds_lfp_data_ptrs { | ||
186 | u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ | ||
187 | struct bdb_lvds_lfp_data_ptr ptr[16]; | ||
188 | } __attribute__((packed)); | ||
189 | |||
190 | /* LFP data has 3 blocks per entry */ | ||
191 | struct lvds_fp_timing { | ||
192 | u16 x_res; | ||
193 | u16 y_res; | ||
194 | u32 lvds_reg; | ||
195 | u32 lvds_reg_val; | ||
196 | u32 pp_on_reg; | ||
197 | u32 pp_on_reg_val; | ||
198 | u32 pp_off_reg; | ||
199 | u32 pp_off_reg_val; | ||
200 | u32 pp_cycle_reg; | ||
201 | u32 pp_cycle_reg_val; | ||
202 | u32 pfit_reg; | ||
203 | u32 pfit_reg_val; | ||
204 | u16 terminator; | ||
205 | } __attribute__((packed)); | ||
206 | |||
207 | struct lvds_dvo_timing { | ||
208 | u16 clock; /**< In 10khz */ | ||
209 | u8 hactive_lo; | ||
210 | u8 hblank_lo; | ||
211 | u8 hblank_hi:4; | ||
212 | u8 hactive_hi:4; | ||
213 | u8 vactive_lo; | ||
214 | u8 vblank_lo; | ||
215 | u8 vblank_hi:4; | ||
216 | u8 vactive_hi:4; | ||
217 | u8 hsync_off_lo; | ||
218 | u8 hsync_pulse_width; | ||
219 | u8 vsync_pulse_width:4; | ||
220 | u8 vsync_off:4; | ||
221 | u8 rsvd0:6; | ||
222 | u8 hsync_off_hi:2; | ||
223 | u8 h_image; | ||
224 | u8 v_image; | ||
225 | u8 max_hv; | ||
226 | u8 h_border; | ||
227 | u8 v_border; | ||
228 | u8 rsvd1:3; | ||
229 | u8 digital:2; | ||
230 | u8 vsync_positive:1; | ||
231 | u8 hsync_positive:1; | ||
232 | u8 rsvd2:1; | ||
233 | } __attribute__((packed)); | ||
234 | |||
235 | struct lvds_pnp_id { | ||
236 | u16 mfg_name; | ||
237 | u16 product_code; | ||
238 | u32 serial; | ||
239 | u8 mfg_week; | ||
240 | u8 mfg_year; | ||
241 | } __attribute__((packed)); | ||
242 | |||
243 | struct bdb_lvds_lfp_data_entry { | ||
244 | struct lvds_fp_timing fp_timing; | ||
245 | struct lvds_dvo_timing dvo_timing; | ||
246 | struct lvds_pnp_id pnp_id; | ||
247 | } __attribute__((packed)); | ||
248 | |||
249 | struct bdb_lvds_lfp_data { | ||
250 | struct bdb_lvds_lfp_data_entry data[16]; | ||
251 | } __attribute__((packed)); | ||
252 | |||
253 | struct aimdb_header { | ||
254 | char signature[16]; | ||
255 | char oem_device[20]; | ||
256 | u16 aimdb_version; | ||
257 | u16 aimdb_header_size; | ||
258 | u16 aimdb_size; | ||
259 | } __attribute__((packed)); | ||
260 | |||
261 | struct aimdb_block { | ||
262 | u8 aimdb_id; | ||
263 | u16 aimdb_size; | ||
264 | } __attribute__((packed)); | ||
265 | |||
266 | struct vch_panel_data { | ||
267 | u16 fp_timing_offset; | ||
268 | u8 fp_timing_size; | ||
269 | u16 dvo_timing_offset; | ||
270 | u8 dvo_timing_size; | ||
271 | u16 text_fitting_offset; | ||
272 | u8 text_fitting_size; | ||
273 | u16 graphics_fitting_offset; | ||
274 | u8 graphics_fitting_size; | ||
275 | } __attribute__((packed)); | ||
276 | |||
277 | struct vch_bdb_22 { | ||
278 | struct aimdb_block aimdb_block; | ||
279 | struct vch_panel_data panels[16]; | ||
280 | } __attribute__((packed)); | ||
281 | |||
282 | bool intel_init_bios(struct drm_device *dev); | ||
283 | |||
284 | /* | ||
285 | * Driver<->VBIOS interaction occurs through scratch bits in | ||
286 | * GR18 & SWF*. | ||
287 | */ | ||
288 | |||
289 | /* GR18 bits are set on display switch and hotkey events */ | ||
290 | #define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ | ||
291 | #define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ | ||
292 | #define GR18_HK_NONE (0x0<<3) | ||
293 | #define GR18_HK_LFP_STRETCH (0x1<<3) | ||
294 | #define GR18_HK_TOGGLE_DISP (0x2<<3) | ||
295 | #define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ | ||
296 | #define GR18_HK_POPUP_DISABLED (0x6<<3) | ||
297 | #define GR18_HK_POPUP_ENABLED (0x7<<3) | ||
298 | #define GR18_HK_PFIT (0x8<<3) | ||
299 | #define GR18_HK_APM_CHANGE (0xa<<3) | ||
300 | #define GR18_HK_MULTIPLE (0xc<<3) | ||
301 | #define GR18_USER_INT_EN (1<<2) | ||
302 | #define GR18_A0000_FLUSH_EN (1<<1) | ||
303 | #define GR18_SMM_EN (1<<0) | ||
304 | |||
305 | /* Set by driver, cleared by VBIOS */ | ||
306 | #define SWF00_YRES_SHIFT 16 | ||
307 | #define SWF00_XRES_SHIFT 0 | ||
308 | #define SWF00_RES_MASK 0xffff | ||
309 | |||
310 | /* Set by VBIOS at boot time and driver at runtime */ | ||
311 | #define SWF01_TV2_FORMAT_SHIFT 8 | ||
312 | #define SWF01_TV1_FORMAT_SHIFT 0 | ||
313 | #define SWF01_TV_FORMAT_MASK 0xffff | ||
314 | |||
315 | #define SWF10_VBIOS_BLC_I2C_EN (1<<29) | ||
316 | #define SWF10_GTT_OVERRIDE_EN (1<<28) | ||
317 | #define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ | ||
318 | #define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) | ||
319 | #define SWF10_OLD_TOGGLE 0x0 | ||
320 | #define SWF10_TOGGLE_LIST_1 0x1 | ||
321 | #define SWF10_TOGGLE_LIST_2 0x2 | ||
322 | #define SWF10_TOGGLE_LIST_3 0x3 | ||
323 | #define SWF10_TOGGLE_LIST_4 0x4 | ||
324 | #define SWF10_PANNING_EN (1<<23) | ||
325 | #define SWF10_DRIVER_LOADED (1<<22) | ||
326 | #define SWF10_EXTENDED_DESKTOP (1<<21) | ||
327 | #define SWF10_EXCLUSIVE_MODE (1<<20) | ||
328 | #define SWF10_OVERLAY_EN (1<<19) | ||
329 | #define SWF10_PLANEB_HOLDOFF (1<<18) | ||
330 | #define SWF10_PLANEA_HOLDOFF (1<<17) | ||
331 | #define SWF10_VGA_HOLDOFF (1<<16) | ||
332 | #define SWF10_ACTIVE_DISP_MASK 0xffff | ||
333 | #define SWF10_PIPEB_LFP2 (1<<15) | ||
334 | #define SWF10_PIPEB_EFP2 (1<<14) | ||
335 | #define SWF10_PIPEB_TV2 (1<<13) | ||
336 | #define SWF10_PIPEB_CRT2 (1<<12) | ||
337 | #define SWF10_PIPEB_LFP (1<<11) | ||
338 | #define SWF10_PIPEB_EFP (1<<10) | ||
339 | #define SWF10_PIPEB_TV (1<<9) | ||
340 | #define SWF10_PIPEB_CRT (1<<8) | ||
341 | #define SWF10_PIPEA_LFP2 (1<<7) | ||
342 | #define SWF10_PIPEA_EFP2 (1<<6) | ||
343 | #define SWF10_PIPEA_TV2 (1<<5) | ||
344 | #define SWF10_PIPEA_CRT2 (1<<4) | ||
345 | #define SWF10_PIPEA_LFP (1<<3) | ||
346 | #define SWF10_PIPEA_EFP (1<<2) | ||
347 | #define SWF10_PIPEA_TV (1<<1) | ||
348 | #define SWF10_PIPEA_CRT (1<<0) | ||
349 | |||
350 | #define SWF11_MEMORY_SIZE_SHIFT 16 | ||
351 | #define SWF11_SV_TEST_EN (1<<15) | ||
352 | #define SWF11_IS_AGP (1<<14) | ||
353 | #define SWF11_DISPLAY_HOLDOFF (1<<13) | ||
354 | #define SWF11_DPMS_REDUCED (1<<12) | ||
355 | #define SWF11_IS_VBE_MODE (1<<11) | ||
356 | #define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ | ||
357 | #define SWF11_DPMS_MASK 0x07 | ||
358 | #define SWF11_DPMS_OFF (1<<2) | ||
359 | #define SWF11_DPMS_SUSPEND (1<<1) | ||
360 | #define SWF11_DPMS_STANDBY (1<<0) | ||
361 | #define SWF11_DPMS_ON 0 | ||
362 | |||
363 | #define SWF14_GFX_PFIT_EN (1<<31) | ||
364 | #define SWF14_TEXT_PFIT_EN (1<<30) | ||
365 | #define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ | ||
366 | #define SWF14_POPUP_EN (1<<28) | ||
367 | #define SWF14_DISPLAY_HOLDOFF (1<<27) | ||
368 | #define SWF14_DISP_DETECT_EN (1<<26) | ||
369 | #define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ | ||
370 | #define SWF14_DRIVER_STATUS (1<<24) | ||
371 | #define SWF14_OS_TYPE_WIN9X (1<<23) | ||
372 | #define SWF14_OS_TYPE_WINNT (1<<22) | ||
373 | /* 21:19 rsvd */ | ||
374 | #define SWF14_PM_TYPE_MASK 0x00070000 | ||
375 | #define SWF14_PM_ACPI_VIDEO (0x4 << 16) | ||
376 | #define SWF14_PM_ACPI (0x3 << 16) | ||
377 | #define SWF14_PM_APM_12 (0x2 << 16) | ||
378 | #define SWF14_PM_APM_11 (0x1 << 16) | ||
379 | #define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ | ||
380 | /* if GR18 indicates a display switch */ | ||
381 | #define SWF14_DS_PIPEB_LFP2_EN (1<<15) | ||
382 | #define SWF14_DS_PIPEB_EFP2_EN (1<<14) | ||
383 | #define SWF14_DS_PIPEB_TV2_EN (1<<13) | ||
384 | #define SWF14_DS_PIPEB_CRT2_EN (1<<12) | ||
385 | #define SWF14_DS_PIPEB_LFP_EN (1<<11) | ||
386 | #define SWF14_DS_PIPEB_EFP_EN (1<<10) | ||
387 | #define SWF14_DS_PIPEB_TV_EN (1<<9) | ||
388 | #define SWF14_DS_PIPEB_CRT_EN (1<<8) | ||
389 | #define SWF14_DS_PIPEA_LFP2_EN (1<<7) | ||
390 | #define SWF14_DS_PIPEA_EFP2_EN (1<<6) | ||
391 | #define SWF14_DS_PIPEA_TV2_EN (1<<5) | ||
392 | #define SWF14_DS_PIPEA_CRT2_EN (1<<4) | ||
393 | #define SWF14_DS_PIPEA_LFP_EN (1<<3) | ||
394 | #define SWF14_DS_PIPEA_EFP_EN (1<<2) | ||
395 | #define SWF14_DS_PIPEA_TV_EN (1<<1) | ||
396 | #define SWF14_DS_PIPEA_CRT_EN (1<<0) | ||
397 | /* if GR18 indicates a panel fitting request */ | ||
398 | #define SWF14_PFIT_EN (1<<0) /* 0 means disable */ | ||
399 | /* if GR18 indicates an APM change request */ | ||
400 | #define SWF14_APM_HIBERNATE 0x4 | ||
401 | #define SWF14_APM_SUSPEND 0x3 | ||
402 | #define SWF14_APM_STANDBY 0x1 | ||
403 | #define SWF14_APM_RESTORE 0x0 | ||
404 | |||
405 | #endif /* _I830_BIOS_H_ */ | ||
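The block list defined earlier in this header encodes each BDB section as an ID byte followed by a little-endian 16-bit size, so locating a section is a linear walk starting at bdb_header. The sketch below is a minimal illustration of that layout only; the helper name find_bdb_block is hypothetical, the size field is assumed to count only the payload, and the real lookup code in this series lives in intel_bios.c (not shown here).

	static const void *find_bdb_block(const struct bdb_header *bdb, int id)
	{
		const u8 *base = (const u8 *)bdb;
		const u8 *p = base + bdb->header_size;	/* first block follows the header */
		const u8 *end = base + bdb->bdb_size;

		while (p + 3 <= end) {
			u8 block_id = p[0];
			u16 block_size = p[1] | (p[2] << 8);	/* assumed: payload bytes only */

			if (block_id == id)
				return p + 3;			/* payload starts after ID + size */
			p += 3 + block_size;			/* skip ID, size and payload */
		}
		return NULL;
	}

intel_init_bios() would presumably use a walk of this shape to find BDB_LVDS_OPTIONS and BDB_LVDS_LFP_DATA when filling in the panel's fixed mode.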
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c new file mode 100644 index 000000000000..dcaed3466e83 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2007 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | */ | ||
26 | |||
27 | #include <linux/i2c.h> | ||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | #include "drm_crtc.h" | ||
31 | #include "drm_crtc_helper.h" | ||
32 | #include "intel_drv.h" | ||
33 | #include "i915_drm.h" | ||
34 | #include "i915_drv.h" | ||
35 | |||
36 | static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | ||
37 | { | ||
38 | struct drm_device *dev = encoder->dev; | ||
39 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
40 | u32 temp; | ||
41 | |||
42 | temp = I915_READ(ADPA); | ||
43 | temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); | ||
44 | temp &= ~ADPA_DAC_ENABLE; | ||
45 | |||
46 | switch(mode) { | ||
47 | case DRM_MODE_DPMS_ON: | ||
48 | temp |= ADPA_DAC_ENABLE; | ||
49 | break; | ||
50 | case DRM_MODE_DPMS_STANDBY: | ||
51 | temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; | ||
52 | break; | ||
53 | case DRM_MODE_DPMS_SUSPEND: | ||
54 | temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; | ||
55 | break; | ||
56 | case DRM_MODE_DPMS_OFF: | ||
57 | temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; | ||
58 | break; | ||
59 | } | ||
60 | |||
61 | I915_WRITE(ADPA, temp); | ||
62 | } | ||
63 | |||
64 | static int intel_crt_mode_valid(struct drm_connector *connector, | ||
65 | struct drm_display_mode *mode) | ||
66 | { | ||
67 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
68 | return MODE_NO_DBLESCAN; | ||
69 | |||
70 | if (mode->clock > 400000 || mode->clock < 25000) | ||
71 | return MODE_CLOCK_RANGE; | ||
72 | |||
73 | return MODE_OK; | ||
74 | } | ||
75 | |||
76 | static bool intel_crt_mode_fixup(struct drm_encoder *encoder, | ||
77 | struct drm_display_mode *mode, | ||
78 | struct drm_display_mode *adjusted_mode) | ||
79 | { | ||
80 | return true; | ||
81 | } | ||
82 | |||
83 | static void intel_crt_mode_set(struct drm_encoder *encoder, | ||
84 | struct drm_display_mode *mode, | ||
85 | struct drm_display_mode *adjusted_mode) | ||
86 | { | ||
87 | |||
88 | struct drm_device *dev = encoder->dev; | ||
89 | struct drm_crtc *crtc = encoder->crtc; | ||
90 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
91 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
92 | int dpll_md_reg; | ||
93 | u32 adpa, dpll_md; | ||
94 | |||
95 | if (intel_crtc->pipe == 0) | ||
96 | dpll_md_reg = DPLL_A_MD; | ||
97 | else | ||
98 | dpll_md_reg = DPLL_B_MD; | ||
99 | |||
100 | /* | ||
101 | * Disable separate mode multiplier used when cloning SDVO to CRT | ||
102 | * XXX this needs to be adjusted when we really are cloning | ||
103 | */ | ||
104 | if (IS_I965G(dev)) { | ||
105 | dpll_md = I915_READ(dpll_md_reg); | ||
106 | I915_WRITE(dpll_md_reg, | ||
107 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); | ||
108 | } | ||
109 | |||
110 | adpa = 0; | ||
111 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
112 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; | ||
113 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
114 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | ||
115 | |||
116 | if (intel_crtc->pipe == 0) | ||
117 | adpa |= ADPA_PIPE_A_SELECT; | ||
118 | else | ||
119 | adpa |= ADPA_PIPE_B_SELECT; | ||
120 | |||
121 | I915_WRITE(ADPA, adpa); | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. | ||
126 | * | ||
127 | * Not for i915G/i915GM | ||
128 | * | ||
129 | * \return true if CRT is connected. | ||
130 | * \return false if CRT is disconnected. | ||
131 | */ | ||
132 | static bool intel_crt_detect_hotplug(struct drm_connector *connector) | ||
133 | { | ||
134 | struct drm_device *dev = connector->dev; | ||
135 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
136 | u32 temp; | ||
137 | |||
138 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | ||
139 | |||
140 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
141 | |||
142 | I915_WRITE(PORT_HOTPLUG_EN, | ||
143 | temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); | ||
144 | |||
145 | do { | ||
146 | if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) | ||
147 | break; | ||
148 | msleep(1); | ||
149 | } while (time_after(timeout, jiffies)); | ||
150 | |||
151 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == | ||
152 | CRT_HOTPLUG_MONITOR_COLOR) | ||
153 | return true; | ||
154 | |||
155 | return false; | ||
156 | } | ||
157 | |||
158 | static bool intel_crt_detect_ddc(struct drm_connector *connector) | ||
159 | { | ||
160 | struct intel_output *intel_output = to_intel_output(connector); | ||
161 | |||
162 | /* CRT should always be at 0, but check anyway */ | ||
163 | if (intel_output->type != INTEL_OUTPUT_ANALOG) | ||
164 | return false; | ||
165 | |||
166 | return intel_ddc_probe(intel_output); | ||
167 | } | ||
168 | |||
169 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | ||
170 | { | ||
171 | struct drm_device *dev = connector->dev; | ||
172 | |||
173 | if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { | ||
174 | if (intel_crt_detect_hotplug(connector)) | ||
175 | return connector_status_connected; | ||
176 | else | ||
177 | return connector_status_disconnected; | ||
178 | } | ||
179 | |||
180 | if (intel_crt_detect_ddc(connector)) | ||
181 | return connector_status_connected; | ||
182 | |||
183 | /* TODO use load detect */ | ||
184 | return connector_status_unknown; | ||
185 | } | ||
186 | |||
187 | static void intel_crt_destroy(struct drm_connector *connector) | ||
188 | { | ||
189 | struct intel_output *intel_output = to_intel_output(connector); | ||
190 | |||
191 | intel_i2c_destroy(intel_output->ddc_bus); | ||
192 | drm_sysfs_connector_remove(connector); | ||
193 | drm_connector_cleanup(connector); | ||
194 | kfree(connector); | ||
195 | } | ||
196 | |||
197 | static int intel_crt_get_modes(struct drm_connector *connector) | ||
198 | { | ||
199 | struct intel_output *intel_output = to_intel_output(connector); | ||
200 | return intel_ddc_get_modes(intel_output); | ||
201 | } | ||
202 | |||
203 | static int intel_crt_set_property(struct drm_connector *connector, | ||
204 | struct drm_property *property, | ||
205 | uint64_t value) | ||
206 | { | ||
207 | struct drm_device *dev = connector->dev; | ||
208 | |||
209 | if (property == dev->mode_config.dpms_property && connector->encoder) | ||
210 | intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf)); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Routines for controlling stuff on the analog port | ||
217 | */ | ||
218 | |||
219 | static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { | ||
220 | .dpms = intel_crt_dpms, | ||
221 | .mode_fixup = intel_crt_mode_fixup, | ||
222 | .prepare = intel_encoder_prepare, | ||
223 | .commit = intel_encoder_commit, | ||
224 | .mode_set = intel_crt_mode_set, | ||
225 | }; | ||
226 | |||
227 | static const struct drm_connector_funcs intel_crt_connector_funcs = { | ||
228 | .detect = intel_crt_detect, | ||
229 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
230 | .destroy = intel_crt_destroy, | ||
231 | .set_property = intel_crt_set_property, | ||
232 | }; | ||
233 | |||
234 | static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { | ||
235 | .mode_valid = intel_crt_mode_valid, | ||
236 | .get_modes = intel_crt_get_modes, | ||
237 | .best_encoder = intel_best_encoder, | ||
238 | }; | ||
239 | |||
240 | static void intel_crt_enc_destroy(struct drm_encoder *encoder) | ||
241 | { | ||
242 | drm_encoder_cleanup(encoder); | ||
243 | } | ||
244 | |||
245 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { | ||
246 | .destroy = intel_crt_enc_destroy, | ||
247 | }; | ||
248 | |||
249 | void intel_crt_init(struct drm_device *dev) | ||
250 | { | ||
251 | struct drm_connector *connector; | ||
252 | struct intel_output *intel_output; | ||
253 | |||
254 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | ||
255 | if (!intel_output) | ||
256 | return; | ||
257 | |||
258 | connector = &intel_output->base; | ||
259 | drm_connector_init(dev, &intel_output->base, | ||
260 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | ||
261 | |||
262 | drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, | ||
263 | DRM_MODE_ENCODER_DAC); | ||
264 | |||
265 | drm_mode_connector_attach_encoder(&intel_output->base, | ||
266 | &intel_output->enc); | ||
267 | |||
268 | /* Set up the DDC bus. */ | ||
269 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); | ||
270 | if (!intel_output->ddc_bus) { | ||
271 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | ||
272 | "failed.\n"); | ||
273 | return; | ||
274 | } | ||
275 | |||
276 | intel_output->type = INTEL_OUTPUT_ANALOG; | ||
277 | connector->interlace_allowed = 0; | ||
278 | connector->doublescan_allowed = 0; | ||
279 | |||
280 | drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); | ||
281 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | ||
282 | |||
283 | drm_sysfs_connector_add(connector); | ||
284 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c new file mode 100644 index 000000000000..bbdd72909a11 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -0,0 +1,1667 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2007 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | */ | ||
26 | |||
27 | #include <linux/i2c.h> | ||
28 | #include "drmP.h" | ||
29 | #include "intel_drv.h" | ||
30 | #include "i915_drm.h" | ||
31 | #include "i915_drv.h" | ||
32 | |||
33 | #include "drm_crtc_helper.h" | ||
34 | |||
35 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | ||
36 | |||
37 | typedef struct { | ||
38 | /* given values */ | ||
39 | int n; | ||
40 | int m1, m2; | ||
41 | int p1, p2; | ||
42 | /* derived values */ | ||
43 | int dot; | ||
44 | int vco; | ||
45 | int m; | ||
46 | int p; | ||
47 | } intel_clock_t; | ||
48 | |||
49 | typedef struct { | ||
50 | int min, max; | ||
51 | } intel_range_t; | ||
52 | |||
53 | typedef struct { | ||
54 | int dot_limit; | ||
55 | int p2_slow, p2_fast; | ||
56 | } intel_p2_t; | ||
57 | |||
58 | #define INTEL_P2_NUM 2 | ||
59 | |||
60 | typedef struct { | ||
61 | intel_range_t dot, vco, n, m, m1, m2, p, p1; | ||
62 | intel_p2_t p2; | ||
63 | } intel_limit_t; | ||
64 | |||
65 | #define I8XX_DOT_MIN 25000 | ||
66 | #define I8XX_DOT_MAX 350000 | ||
67 | #define I8XX_VCO_MIN 930000 | ||
68 | #define I8XX_VCO_MAX 1400000 | ||
69 | #define I8XX_N_MIN 3 | ||
70 | #define I8XX_N_MAX 16 | ||
71 | #define I8XX_M_MIN 96 | ||
72 | #define I8XX_M_MAX 140 | ||
73 | #define I8XX_M1_MIN 18 | ||
74 | #define I8XX_M1_MAX 26 | ||
75 | #define I8XX_M2_MIN 6 | ||
76 | #define I8XX_M2_MAX 16 | ||
77 | #define I8XX_P_MIN 4 | ||
78 | #define I8XX_P_MAX 128 | ||
79 | #define I8XX_P1_MIN 2 | ||
80 | #define I8XX_P1_MAX 33 | ||
81 | #define I8XX_P1_LVDS_MIN 1 | ||
82 | #define I8XX_P1_LVDS_MAX 6 | ||
83 | #define I8XX_P2_SLOW 4 | ||
84 | #define I8XX_P2_FAST 2 | ||
85 | #define I8XX_P2_LVDS_SLOW 14 | ||
86 | #define I8XX_P2_LVDS_FAST 14 /* No fast option */ | ||
87 | #define I8XX_P2_SLOW_LIMIT 165000 | ||
88 | |||
89 | #define I9XX_DOT_MIN 20000 | ||
90 | #define I9XX_DOT_MAX 400000 | ||
91 | #define I9XX_VCO_MIN 1400000 | ||
92 | #define I9XX_VCO_MAX 2800000 | ||
93 | #define I9XX_N_MIN 3 | ||
94 | #define I9XX_N_MAX 8 | ||
95 | #define I9XX_M_MIN 70 | ||
96 | #define I9XX_M_MAX 120 | ||
97 | #define I9XX_M1_MIN 10 | ||
98 | #define I9XX_M1_MAX 20 | ||
99 | #define I9XX_M2_MIN 5 | ||
100 | #define I9XX_M2_MAX 9 | ||
101 | #define I9XX_P_SDVO_DAC_MIN 5 | ||
102 | #define I9XX_P_SDVO_DAC_MAX 80 | ||
103 | #define I9XX_P_LVDS_MIN 7 | ||
104 | #define I9XX_P_LVDS_MAX 98 | ||
105 | #define I9XX_P1_MIN 1 | ||
106 | #define I9XX_P1_MAX 8 | ||
107 | #define I9XX_P2_SDVO_DAC_SLOW 10 | ||
108 | #define I9XX_P2_SDVO_DAC_FAST 5 | ||
109 | #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 | ||
110 | #define I9XX_P2_LVDS_SLOW 14 | ||
111 | #define I9XX_P2_LVDS_FAST 7 | ||
112 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 | ||
113 | |||
114 | #define INTEL_LIMIT_I8XX_DVO_DAC 0 | ||
115 | #define INTEL_LIMIT_I8XX_LVDS 1 | ||
116 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 | ||
117 | #define INTEL_LIMIT_I9XX_LVDS 3 | ||
118 | |||
119 | static const intel_limit_t intel_limits[] = { | ||
120 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ | ||
121 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | ||
122 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | ||
123 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | ||
124 | .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, | ||
125 | .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, | ||
126 | .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, | ||
127 | .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, | ||
128 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, | ||
129 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | ||
130 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | ||
131 | }, | ||
132 | { /* INTEL_LIMIT_I8XX_LVDS */ | ||
133 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | ||
134 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | ||
135 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | ||
136 | .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, | ||
137 | .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, | ||
138 | .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, | ||
139 | .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, | ||
140 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, | ||
141 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | ||
142 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | ||
143 | }, | ||
144 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ | ||
145 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | ||
146 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | ||
147 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | ||
148 | .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, | ||
149 | .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, | ||
150 | .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, | ||
151 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | ||
152 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | ||
153 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | ||
154 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | ||
155 | }, | ||
156 | { /* INTEL_LIMIT_I9XX_LVDS */ | ||
157 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | ||
158 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | ||
159 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | ||
160 | .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, | ||
161 | .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, | ||
162 | .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, | ||
163 | .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, | ||
164 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | ||
165 | 	 /* The single-channel range is 25-112 MHz, and dual-channel | ||
166 | 	  * is 80-224 MHz. Prefer single channel as much as possible. | ||
167 | */ | ||
168 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | ||
169 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | ||
170 | }, | ||
171 | }; | ||
172 | |||
173 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | ||
174 | { | ||
175 | struct drm_device *dev = crtc->dev; | ||
176 | const intel_limit_t *limit; | ||
177 | |||
178 | if (IS_I9XX(dev)) { | ||
179 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
180 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; | ||
181 | else | ||
182 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | ||
183 | } else { | ||
184 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
185 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; | ||
186 | else | ||
187 | limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; | ||
188 | } | ||
189 | return limit; | ||
190 | } | ||
191 | |||
192 | /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ | ||
193 | |||
194 | static void i8xx_clock(int refclk, intel_clock_t *clock) | ||
195 | { | ||
196 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | ||
197 | clock->p = clock->p1 * clock->p2; | ||
198 | clock->vco = refclk * clock->m / (clock->n + 2); | ||
199 | clock->dot = clock->vco / clock->p; | ||
200 | } | ||
201 | |||
202 | /** Derive the pixel clock for the given refclk and divisors for 9xx chips. */ | ||
203 | |||
204 | static void i9xx_clock(int refclk, intel_clock_t *clock) | ||
205 | { | ||
206 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | ||
207 | clock->p = clock->p1 * clock->p2; | ||
208 | clock->vco = refclk * clock->m / (clock->n + 2); | ||
209 | clock->dot = clock->vco / clock->p; | ||
210 | } | ||
211 | |||
212 | static void intel_clock(struct drm_device *dev, int refclk, | ||
213 | intel_clock_t *clock) | ||
214 | { | ||
215 | if (IS_I9XX(dev)) | ||
216 | i9xx_clock (refclk, clock); | ||
217 | else | ||
218 | i8xx_clock (refclk, clock); | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * Returns whether any output on the specified pipe is of the specified type | ||
223 | */ | ||
224 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | ||
225 | { | ||
226 | struct drm_device *dev = crtc->dev; | ||
227 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
228 | struct drm_connector *l_entry; | ||
229 | |||
230 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | ||
231 | if (l_entry->encoder && | ||
232 | l_entry->encoder->crtc == crtc) { | ||
233 | struct intel_output *intel_output = to_intel_output(l_entry); | ||
234 | if (intel_output->type == type) | ||
235 | return true; | ||
236 | } | ||
237 | } | ||
238 | return false; | ||
239 | } | ||
240 | |||
241 | #define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } | ||
242 | /** | ||
243 | * Returns whether the given set of divisors are valid for a given refclk with | ||
244 | * the given connectors. | ||
245 | */ | ||
246 | |||
247 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | ||
248 | { | ||
249 | const intel_limit_t *limit = intel_limit (crtc); | ||
250 | |||
251 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | ||
252 | INTELPllInvalid ("p1 out of range\n"); | ||
253 | if (clock->p < limit->p.min || limit->p.max < clock->p) | ||
254 | INTELPllInvalid ("p out of range\n"); | ||
255 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) | ||
256 | INTELPllInvalid ("m2 out of range\n"); | ||
257 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | ||
258 | INTELPllInvalid ("m1 out of range\n"); | ||
259 | if (clock->m1 <= clock->m2) | ||
260 | INTELPllInvalid ("m1 <= m2\n"); | ||
261 | if (clock->m < limit->m.min || limit->m.max < clock->m) | ||
262 | INTELPllInvalid ("m out of range\n"); | ||
263 | if (clock->n < limit->n.min || limit->n.max < clock->n) | ||
264 | INTELPllInvalid ("n out of range\n"); | ||
265 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) | ||
266 | INTELPllInvalid ("vco out of range\n"); | ||
267 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, | ||
268 | * connector, etc., rather than just a single range. | ||
269 | */ | ||
270 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) | ||
271 | INTELPllInvalid ("dot out of range\n"); | ||
272 | |||
273 | return true; | ||
274 | } | ||
275 | |||
276 | /** | ||
277 | * Returns a set of divisors for the desired target clock with the given | ||
278 | * refclk, or FALSE. The returned values represent the clock equation: | ||
279 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | ||
280 | */ | ||
281 | static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | ||
282 | int refclk, intel_clock_t *best_clock) | ||
283 | { | ||
284 | struct drm_device *dev = crtc->dev; | ||
285 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
286 | intel_clock_t clock; | ||
287 | const intel_limit_t *limit = intel_limit(crtc); | ||
288 | int err = target; | ||
289 | |||
290 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | ||
291 | (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { | ||
292 | /* | ||
293 | * For LVDS, if the panel is on, just rely on its current | ||
294 | * settings for dual-channel. We haven't figured out how to | ||
295 | * reliably set up different single/dual channel state, if we | ||
296 | * even can. | ||
297 | */ | ||
298 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | ||
299 | LVDS_CLKB_POWER_UP) | ||
300 | clock.p2 = limit->p2.p2_fast; | ||
301 | else | ||
302 | clock.p2 = limit->p2.p2_slow; | ||
303 | } else { | ||
304 | if (target < limit->p2.dot_limit) | ||
305 | clock.p2 = limit->p2.p2_slow; | ||
306 | else | ||
307 | clock.p2 = limit->p2.p2_fast; | ||
308 | } | ||
309 | |||
310 | memset (best_clock, 0, sizeof (*best_clock)); | ||
311 | |||
312 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { | ||
313 | for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && | ||
314 | clock.m2 <= limit->m2.max; clock.m2++) { | ||
315 | for (clock.n = limit->n.min; clock.n <= limit->n.max; | ||
316 | clock.n++) { | ||
317 | for (clock.p1 = limit->p1.min; | ||
318 | clock.p1 <= limit->p1.max; clock.p1++) { | ||
319 | int this_err; | ||
320 | |||
321 | intel_clock(dev, refclk, &clock); | ||
322 | |||
323 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
324 | continue; | ||
325 | |||
326 | this_err = abs(clock.dot - target); | ||
327 | if (this_err < err) { | ||
328 | *best_clock = clock; | ||
329 | err = this_err; | ||
330 | } | ||
331 | } | ||
332 | } | ||
333 | } | ||
334 | } | ||
335 | |||
336 | return (err != target); | ||
337 | } | ||
338 | |||
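To make the divisor equation above concrete, here is one combination that lies inside the I9XX SDVO/DAC limits for a 65 MHz target (roughly a 1024x768@60 dot clock), using the 96 MHz refclk chosen further down in intel_crtc_mode_set(). The divisor choice is illustrative only; intel_find_best_PLL() may settle on a different, closer match for the same target. Note p2 = 10 (the slow value) because 65000 kHz is below I9XX_P2_SDVO_DAC_SLOW_LIMIT.

	intel_clock_t clock = { .n = 3, .m1 = 17, .m2 = 5, .p1 = 3, .p2 = 10 };

	i9xx_clock(96000, &clock);		/* refclk in kHz */
	/*
	 * clock.m   = 5 * (17 + 2) + (5 + 2) = 102
	 * clock.p   = 3 * 10                 = 30
	 * clock.vco = 96000 * 102 / (3 + 2)  = 1958400 kHz  (within the 1.4-2.8 GHz VCO range)
	 * clock.dot = 1958400 / 30           = 65280 kHz, about 0.4% above the 65 MHz target
	 */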
339 | void | ||
340 | intel_wait_for_vblank(struct drm_device *dev) | ||
341 | { | ||
342 | /* Wait for 20ms, i.e. one cycle at 50 Hz. */ | ||
343 | udelay(20000); | ||
344 | } | ||
345 | |||
346 | static void | ||
347 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
348 | struct drm_framebuffer *old_fb) | ||
349 | { | ||
350 | struct drm_device *dev = crtc->dev; | ||
351 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
352 | struct drm_i915_master_private *master_priv; | ||
353 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
354 | struct intel_framebuffer *intel_fb; | ||
355 | struct drm_i915_gem_object *obj_priv; | ||
356 | struct drm_gem_object *obj; | ||
357 | int pipe = intel_crtc->pipe; | ||
358 | unsigned long Start, Offset; | ||
359 | int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); | ||
360 | int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); | ||
361 | int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; | ||
362 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | ||
363 | u32 dspcntr, alignment; | ||
364 | |||
365 | /* no fb bound */ | ||
366 | if (!crtc->fb) { | ||
367 | DRM_DEBUG("No FB bound\n"); | ||
368 | return; | ||
369 | } | ||
370 | |||
371 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
372 | obj = intel_fb->obj; | ||
373 | obj_priv = obj->driver_private; | ||
374 | |||
375 | switch (obj_priv->tiling_mode) { | ||
376 | case I915_TILING_NONE: | ||
377 | alignment = 64 * 1024; | ||
378 | break; | ||
379 | case I915_TILING_X: | ||
380 | if (IS_I9XX(dev)) | ||
381 | alignment = 1024 * 1024; | ||
382 | else | ||
383 | alignment = 512 * 1024; | ||
384 | break; | ||
385 | case I915_TILING_Y: | ||
386 | /* FIXME: Is this true? */ | ||
387 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | ||
388 | return; | ||
389 | default: | ||
390 | BUG(); | ||
391 | } | ||
392 | |||
393 | if (i915_gem_object_pin(intel_fb->obj, alignment)) | ||
394 | return; | ||
395 | |||
396 | i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); | ||
397 | |||
398 | Start = obj_priv->gtt_offset; | ||
399 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
400 | |||
401 | I915_WRITE(dspstride, crtc->fb->pitch); | ||
402 | |||
403 | dspcntr = I915_READ(dspcntr_reg); | ||
404 | /* Mask out pixel format bits in case we change it */ | ||
405 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | ||
406 | switch (crtc->fb->bits_per_pixel) { | ||
407 | case 8: | ||
408 | dspcntr |= DISPPLANE_8BPP; | ||
409 | break; | ||
410 | case 16: | ||
411 | if (crtc->fb->depth == 15) | ||
412 | dspcntr |= DISPPLANE_15_16BPP; | ||
413 | else | ||
414 | dspcntr |= DISPPLANE_16BPP; | ||
415 | break; | ||
416 | case 24: | ||
417 | case 32: | ||
418 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
419 | break; | ||
420 | default: | ||
421 | DRM_ERROR("Unknown color depth\n"); | ||
422 | return; | ||
423 | } | ||
424 | I915_WRITE(dspcntr_reg, dspcntr); | ||
425 | |||
426 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | ||
427 | if (IS_I965G(dev)) { | ||
428 | I915_WRITE(dspbase, Offset); | ||
429 | I915_READ(dspbase); | ||
430 | I915_WRITE(dspsurf, Start); | ||
431 | I915_READ(dspsurf); | ||
432 | } else { | ||
433 | I915_WRITE(dspbase, Start + Offset); | ||
434 | I915_READ(dspbase); | ||
435 | } | ||
436 | |||
437 | intel_wait_for_vblank(dev); | ||
438 | |||
439 | if (old_fb) { | ||
440 | intel_fb = to_intel_framebuffer(old_fb); | ||
441 | i915_gem_object_unpin(intel_fb->obj); | ||
442 | } | ||
443 | |||
444 | if (!dev->primary->master) | ||
445 | return; | ||
446 | |||
447 | master_priv = dev->primary->master->driver_priv; | ||
448 | if (!master_priv->sarea_priv) | ||
449 | return; | ||
450 | |||
451 | switch (pipe) { | ||
452 | case 0: | ||
453 | master_priv->sarea_priv->pipeA_x = x; | ||
454 | master_priv->sarea_priv->pipeA_y = y; | ||
455 | break; | ||
456 | case 1: | ||
457 | master_priv->sarea_priv->pipeB_x = x; | ||
458 | master_priv->sarea_priv->pipeB_y = y; | ||
459 | break; | ||
460 | default: | ||
461 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | ||
462 | break; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | |||
467 | |||
468 | /** | ||
469 | * Sets the power management mode of the pipe and plane. | ||
470 | * | ||
471 | * This code should probably grow support for turning the cursor off and back | ||
472 | * on appropriately at the same time as we're turning the pipe off/on. | ||
473 | */ | ||
474 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
475 | { | ||
476 | struct drm_device *dev = crtc->dev; | ||
477 | struct drm_i915_master_private *master_priv; | ||
478 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
479 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
480 | int pipe = intel_crtc->pipe; | ||
481 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
482 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | ||
483 | int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; | ||
484 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
485 | u32 temp; | ||
486 | bool enabled; | ||
487 | |||
488 | /* XXX: When our outputs are all unaware of DPMS modes other than off | ||
489 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | ||
490 | */ | ||
491 | switch (mode) { | ||
492 | case DRM_MODE_DPMS_ON: | ||
493 | case DRM_MODE_DPMS_STANDBY: | ||
494 | case DRM_MODE_DPMS_SUSPEND: | ||
495 | /* Enable the DPLL */ | ||
496 | temp = I915_READ(dpll_reg); | ||
497 | if ((temp & DPLL_VCO_ENABLE) == 0) { | ||
498 | I915_WRITE(dpll_reg, temp); | ||
499 | I915_READ(dpll_reg); | ||
500 | /* Wait for the clocks to stabilize. */ | ||
501 | udelay(150); | ||
502 | I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
503 | I915_READ(dpll_reg); | ||
504 | /* Wait for the clocks to stabilize. */ | ||
505 | udelay(150); | ||
506 | I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
507 | I915_READ(dpll_reg); | ||
508 | /* Wait for the clocks to stabilize. */ | ||
509 | udelay(150); | ||
510 | } | ||
511 | |||
512 | /* Enable the pipe */ | ||
513 | temp = I915_READ(pipeconf_reg); | ||
514 | if ((temp & PIPEACONF_ENABLE) == 0) | ||
515 | I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); | ||
516 | |||
517 | /* Enable the plane */ | ||
518 | temp = I915_READ(dspcntr_reg); | ||
519 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | ||
520 | I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); | ||
521 | /* Flush the plane changes */ | ||
522 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
523 | } | ||
524 | |||
525 | intel_crtc_load_lut(crtc); | ||
526 | |||
527 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | ||
528 | //intel_crtc_dpms_video(crtc, true); TODO | ||
529 | break; | ||
530 | case DRM_MODE_DPMS_OFF: | ||
531 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | ||
532 | //intel_crtc_dpms_video(crtc, FALSE); TODO | ||
533 | |||
534 | /* Disable the VGA plane that we never use */ | ||
535 | I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); | ||
536 | |||
537 | /* Disable display plane */ | ||
538 | temp = I915_READ(dspcntr_reg); | ||
539 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
540 | I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); | ||
541 | /* Flush the plane changes */ | ||
542 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
543 | I915_READ(dspbase_reg); | ||
544 | } | ||
545 | |||
546 | if (!IS_I9XX(dev)) { | ||
547 | /* Wait for vblank for the disable to take effect */ | ||
548 | intel_wait_for_vblank(dev); | ||
549 | } | ||
550 | |||
551 | /* Next, disable display pipes */ | ||
552 | temp = I915_READ(pipeconf_reg); | ||
553 | if ((temp & PIPEACONF_ENABLE) != 0) { | ||
554 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | ||
555 | I915_READ(pipeconf_reg); | ||
556 | } | ||
557 | |||
558 | /* Wait for vblank for the disable to take effect. */ | ||
559 | intel_wait_for_vblank(dev); | ||
560 | |||
561 | temp = I915_READ(dpll_reg); | ||
562 | if ((temp & DPLL_VCO_ENABLE) != 0) { | ||
563 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | ||
564 | I915_READ(dpll_reg); | ||
565 | } | ||
566 | |||
567 | /* Wait for the clocks to turn off. */ | ||
568 | udelay(150); | ||
569 | break; | ||
570 | } | ||
571 | |||
572 | if (!dev->primary->master) | ||
573 | return; | ||
574 | |||
575 | master_priv = dev->primary->master->driver_priv; | ||
576 | if (!master_priv->sarea_priv) | ||
577 | return; | ||
578 | |||
579 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; | ||
580 | |||
581 | switch (pipe) { | ||
582 | case 0: | ||
583 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; | ||
584 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; | ||
585 | break; | ||
586 | case 1: | ||
587 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; | ||
588 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | ||
589 | break; | ||
590 | default: | ||
591 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | ||
592 | break; | ||
593 | } | ||
594 | |||
595 | intel_crtc->dpms_mode = mode; | ||
596 | } | ||
597 | |||
598 | static void intel_crtc_prepare (struct drm_crtc *crtc) | ||
599 | { | ||
600 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
601 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
602 | } | ||
603 | |||
604 | static void intel_crtc_commit (struct drm_crtc *crtc) | ||
605 | { | ||
606 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
607 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
608 | } | ||
609 | |||
610 | void intel_encoder_prepare (struct drm_encoder *encoder) | ||
611 | { | ||
612 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
613 | /* lvds has its own version of prepare see intel_lvds_prepare */ | ||
614 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); | ||
615 | } | ||
616 | |||
617 | void intel_encoder_commit (struct drm_encoder *encoder) | ||
618 | { | ||
619 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
620 | /* lvds has its own version of commit see intel_lvds_commit */ | ||
621 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | ||
622 | } | ||
623 | |||
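These encoder prepare/commit hooks, like the crtc ones just above, are thin DPMS wrappers because the drm_crtc_helper modeset path turns everything off, programs the new mode, and then turns it back on. Roughly, drm_crtc_helper_set_mode() ends up calling the hooks in the order sketched below; this reflects the helper's behaviour at the time rather than code added by this patch, and per-encoder iteration and error handling are omitted.

	encoder_funcs->prepare(encoder);	/* intel_encoder_prepare -> DPMS_OFF */
	crtc_funcs->prepare(crtc);		/* intel_crtc_prepare    -> DPMS_OFF */
	crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
	encoder_funcs->mode_set(encoder, mode, adjusted_mode);
	crtc_funcs->commit(crtc);		/* intel_crtc_commit     -> DPMS_ON  */
	encoder_funcs->commit(encoder);		/* intel_encoder_commit  -> DPMS_ON  */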
624 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | ||
625 | struct drm_display_mode *mode, | ||
626 | struct drm_display_mode *adjusted_mode) | ||
627 | { | ||
628 | return true; | ||
629 | } | ||
630 | |||
631 | |||
632 | /** Returns the core display clock speed for i830 - i945 */ | ||
633 | static int intel_get_core_clock_speed(struct drm_device *dev) | ||
634 | { | ||
635 | |||
636 | /* Core clock values taken from the published datasheets. | ||
637 | * The 830 may go up to 166 MHz, which we should check. | ||
638 | */ | ||
639 | if (IS_I945G(dev)) | ||
640 | return 400000; | ||
641 | else if (IS_I915G(dev)) | ||
642 | return 333000; | ||
643 | else if (IS_I945GM(dev) || IS_845G(dev)) | ||
644 | return 200000; | ||
645 | else if (IS_I915GM(dev)) { | ||
646 | u16 gcfgc = 0; | ||
647 | |||
648 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
649 | |||
650 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) | ||
651 | return 133000; | ||
652 | else { | ||
653 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | ||
654 | case GC_DISPLAY_CLOCK_333_MHZ: | ||
655 | return 333000; | ||
656 | default: | ||
657 | case GC_DISPLAY_CLOCK_190_200_MHZ: | ||
658 | return 190000; | ||
659 | } | ||
660 | } | ||
661 | } else if (IS_I865G(dev)) | ||
662 | return 266000; | ||
663 | else if (IS_I855(dev)) { | ||
664 | u16 hpllcc = 0; | ||
665 | /* Assume that the hardware is in the high speed state. This | ||
666 | * should be the default. | ||
667 | */ | ||
668 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | ||
669 | case GC_CLOCK_133_200: | ||
670 | case GC_CLOCK_100_200: | ||
671 | return 200000; | ||
672 | case GC_CLOCK_166_250: | ||
673 | return 250000; | ||
674 | case GC_CLOCK_100_133: | ||
675 | return 133000; | ||
676 | } | ||
677 | } else /* 852, 830 */ | ||
678 | return 133000; | ||
679 | |||
680 | return 0; /* Silence gcc warning */ | ||
681 | } | ||
682 | |||
683 | |||
684 | /** | ||
685 | * Return the pipe currently connected to the panel fitter, | ||
686 | * or -1 if the panel fitter is not present or not in use | ||
687 | */ | ||
688 | static int intel_panel_fitter_pipe (struct drm_device *dev) | ||
689 | { | ||
690 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
691 | u32 pfit_control; | ||
692 | |||
693 | /* i830 doesn't have a panel fitter */ | ||
694 | if (IS_I830(dev)) | ||
695 | return -1; | ||
696 | |||
697 | pfit_control = I915_READ(PFIT_CONTROL); | ||
698 | |||
699 | /* See if the panel fitter is in use */ | ||
700 | if ((pfit_control & PFIT_ENABLE) == 0) | ||
701 | return -1; | ||
702 | |||
703 | /* 965 can place panel fitter on either pipe */ | ||
704 | if (IS_I965G(dev)) | ||
705 | return (pfit_control >> 29) & 0x3; | ||
706 | |||
707 | /* older chips can only use pipe 1 */ | ||
708 | return 1; | ||
709 | } | ||
710 | |||
711 | static void intel_crtc_mode_set(struct drm_crtc *crtc, | ||
712 | struct drm_display_mode *mode, | ||
713 | struct drm_display_mode *adjusted_mode, | ||
714 | int x, int y, | ||
715 | struct drm_framebuffer *old_fb) | ||
716 | { | ||
717 | struct drm_device *dev = crtc->dev; | ||
718 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
719 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
720 | int pipe = intel_crtc->pipe; | ||
721 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; | ||
722 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
723 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; | ||
724 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | ||
725 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
726 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | ||
727 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | ||
728 | int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | ||
729 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | ||
730 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | ||
731 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | ||
732 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; | ||
733 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; | ||
734 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | ||
735 | int refclk; | ||
736 | intel_clock_t clock; | ||
737 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | ||
738 | bool ok, is_sdvo = false, is_dvo = false; | ||
739 | bool is_crt = false, is_lvds = false, is_tv = false; | ||
740 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
741 | struct drm_connector *connector; | ||
742 | |||
743 | drm_vblank_pre_modeset(dev, pipe); | ||
744 | |||
745 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
746 | struct intel_output *intel_output = to_intel_output(connector); | ||
747 | |||
748 | if (!connector->encoder || connector->encoder->crtc != crtc) | ||
749 | continue; | ||
750 | |||
751 | switch (intel_output->type) { | ||
752 | case INTEL_OUTPUT_LVDS: | ||
753 | is_lvds = true; | ||
754 | break; | ||
755 | case INTEL_OUTPUT_SDVO: | ||
756 | case INTEL_OUTPUT_HDMI: | ||
757 | is_sdvo = true; | ||
758 | if (intel_output->needs_tv_clock) | ||
759 | is_tv = true; | ||
760 | break; | ||
761 | case INTEL_OUTPUT_DVO: | ||
762 | is_dvo = true; | ||
763 | break; | ||
764 | case INTEL_OUTPUT_TVOUT: | ||
765 | is_tv = true; | ||
766 | break; | ||
767 | case INTEL_OUTPUT_ANALOG: | ||
768 | is_crt = true; | ||
769 | break; | ||
770 | } | ||
771 | } | ||
772 | |||
773 | if (IS_I9XX(dev)) { | ||
774 | refclk = 96000; | ||
775 | } else { | ||
776 | refclk = 48000; | ||
777 | } | ||
778 | |||
779 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); | ||
780 | if (!ok) { | ||
781 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | ||
782 | return; | ||
783 | } | ||
784 | |||
785 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
786 | |||
787 | dpll = DPLL_VGA_MODE_DIS; | ||
788 | if (IS_I9XX(dev)) { | ||
789 | if (is_lvds) | ||
790 | dpll |= DPLLB_MODE_LVDS; | ||
791 | else | ||
792 | dpll |= DPLLB_MODE_DAC_SERIAL; | ||
793 | if (is_sdvo) { | ||
794 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
795 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
796 | int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
797 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | ||
798 | } | ||
799 | } | ||
800 | |||
801 | /* compute bitmask from p1 value */ | ||
802 | dpll |= (1 << (clock.p1 - 1)) << 16; | ||
803 | switch (clock.p2) { | ||
804 | case 5: | ||
805 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | ||
806 | break; | ||
807 | case 7: | ||
808 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | ||
809 | break; | ||
810 | case 10: | ||
811 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | ||
812 | break; | ||
813 | case 14: | ||
814 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | ||
815 | break; | ||
816 | } | ||
817 | if (IS_I965G(dev)) | ||
818 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); | ||
819 | } else { | ||
820 | if (is_lvds) { | ||
821 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | ||
822 | } else { | ||
823 | if (clock.p1 == 2) | ||
824 | dpll |= PLL_P1_DIVIDE_BY_TWO; | ||
825 | else | ||
826 | dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; | ||
827 | if (clock.p2 == 4) | ||
828 | dpll |= PLL_P2_DIVIDE_BY_4; | ||
829 | } | ||
830 | } | ||
831 | |||
832 | if (is_tv) { | ||
833 | /* XXX: just matching BIOS for now */ | ||
834 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | ||
835 | dpll |= 3; | ||
836 | } | ||
837 | else | ||
838 | dpll |= PLL_REF_INPUT_DREFCLK; | ||
839 | |||
840 | /* setup pipeconf */ | ||
841 | pipeconf = I915_READ(pipeconf_reg); | ||
842 | |||
843 | /* Set up the display plane register */ | ||
844 | dspcntr = DISPPLANE_GAMMA_ENABLE; | ||
845 | |||
846 | if (pipe == 0) | ||
847 | dspcntr |= DISPPLANE_SEL_PIPE_A; | ||
848 | else | ||
849 | dspcntr |= DISPPLANE_SEL_PIPE_B; | ||
850 | |||
851 | if (pipe == 0 && !IS_I965G(dev)) { | ||
852 | /* Enable pixel doubling when the dot clock is > 90% of the (display) | ||
853 | * core speed. | ||
854 | * | ||
855 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the | ||
856 | * pipe == 0 check? | ||
857 | */ | ||
858 | if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) | ||
859 | pipeconf |= PIPEACONF_DOUBLE_WIDE; | ||
860 | else | ||
861 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | ||
862 | } | ||
863 | |||
864 | dspcntr |= DISPLAY_PLANE_ENABLE; | ||
865 | pipeconf |= PIPEACONF_ENABLE; | ||
866 | dpll |= DPLL_VCO_ENABLE; | ||
867 | |||
868 | |||
869 | /* Disable the panel fitter if it was on our pipe */ | ||
870 | if (intel_panel_fitter_pipe(dev) == pipe) | ||
871 | I915_WRITE(PFIT_CONTROL, 0); | ||
872 | |||
873 | DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | ||
874 | drm_mode_debug_printmodeline(mode); | ||
875 | |||
876 | |||
877 | if (dpll & DPLL_VCO_ENABLE) { | ||
878 | I915_WRITE(fp_reg, fp); | ||
879 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | ||
880 | I915_READ(dpll_reg); | ||
881 | udelay(150); | ||
882 | } | ||
883 | |||
884 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | ||
885 | * This is an exception to the general rule that mode_set doesn't turn | ||
886 | * things on. | ||
887 | */ | ||
888 | if (is_lvds) { | ||
889 | u32 lvds = I915_READ(LVDS); | ||
890 | |||
891 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; | ||
892 | /* Set the B0-B3 data pairs corresponding to whether we're going to | ||
893 | * set the DPLLs for dual-channel mode or not. | ||
894 | */ | ||
895 | if (clock.p2 == 7) | ||
896 | lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | ||
897 | else | ||
898 | lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); | ||
899 | |||
900 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | ||
901 | * appropriately here, but we need to look more thoroughly into how | ||
902 | * panels behave in the two modes. | ||
903 | */ | ||
904 | |||
905 | I915_WRITE(LVDS, lvds); | ||
906 | I915_READ(LVDS); | ||
907 | } | ||
908 | |||
909 | I915_WRITE(fp_reg, fp); | ||
910 | I915_WRITE(dpll_reg, dpll); | ||
911 | I915_READ(dpll_reg); | ||
912 | /* Wait for the clocks to stabilize. */ | ||
913 | udelay(150); | ||
914 | |||
915 | if (IS_I965G(dev)) { | ||
916 | int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
917 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
918 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
919 | } else { | ||
920 | /* write it again -- the BIOS does, after all */ | ||
921 | I915_WRITE(dpll_reg, dpll); | ||
922 | } | ||
923 | I915_READ(dpll_reg); | ||
924 | /* Wait for the clocks to stabilize. */ | ||
925 | udelay(150); | ||
926 | |||
927 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | ||
928 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
929 | I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | | ||
930 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
931 | I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | | ||
932 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
933 | I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | | ||
934 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
935 | I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | | ||
936 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
937 | I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | | ||
938 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
939 | /* pipesrc and dspsize control the size that is scaled from, which should | ||
940 | * always be the user's requested size. | ||
941 | */ | ||
942 | I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); | ||
943 | I915_WRITE(dsppos_reg, 0); | ||
944 | I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
945 | I915_WRITE(pipeconf_reg, pipeconf); | ||
946 | I915_READ(pipeconf_reg); | ||
947 | |||
948 | intel_wait_for_vblank(dev); | ||
949 | |||
950 | I915_WRITE(dspcntr_reg, dspcntr); | ||
951 | |||
952 | /* Flush the plane changes */ | ||
953 | intel_pipe_set_base(crtc, x, y, old_fb); | ||
954 | |||
955 | drm_vblank_post_modeset(dev, pipe); | ||
956 | } | ||
957 | |||
958 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ | ||
959 | void intel_crtc_load_lut(struct drm_crtc *crtc) | ||
960 | { | ||
961 | struct drm_device *dev = crtc->dev; | ||
962 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
963 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
964 | int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; | ||
965 | int i; | ||
966 | |||
967 | /* The clocks have to be on to load the palette. */ | ||
968 | if (!crtc->enabled) | ||
969 | return; | ||
970 | |||
971 | for (i = 0; i < 256; i++) { | ||
972 | I915_WRITE(palreg + 4 * i, | ||
973 | (intel_crtc->lut_r[i] << 16) | | ||
974 | (intel_crtc->lut_g[i] << 8) | | ||
975 | intel_crtc->lut_b[i]); | ||
976 | } | ||
977 | } | ||
978 | |||
979 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | ||
980 | struct drm_file *file_priv, | ||
981 | uint32_t handle, | ||
982 | uint32_t width, uint32_t height) | ||
983 | { | ||
984 | struct drm_device *dev = crtc->dev; | ||
985 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
986 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
987 | struct drm_gem_object *bo; | ||
988 | struct drm_i915_gem_object *obj_priv; | ||
989 | int pipe = intel_crtc->pipe; | ||
990 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | ||
991 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | ||
992 | uint32_t temp; | ||
993 | size_t addr; | ||
994 | int ret; | ||
995 | |||
996 | DRM_DEBUG("\n"); | ||
997 | |||
998 | /* if we want to turn off the cursor ignore width and height */ | ||
999 | if (!handle) { | ||
1000 | DRM_DEBUG("cursor off\n"); | ||
1001 | temp = CURSOR_MODE_DISABLE; | ||
1002 | addr = 0; | ||
1003 | bo = NULL; | ||
1004 | goto finish; | ||
1005 | } | ||
1006 | |||
1007 | /* Currently we only support 64x64 cursors */ | ||
1008 | if (width != 64 || height != 64) { | ||
1009 | DRM_ERROR("we currently only support 64x64 cursors\n"); | ||
1010 | return -EINVAL; | ||
1011 | } | ||
1012 | |||
1013 | bo = drm_gem_object_lookup(dev, file_priv, handle); | ||
1014 | if (!bo) | ||
1015 | return -ENOENT; | ||
1016 | |||
1017 | obj_priv = bo->driver_private; | ||
1018 | |||
1019 | if (bo->size < width * height * 4) { | ||
1020 | DRM_ERROR("buffer is to small\n"); | ||
1021 | ret = -ENOMEM; | ||
1022 | goto fail; | ||
1023 | } | ||
1024 | |||
1025 | /* we only need to pin inside GTT if cursor is non-phy */ | ||
1026 | if (!dev_priv->cursor_needs_physical) { | ||
1027 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | ||
1028 | if (ret) { | ||
1029 | DRM_ERROR("failed to pin cursor bo\n"); | ||
1030 | goto fail; | ||
1031 | } | ||
1032 | addr = obj_priv->gtt_offset; | ||
1033 | } else { | ||
1034 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | ||
1035 | if (ret) { | ||
1036 | DRM_ERROR("failed to attach phys object\n"); | ||
1037 | goto fail; | ||
1038 | } | ||
1039 | addr = obj_priv->phys_obj->handle->busaddr; | ||
1040 | } | ||
1041 | |||
1042 | temp = 0; | ||
1043 | /* set the pipe for the cursor */ | ||
1044 | temp |= (pipe << 28); | ||
1045 | temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
1046 | |||
1047 | finish: | ||
1048 | I915_WRITE(control, temp); | ||
1049 | I915_WRITE(base, addr); | ||
1050 | |||
1051 | if (intel_crtc->cursor_bo) { | ||
1052 | if (dev_priv->cursor_needs_physical) { | ||
1053 | if (intel_crtc->cursor_bo != bo) | ||
1054 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | ||
1055 | } else | ||
1056 | i915_gem_object_unpin(intel_crtc->cursor_bo); | ||
1057 | mutex_lock(&dev->struct_mutex); | ||
1058 | drm_gem_object_unreference(intel_crtc->cursor_bo); | ||
1059 | mutex_unlock(&dev->struct_mutex); | ||
1060 | } | ||
1061 | |||
1062 | intel_crtc->cursor_addr = addr; | ||
1063 | intel_crtc->cursor_bo = bo; | ||
1064 | |||
1065 | return 0; | ||
1066 | fail: | ||
1067 | mutex_lock(&dev->struct_mutex); | ||
1068 | drm_gem_object_unreference(bo); | ||
1069 | mutex_unlock(&dev->struct_mutex); | ||
1070 | return ret; | ||
1071 | } | ||
1072 | |||
1073 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | ||
1074 | { | ||
1075 | struct drm_device *dev = crtc->dev; | ||
1076 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1077 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1078 | int pipe = intel_crtc->pipe; | ||
1079 | uint32_t temp = 0; | ||
1080 | uint32_t adder; | ||
1081 | |||
1082 | if (x < 0) { | ||
1083 | temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); | ||
1084 | x = -x; | ||
1085 | } | ||
1086 | if (y < 0) { | ||
1087 | temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); | ||
1088 | y = -y; | ||
1089 | } | ||
1090 | |||
1091 | temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); | ||
1092 | temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); | ||
1093 | |||
1094 | adder = intel_crtc->cursor_addr; | ||
1095 | I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); | ||
1096 | I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); | ||
1097 | |||
1098 | return 0; | ||
1099 | } | ||
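The cursor position register written by intel_crtc_cursor_move() above is packed in sign-magnitude form: each axis contributes a sign bit plus an absolute-value field, both folded into one 32-bit word. A minimal standalone sketch of that packing follows; the shift and mask values are illustrative placeholders, since the authoritative CURSOR_* definitions live in the driver's register header rather than in this hunk.

/* Standalone sketch of the sign-magnitude packing used by
 * intel_crtc_cursor_move() above.  POS_MASK/POS_SIGN/X_SHIFT/Y_SHIFT are
 * assumed placeholder values, not the real register definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define POS_MASK  0x7ffu   /* assumed width of the magnitude field */
#define POS_SIGN  0x8000u  /* assumed sign bit within each 16-bit half */
#define X_SHIFT   0
#define Y_SHIFT   16

static uint32_t pack_cursor_pos(int x, int y)
{
	uint32_t temp = 0;

	if (x < 0) {                        /* negative: set sign bit, keep |x| */
		temp |= POS_SIGN << X_SHIFT;
		x = -x;
	}
	if (y < 0) {
		temp |= POS_SIGN << Y_SHIFT;
		y = -y;
	}
	temp |= ((uint32_t)x & POS_MASK) << X_SHIFT;
	temp |= ((uint32_t)y & POS_MASK) << Y_SHIFT;
	return temp;
}

int main(void)
{
	printf("(-5, 10) -> 0x%08x\n", pack_cursor_pos(-5, 10));
	return 0;
}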
1100 | |||
1101 | /** Sets the color ramps on behalf of RandR */ | ||
1102 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | ||
1103 | u16 blue, int regno) | ||
1104 | { | ||
1105 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1106 | |||
1107 | intel_crtc->lut_r[regno] = red >> 8; | ||
1108 | intel_crtc->lut_g[regno] = green >> 8; | ||
1109 | intel_crtc->lut_b[regno] = blue >> 8; | ||
1110 | } | ||
1111 | |||
1112 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
1113 | u16 *blue, uint32_t size) | ||
1114 | { | ||
1115 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1116 | int i; | ||
1117 | |||
1118 | if (size != 256) | ||
1119 | return; | ||
1120 | |||
1121 | for (i = 0; i < 256; i++) { | ||
1122 | intel_crtc->lut_r[i] = red[i] >> 8; | ||
1123 | intel_crtc->lut_g[i] = green[i] >> 8; | ||
1124 | intel_crtc->lut_b[i] = blue[i] >> 8; | ||
1125 | } | ||
1126 | |||
1127 | intel_crtc_load_lut(crtc); | ||
1128 | } | ||
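Both gamma paths above funnel into the same representation: the 16-bit per-channel ramps supplied by userspace (or by fbdev through intel_crtc_fb_gamma_set()) are truncated to 8 bits, and each of the 256 palette entries is written as a single 32-bit word with red in bits 23:16, green in 15:8 and blue in 7:0, exactly as intel_crtc_load_lut() shows. A small standalone sketch of that packing, for illustration only:

/* Standalone sketch of the LUT entry packing used by the gamma code above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_lut_entry(uint16_t red, uint16_t green, uint16_t blue)
{
	uint8_t r = red >> 8, g = green >> 8, b = blue >> 8;  /* 16 -> 8 bits */

	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	/* a mid-grey ramp entry */
	printf("entry = 0x%08x\n", pack_lut_entry(0x8000, 0x8000, 0x8000));
	return 0;
}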
1129 | |||
1130 | /** | ||
1131 | * Get a pipe with a simple mode set on it for doing load-based monitor | ||
1132 | * detection. | ||
1133 | * | ||
1134 | * It will be up to the load-detect code to adjust the pipe as appropriate for | ||
1135 | * its requirements. The pipe will be connected to no other outputs. | ||
1136 | * | ||
1137 | * Currently this code will only succeed if there is a pipe with no outputs | ||
1138 | * configured for it. In the future, it could choose to temporarily disable | ||
1139 | * some outputs to free up a pipe for its use. | ||
1140 | * | ||
1141 | * \return crtc, or NULL if no pipes are available. | ||
1142 | */ | ||
1143 | |||
1144 | /* VESA 640x480x72Hz mode to set on the pipe */ | ||
1145 | static struct drm_display_mode load_detect_mode = { | ||
1146 | DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, | ||
1147 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | ||
1148 | }; | ||
1149 | |||
1150 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | ||
1151 | struct drm_display_mode *mode, | ||
1152 | int *dpms_mode) | ||
1153 | { | ||
1154 | struct intel_crtc *intel_crtc; | ||
1155 | struct drm_crtc *possible_crtc; | ||
1156 | struct drm_crtc *supported_crtc = NULL; | ||
1157 | struct drm_encoder *encoder = &intel_output->enc; | ||
1158 | struct drm_crtc *crtc = NULL; | ||
1159 | struct drm_device *dev = encoder->dev; | ||
1160 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
1161 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
1162 | int i = -1; | ||
1163 | |||
1164 | /* | ||
1165 | * Algorithm gets a little messy: | ||
1166 | * - if the connector already has an assigned crtc, use it (but make | ||
1167 | * sure it's on first) | ||
1168 | * - try to find the first unused crtc that can drive this connector, | ||
1169 | * and use that if we find one | ||
1170 | * - if there are no unused crtcs available, try to use the first | ||
1171 | * one we found that supports the connector | ||
1172 | */ | ||
1173 | |||
1174 | /* See if we already have a CRTC for this connector */ | ||
1175 | if (encoder->crtc) { | ||
1176 | crtc = encoder->crtc; | ||
1177 | /* Make sure the crtc and connector are running */ | ||
1178 | intel_crtc = to_intel_crtc(crtc); | ||
1179 | *dpms_mode = intel_crtc->dpms_mode; | ||
1180 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | ||
1181 | crtc_funcs = crtc->helper_private; | ||
1182 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
1183 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | ||
1184 | } | ||
1185 | return crtc; | ||
1186 | } | ||
1187 | |||
1188 | /* Find an unused one (if possible) */ | ||
1189 | list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { | ||
1190 | i++; | ||
1191 | if (!(encoder->possible_crtcs & (1 << i))) | ||
1192 | continue; | ||
1193 | if (!possible_crtc->enabled) { | ||
1194 | crtc = possible_crtc; | ||
1195 | break; | ||
1196 | } | ||
1197 | if (!supported_crtc) | ||
1198 | supported_crtc = possible_crtc; | ||
1199 | } | ||
1200 | |||
1201 | /* | ||
1202 | * If we didn't find an unused CRTC, don't use any. | ||
1203 | */ | ||
1204 | if (!crtc) { | ||
1205 | return NULL; | ||
1206 | } | ||
1207 | |||
1208 | encoder->crtc = crtc; | ||
1209 | intel_output->load_detect_temp = true; | ||
1210 | |||
1211 | intel_crtc = to_intel_crtc(crtc); | ||
1212 | *dpms_mode = intel_crtc->dpms_mode; | ||
1213 | |||
1214 | if (!crtc->enabled) { | ||
1215 | if (!mode) | ||
1216 | mode = &load_detect_mode; | ||
1217 | drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); | ||
1218 | } else { | ||
1219 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | ||
1220 | crtc_funcs = crtc->helper_private; | ||
1221 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
1222 | } | ||
1223 | |||
1224 | /* Add this connector to the crtc */ | ||
1225 | encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); | ||
1226 | encoder_funcs->commit(encoder); | ||
1227 | } | ||
1228 | /* let the connector get through one full cycle before testing */ | ||
1229 | intel_wait_for_vblank(dev); | ||
1230 | |||
1231 | return crtc; | ||
1232 | } | ||
1233 | |||
1234 | void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) | ||
1235 | { | ||
1236 | struct drm_encoder *encoder = &intel_output->enc; | ||
1237 | struct drm_device *dev = encoder->dev; | ||
1238 | struct drm_crtc *crtc = encoder->crtc; | ||
1239 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
1240 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
1241 | |||
1242 | if (intel_output->load_detect_temp) { | ||
1243 | encoder->crtc = NULL; | ||
1244 | intel_output->load_detect_temp = false; | ||
1245 | crtc->enabled = drm_helper_crtc_in_use(crtc); | ||
1246 | drm_helper_disable_unused_functions(dev); | ||
1247 | } | ||
1248 | |||
1249 | /* Switch crtc and output back off if necessary */ | ||
1250 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { | ||
1251 | if (encoder->crtc == crtc) | ||
1252 | encoder_funcs->dpms(encoder, dpms_mode); | ||
1253 | crtc_funcs->dpms(crtc, dpms_mode); | ||
1254 | } | ||
1255 | } | ||
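Taken together, the two helpers above form a bracket that output code can use for load-based detection: borrow a pipe (lighting it up if needed), run the hardware probe, then hand the pipe back in its previous DPMS state. The sketch below shows the intended call pattern only; it is not compilable on its own, and example_probe() stands in for whatever chip-specific load-detect routine an output would implement (it is not part of this patch).

/* Hedged sketch of how a connector's ->detect() hook might use the
 * load-detect helpers above.  example_probe() is hypothetical.
 */
static enum drm_connector_status
example_detect(struct drm_connector *connector)
{
	struct intel_output *intel_output = to_intel_output(connector);
	enum drm_connector_status status = connector_status_disconnected;
	struct drm_crtc *crtc;
	int dpms_mode;

	crtc = intel_get_load_detect_pipe(intel_output, NULL, &dpms_mode);
	if (crtc) {
		if (example_probe(crtc, intel_output))	/* hypothetical probe */
			status = connector_status_connected;
		intel_release_load_detect_pipe(intel_output, dpms_mode);
	}

	return status;
}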
1256 | |||
1257 | /* Returns the clock of the currently programmed mode of the given pipe. */ | ||
1258 | static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | ||
1259 | { | ||
1260 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1261 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1262 | int pipe = intel_crtc->pipe; | ||
1263 | u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B); | ||
1264 | u32 fp; | ||
1265 | intel_clock_t clock; | ||
1266 | |||
1267 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | ||
1268 | fp = I915_READ((pipe == 0) ? FPA0 : FPB0); | ||
1269 | else | ||
1270 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | ||
1271 | |||
1272 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | ||
1273 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | ||
1274 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | ||
1275 | if (IS_I9XX(dev)) { | ||
1276 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | ||
1277 | DPLL_FPA01_P1_POST_DIV_SHIFT); | ||
1278 | |||
1279 | switch (dpll & DPLL_MODE_MASK) { | ||
1280 | case DPLLB_MODE_DAC_SERIAL: | ||
1281 | clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? | ||
1282 | 5 : 10; | ||
1283 | break; | ||
1284 | case DPLLB_MODE_LVDS: | ||
1285 | clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? | ||
1286 | 7 : 14; | ||
1287 | break; | ||
1288 | default: | ||
1289 | DRM_DEBUG("Unknown DPLL mode %08x in programmed " | ||
1290 | "mode\n", (int)(dpll & DPLL_MODE_MASK)); | ||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | /* XXX: Handle the 100MHz refclk */ | ||
1295 | i9xx_clock(96000, &clock); | ||
1296 | } else { | ||
1297 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | ||
1298 | |||
1299 | if (is_lvds) { | ||
1300 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> | ||
1301 | DPLL_FPA01_P1_POST_DIV_SHIFT); | ||
1302 | clock.p2 = 14; | ||
1303 | |||
1304 | if ((dpll & PLL_REF_INPUT_MASK) == | ||
1305 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | ||
1306 | /* XXX: might not be 66MHz */ | ||
1307 | i8xx_clock(66000, &clock); | ||
1308 | } else | ||
1309 | i8xx_clock(48000, &clock); | ||
1310 | } else { | ||
1311 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | ||
1312 | clock.p1 = 2; | ||
1313 | else { | ||
1314 | clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> | ||
1315 | DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; | ||
1316 | } | ||
1317 | if (dpll & PLL_P2_DIVIDE_BY_4) | ||
1318 | clock.p2 = 4; | ||
1319 | else | ||
1320 | clock.p2 = 2; | ||
1321 | |||
1322 | i8xx_clock(48000, &clock); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1326 | /* XXX: It would be nice to validate the clocks, but we can't reuse | ||
1327 | * i830PllIsValid() because it relies on the xf86_config connector | ||
1328 | * configuration being accurate, which it isn't necessarily. | ||
1329 | */ | ||
1330 | |||
1331 | return clock.dot; | ||
1332 | } | ||
1333 | |||
1334 | /** Returns the currently programmed mode of the given pipe. */ | ||
1335 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | ||
1336 | struct drm_crtc *crtc) | ||
1337 | { | ||
1338 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1339 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1340 | int pipe = intel_crtc->pipe; | ||
1341 | struct drm_display_mode *mode; | ||
1342 | int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); | ||
1343 | int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); | ||
1344 | int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); | ||
1345 | int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); | ||
1346 | |||
1347 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | ||
1348 | if (!mode) | ||
1349 | return NULL; | ||
1350 | |||
1351 | mode->clock = intel_crtc_clock_get(dev, crtc); | ||
1352 | mode->hdisplay = (htot & 0xffff) + 1; | ||
1353 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | ||
1354 | mode->hsync_start = (hsync & 0xffff) + 1; | ||
1355 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; | ||
1356 | mode->vdisplay = (vtot & 0xffff) + 1; | ||
1357 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; | ||
1358 | mode->vsync_start = (vsync & 0xffff) + 1; | ||
1359 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; | ||
1360 | |||
1361 | drm_mode_set_name(mode); | ||
1362 | drm_mode_set_crtcinfo(mode, 0); | ||
1363 | |||
1364 | return mode; | ||
1365 | } | ||
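intel_crtc_mode_get() above relies on a single convention for the timing registers: the low 16 bits hold the start/active value and the high 16 bits hold the end/total value, each stored minus one (the mode-set path earlier in this file programs them the same way). A standalone sketch of the decode, with purely illustrative register contents:

/* Standalone sketch of the timing-register decode used above. */
#include <stdint.h>
#include <stdio.h>

static void decode_timing(uint32_t reg, int *active, int *total)
{
	*active = (reg & 0xffff) + 1;
	*total  = ((reg >> 16) & 0xffff) + 1;
}

int main(void)
{
	int hdisplay, htotal;
	/* value as it might be programmed for a 1024-wide mode with a
	 * 1344-pixel total (illustrative numbers only) */
	uint32_t htot = ((1344 - 1) << 16) | (1024 - 1);

	decode_timing(htot, &hdisplay, &htotal);
	printf("hdisplay=%d htotal=%d\n", hdisplay, htotal);
	return 0;
}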
1366 | |||
1367 | static void intel_crtc_destroy(struct drm_crtc *crtc) | ||
1368 | { | ||
1369 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1370 | |||
1371 | drm_crtc_cleanup(crtc); | ||
1372 | kfree(intel_crtc); | ||
1373 | } | ||
1374 | |||
1375 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | ||
1376 | .dpms = intel_crtc_dpms, | ||
1377 | .mode_fixup = intel_crtc_mode_fixup, | ||
1378 | .mode_set = intel_crtc_mode_set, | ||
1379 | .mode_set_base = intel_pipe_set_base, | ||
1380 | .prepare = intel_crtc_prepare, | ||
1381 | .commit = intel_crtc_commit, | ||
1382 | }; | ||
1383 | |||
1384 | static const struct drm_crtc_funcs intel_crtc_funcs = { | ||
1385 | .cursor_set = intel_crtc_cursor_set, | ||
1386 | .cursor_move = intel_crtc_cursor_move, | ||
1387 | .gamma_set = intel_crtc_gamma_set, | ||
1388 | .set_config = drm_crtc_helper_set_config, | ||
1389 | .destroy = intel_crtc_destroy, | ||
1390 | }; | ||
1391 | |||
1392 | |||
1393 | static void intel_crtc_init(struct drm_device *dev, int pipe) | ||
1394 | { | ||
1395 | struct intel_crtc *intel_crtc; | ||
1396 | int i; | ||
1397 | |||
1398 | intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); | ||
1399 | if (intel_crtc == NULL) | ||
1400 | return; | ||
1401 | |||
1402 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | ||
1403 | |||
1404 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | ||
1405 | intel_crtc->pipe = pipe; | ||
1406 | for (i = 0; i < 256; i++) { | ||
1407 | intel_crtc->lut_r[i] = i; | ||
1408 | intel_crtc->lut_g[i] = i; | ||
1409 | intel_crtc->lut_b[i] = i; | ||
1410 | } | ||
1411 | |||
1412 | intel_crtc->cursor_addr = 0; | ||
1413 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | ||
1414 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | ||
1415 | |||
1416 | intel_crtc->mode_set.crtc = &intel_crtc->base; | ||
1417 | intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1); | ||
1418 | intel_crtc->mode_set.num_connectors = 0; | ||
1419 | |||
1420 | if (i915_fbpercrtc) { | ||
1421 | |||
1422 | |||
1423 | |||
1424 | } | ||
1425 | } | ||
1426 | |||
1427 | struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) | ||
1428 | { | ||
1429 | struct drm_crtc *crtc = NULL; | ||
1430 | |||
1431 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
1432 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1433 | if (intel_crtc->pipe == pipe) | ||
1434 | break; | ||
1435 | } | ||
1436 | return crtc; | ||
1437 | } | ||
1438 | |||
1439 | static int intel_connector_clones(struct drm_device *dev, int type_mask) | ||
1440 | { | ||
1441 | int index_mask = 0; | ||
1442 | struct drm_connector *connector; | ||
1443 | int entry = 0; | ||
1444 | |||
1445 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1446 | struct intel_output *intel_output = to_intel_output(connector); | ||
1447 | if (type_mask & (1 << intel_output->type)) | ||
1448 | index_mask |= (1 << entry); | ||
1449 | entry++; | ||
1450 | } | ||
1451 | return index_mask; | ||
1452 | } | ||
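intel_connector_clones() above turns a mask of output *types* into a mask of connector *indices*: connectors are numbered by their position in the mode_config list, and bit N of the result is set when connector N's type appears in the requested type mask. A standalone sketch with made-up list contents:

/* Standalone sketch of intel_connector_clones() above.  The type numbers
 * follow the INTEL_OUTPUT_* defines in intel_drv.h (ANALOG=1, SDVO=3, LVDS=4).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t connector_clones(const int *types, int count, uint32_t type_mask)
{
	uint32_t index_mask = 0;
	int entry;

	for (entry = 0; entry < count; entry++)
		if (type_mask & (1u << types[entry]))
			index_mask |= 1u << entry;
	return index_mask;
}

int main(void)
{
	int types[] = { 1, 3, 4 };  /* ANALOG, SDVO, LVDS */
	uint32_t mask = connector_clones(types, 3, (1u << 1) | (1u << 3));

	printf("clone index mask = 0x%x\n", mask);  /* expect 0x3 */
	return 0;
}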
1453 | |||
1454 | |||
1455 | static void intel_setup_outputs(struct drm_device *dev) | ||
1456 | { | ||
1457 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1458 | struct drm_connector *connector; | ||
1459 | |||
1460 | intel_crt_init(dev); | ||
1461 | |||
1462 | /* Set up integrated LVDS */ | ||
1463 | if (IS_MOBILE(dev) && !IS_I830(dev)) | ||
1464 | intel_lvds_init(dev); | ||
1465 | |||
1466 | if (IS_I9XX(dev)) { | ||
1467 | int found; | ||
1468 | |||
1469 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | ||
1470 | found = intel_sdvo_init(dev, SDVOB); | ||
1471 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | ||
1472 | intel_hdmi_init(dev, SDVOB); | ||
1473 | } | ||
1474 | if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { | ||
1475 | found = intel_sdvo_init(dev, SDVOC); | ||
1476 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | ||
1477 | intel_hdmi_init(dev, SDVOC); | ||
1478 | } | ||
1479 | } else | ||
1480 | intel_dvo_init(dev); | ||
1481 | |||
1482 | if (IS_I9XX(dev) && IS_MOBILE(dev)) | ||
1483 | intel_tv_init(dev); | ||
1484 | |||
1485 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1486 | struct intel_output *intel_output = to_intel_output(connector); | ||
1487 | struct drm_encoder *encoder = &intel_output->enc; | ||
1488 | int crtc_mask = 0, clone_mask = 0; | ||
1489 | |||
1490 | /* valid crtcs */ | ||
1491 | switch(intel_output->type) { | ||
1492 | case INTEL_OUTPUT_HDMI: | ||
1493 | crtc_mask = ((1 << 0)| | ||
1494 | (1 << 1)); | ||
1495 | clone_mask = ((1 << INTEL_OUTPUT_HDMI)); | ||
1496 | break; | ||
1497 | case INTEL_OUTPUT_DVO: | ||
1498 | case INTEL_OUTPUT_SDVO: | ||
1499 | crtc_mask = ((1 << 0)| | ||
1500 | (1 << 1)); | ||
1501 | clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | | ||
1502 | (1 << INTEL_OUTPUT_DVO) | | ||
1503 | (1 << INTEL_OUTPUT_SDVO)); | ||
1504 | break; | ||
1505 | case INTEL_OUTPUT_ANALOG: | ||
1506 | crtc_mask = ((1 << 0)| | ||
1507 | (1 << 1)); | ||
1508 | clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | | ||
1509 | (1 << INTEL_OUTPUT_DVO) | | ||
1510 | (1 << INTEL_OUTPUT_SDVO)); | ||
1511 | break; | ||
1512 | case INTEL_OUTPUT_LVDS: | ||
1513 | crtc_mask = (1 << 1); | ||
1514 | clone_mask = (1 << INTEL_OUTPUT_LVDS); | ||
1515 | break; | ||
1516 | case INTEL_OUTPUT_TVOUT: | ||
1517 | crtc_mask = ((1 << 0) | | ||
1518 | (1 << 1)); | ||
1519 | clone_mask = (1 << INTEL_OUTPUT_TVOUT); | ||
1520 | break; | ||
1521 | } | ||
1522 | encoder->possible_crtcs = crtc_mask; | ||
1523 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); | ||
1524 | } | ||
1525 | } | ||
1526 | |||
1527 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
1528 | { | ||
1529 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
1530 | struct drm_device *dev = fb->dev; | ||
1531 | |||
1532 | if (fb->fbdev) | ||
1533 | intelfb_remove(dev, fb); | ||
1534 | |||
1535 | drm_framebuffer_cleanup(fb); | ||
1536 | mutex_lock(&dev->struct_mutex); | ||
1537 | drm_gem_object_unreference(intel_fb->obj); | ||
1538 | mutex_unlock(&dev->struct_mutex); | ||
1539 | |||
1540 | kfree(intel_fb); | ||
1541 | } | ||
1542 | |||
1543 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | ||
1544 | struct drm_file *file_priv, | ||
1545 | unsigned int *handle) | ||
1546 | { | ||
1547 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
1548 | struct drm_gem_object *object = intel_fb->obj; | ||
1549 | |||
1550 | return drm_gem_handle_create(file_priv, object, handle); | ||
1551 | } | ||
1552 | |||
1553 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | ||
1554 | .destroy = intel_user_framebuffer_destroy, | ||
1555 | .create_handle = intel_user_framebuffer_create_handle, | ||
1556 | }; | ||
1557 | |||
1558 | int intel_framebuffer_create(struct drm_device *dev, | ||
1559 | struct drm_mode_fb_cmd *mode_cmd, | ||
1560 | struct drm_framebuffer **fb, | ||
1561 | struct drm_gem_object *obj) | ||
1562 | { | ||
1563 | struct intel_framebuffer *intel_fb; | ||
1564 | int ret; | ||
1565 | |||
1566 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | ||
1567 | if (!intel_fb) | ||
1568 | return -ENOMEM; | ||
1569 | |||
1570 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | ||
1571 | if (ret) { | ||
1572 | DRM_ERROR("framebuffer init failed %d\n", ret); | ||
1573 | return ret; | ||
1574 | } | ||
1575 | |||
1576 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); | ||
1577 | |||
1578 | intel_fb->obj = obj; | ||
1579 | |||
1580 | *fb = &intel_fb->base; | ||
1581 | |||
1582 | return 0; | ||
1583 | } | ||
1584 | |||
1585 | |||
1586 | static struct drm_framebuffer * | ||
1587 | intel_user_framebuffer_create(struct drm_device *dev, | ||
1588 | struct drm_file *filp, | ||
1589 | struct drm_mode_fb_cmd *mode_cmd) | ||
1590 | { | ||
1591 | struct drm_gem_object *obj; | ||
1592 | struct drm_framebuffer *fb; | ||
1593 | int ret; | ||
1594 | |||
1595 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | ||
1596 | if (!obj) | ||
1597 | return NULL; | ||
1598 | |||
1599 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); | ||
1600 | if (ret) { | ||
1601 | drm_gem_object_unreference(obj); | ||
1602 | return NULL; | ||
1603 | } | ||
1604 | |||
1605 | return fb; | ||
1606 | } | ||
1607 | |||
1608 | static const struct drm_mode_config_funcs intel_mode_funcs = { | ||
1609 | .fb_create = intel_user_framebuffer_create, | ||
1610 | .fb_changed = intelfb_probe, | ||
1611 | }; | ||
1612 | |||
1613 | void intel_modeset_init(struct drm_device *dev) | ||
1614 | { | ||
1615 | int num_pipe; | ||
1616 | int i; | ||
1617 | |||
1618 | drm_mode_config_init(dev); | ||
1619 | |||
1620 | dev->mode_config.min_width = 0; | ||
1621 | dev->mode_config.min_height = 0; | ||
1622 | |||
1623 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | ||
1624 | |||
1625 | if (IS_I965G(dev)) { | ||
1626 | dev->mode_config.max_width = 8192; | ||
1627 | dev->mode_config.max_height = 8192; | ||
1628 | } else { | ||
1629 | dev->mode_config.max_width = 2048; | ||
1630 | dev->mode_config.max_height = 2048; | ||
1631 | } | ||
1632 | |||
1633 | /* set memory base */ | ||
1634 | if (IS_I9XX(dev)) | ||
1635 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | ||
1636 | else | ||
1637 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | ||
1638 | |||
1639 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | ||
1640 | num_pipe = 2; | ||
1641 | else | ||
1642 | num_pipe = 1; | ||
1643 | DRM_DEBUG("%d display pipe%s available.\n", | ||
1644 | num_pipe, num_pipe > 1 ? "s" : ""); | ||
1645 | |||
1646 | for (i = 0; i < num_pipe; i++) { | ||
1647 | intel_crtc_init(dev, i); | ||
1648 | } | ||
1649 | |||
1650 | intel_setup_outputs(dev); | ||
1651 | } | ||
1652 | |||
1653 | void intel_modeset_cleanup(struct drm_device *dev) | ||
1654 | { | ||
1655 | drm_mode_config_cleanup(dev); | ||
1656 | } | ||
1657 | |||
1658 | |||
1659 | /* The current intel driver doesn't take advantage of encoders; | ||
1660 |  * always give back the encoder for the connector. | ||
1661 |  */ | ||
1662 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) | ||
1663 | { | ||
1664 | struct intel_output *intel_output = to_intel_output(connector); | ||
1665 | |||
1666 | return &intel_output->enc; | ||
1667 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h new file mode 100644 index 000000000000..957daef8edff --- /dev/null +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | ||
3 | * Copyright (c) 2007-2008 Intel Corporation | ||
4 | * Jesse Barnes <jesse.barnes@intel.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
23 | * IN THE SOFTWARE. | ||
24 | */ | ||
25 | #ifndef __INTEL_DRV_H__ | ||
26 | #define __INTEL_DRV_H__ | ||
27 | |||
28 | #include <linux/i2c.h> | ||
29 | #include <linux/i2c-id.h> | ||
30 | #include <linux/i2c-algo-bit.h> | ||
31 | #include "drm_crtc.h" | ||
32 | |||
33 | #include "drm_crtc_helper.h" | ||
34 | /* | ||
35 | * Display related stuff | ||
36 | */ | ||
37 | |||
38 | /* store information about an Ixxx DVO */ | ||
39 | /* The i830->i865 use multiple DVOs with multiple i2cs */ | ||
40 | /* the i915, i945 have a single sDVO i2c bus - which is different */ | ||
41 | #define MAX_OUTPUTS 6 | ||
43 | /* maximum connectors per crtc in the mode set */ | ||
43 | #define INTELFB_CONN_LIMIT 4 | ||
44 | |||
45 | #define INTEL_I2C_BUS_DVO 1 | ||
46 | #define INTEL_I2C_BUS_SDVO 2 | ||
47 | |||
48 | /* These are the outputs integrated into the chip; | ||
49 |    external chips are driven via the DVO or SDVO outputs. */ | ||
50 | #define INTEL_OUTPUT_UNUSED 0 | ||
51 | #define INTEL_OUTPUT_ANALOG 1 | ||
52 | #define INTEL_OUTPUT_DVO 2 | ||
53 | #define INTEL_OUTPUT_SDVO 3 | ||
54 | #define INTEL_OUTPUT_LVDS 4 | ||
55 | #define INTEL_OUTPUT_TVOUT 5 | ||
56 | #define INTEL_OUTPUT_HDMI 6 | ||
57 | |||
58 | #define INTEL_DVO_CHIP_NONE 0 | ||
59 | #define INTEL_DVO_CHIP_LVDS 1 | ||
60 | #define INTEL_DVO_CHIP_TMDS 2 | ||
61 | #define INTEL_DVO_CHIP_TVOUT 4 | ||
62 | |||
63 | struct intel_i2c_chan { | ||
64 | struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */ | ||
65 | u32 reg; /* GPIO reg */ | ||
66 | struct i2c_adapter adapter; | ||
67 | struct i2c_algo_bit_data algo; | ||
68 | u8 slave_addr; | ||
69 | }; | ||
70 | |||
71 | struct intel_framebuffer { | ||
72 | struct drm_framebuffer base; | ||
73 | struct drm_gem_object *obj; | ||
74 | }; | ||
75 | |||
76 | |||
77 | struct intel_output { | ||
78 | struct drm_connector base; | ||
79 | |||
80 | struct drm_encoder enc; | ||
81 | int type; | ||
82 | struct intel_i2c_chan *i2c_bus; /* for control functions */ | ||
83 | struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ | ||
84 | bool load_detect_temp; | ||
85 | bool needs_tv_clock; | ||
86 | void *dev_priv; | ||
87 | }; | ||
88 | |||
89 | struct intel_crtc { | ||
90 | struct drm_crtc base; | ||
91 | int pipe; | ||
92 | int plane; | ||
93 | struct drm_gem_object *cursor_bo; | ||
94 | uint32_t cursor_addr; | ||
95 | u8 lut_r[256], lut_g[256], lut_b[256]; | ||
96 | int dpms_mode; | ||
97 | struct intel_framebuffer *fbdev_fb; | ||
98 | /* a mode_set for fbdev users on this crtc */ | ||
99 | struct drm_mode_set mode_set; | ||
100 | }; | ||
101 | |||
102 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | ||
103 | #define to_intel_output(x) container_of(x, struct intel_output, base) | ||
104 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) | ||
105 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | ||
106 | |||
107 | struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | ||
108 | const char *name); | ||
109 | void intel_i2c_destroy(struct intel_i2c_chan *chan); | ||
110 | int intel_ddc_get_modes(struct intel_output *intel_output); | ||
111 | extern bool intel_ddc_probe(struct intel_output *intel_output); | ||
112 | |||
113 | extern void intel_crt_init(struct drm_device *dev); | ||
114 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | ||
115 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | ||
116 | extern void intel_dvo_init(struct drm_device *dev); | ||
117 | extern void intel_tv_init(struct drm_device *dev); | ||
118 | extern void intel_lvds_init(struct drm_device *dev); | ||
119 | |||
120 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | ||
121 | extern void intel_encoder_prepare(struct drm_encoder *encoder); | ||
122 | extern void intel_encoder_commit(struct drm_encoder *encoder); | ||
123 | |||
124 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); | ||
125 | |||
126 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | ||
127 | struct drm_crtc *crtc); | ||
128 | extern void intel_wait_for_vblank(struct drm_device *dev); | ||
129 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | ||
130 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | ||
131 | struct drm_display_mode *mode, | ||
132 | int *dpms_mode); | ||
133 | extern void intel_release_load_detect_pipe(struct intel_output *intel_output, | ||
134 | int dpms_mode); | ||
135 | |||
136 | extern struct drm_connector *intel_sdvo_find(struct drm_device *dev, int sdvoB); | ||
137 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); | ||
138 | extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); | ||
139 | extern int intelfb_probe(struct drm_device *dev); | ||
140 | extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); | ||
141 | extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); | ||
142 | extern void intelfb_restore(void); | ||
143 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | ||
144 | u16 blue, int regno); | ||
145 | |||
146 | extern int intel_framebuffer_create(struct drm_device *dev, | ||
147 | struct drm_mode_fb_cmd *mode_cmd, | ||
148 | struct drm_framebuffer **fb, | ||
149 | struct drm_gem_object *obj); | ||
150 | #endif /* __INTEL_DRV_H__ */ | ||
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c new file mode 100644 index 000000000000..8b8d6e65cd3f --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -0,0 +1,495 @@ | |||
1 | /* | ||
2 | * Copyright 2006 Dave Airlie <airlied@linux.ie> | ||
3 | * Copyright © 2006-2007 Intel Corporation | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: | ||
25 | * Eric Anholt <eric@anholt.net> | ||
26 | */ | ||
27 | #include <linux/i2c.h> | ||
28 | #include "drmP.h" | ||
29 | #include "drm.h" | ||
30 | #include "drm_crtc.h" | ||
31 | #include "intel_drv.h" | ||
32 | #include "i915_drm.h" | ||
33 | #include "i915_drv.h" | ||
34 | #include "dvo.h" | ||
35 | |||
36 | #define SIL164_ADDR 0x38 | ||
37 | #define CH7xxx_ADDR 0x76 | ||
38 | #define TFP410_ADDR 0x38 | ||
39 | |||
40 | static struct intel_dvo_device intel_dvo_devices[] = { | ||
41 | { | ||
42 | .type = INTEL_DVO_CHIP_TMDS, | ||
43 | .name = "sil164", | ||
44 | .dvo_reg = DVOC, | ||
45 | .slave_addr = SIL164_ADDR, | ||
46 | .dev_ops = &sil164_ops, | ||
47 | }, | ||
48 | { | ||
49 | .type = INTEL_DVO_CHIP_TMDS, | ||
50 | .name = "ch7xxx", | ||
51 | .dvo_reg = DVOC, | ||
52 | .slave_addr = CH7xxx_ADDR, | ||
53 | .dev_ops = &ch7xxx_ops, | ||
54 | }, | ||
55 | { | ||
56 | .type = INTEL_DVO_CHIP_LVDS, | ||
57 | .name = "ivch", | ||
58 | .dvo_reg = DVOA, | ||
59 | .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ | ||
60 | .dev_ops = &ivch_ops, | ||
61 | }, | ||
62 | { | ||
63 | .type = INTEL_DVO_CHIP_TMDS, | ||
64 | .name = "tfp410", | ||
65 | .dvo_reg = DVOC, | ||
66 | .slave_addr = TFP410_ADDR, | ||
67 | .dev_ops = &tfp410_ops, | ||
68 | }, | ||
69 | { | ||
70 | .type = INTEL_DVO_CHIP_LVDS, | ||
71 | .name = "ch7017", | ||
72 | .dvo_reg = DVOC, | ||
73 | .slave_addr = 0x75, | ||
74 | .gpio = GPIOE, | ||
75 | .dev_ops = &ch7017_ops, | ||
76 | } | ||
77 | }; | ||
78 | |||
79 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | ||
80 | { | ||
81 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||
82 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
83 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
84 | u32 dvo_reg = dvo->dvo_reg; | ||
85 | u32 temp = I915_READ(dvo_reg); | ||
86 | |||
87 | if (mode == DRM_MODE_DPMS_ON) { | ||
88 | I915_WRITE(dvo_reg, temp | DVO_ENABLE); | ||
89 | I915_READ(dvo_reg); | ||
90 | dvo->dev_ops->dpms(dvo, mode); | ||
91 | } else { | ||
92 | dvo->dev_ops->dpms(dvo, mode); | ||
93 | I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); | ||
94 | I915_READ(dvo_reg); | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static void intel_dvo_save(struct drm_connector *connector) | ||
99 | { | ||
100 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
101 | struct intel_output *intel_output = to_intel_output(connector); | ||
102 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
103 | |||
104 | /* Each output should probably just save the registers it touches, | ||
105 | * but for now, just save them all. | ||
106 | */ | ||
107 | dev_priv->saveDVOA = I915_READ(DVOA); | ||
108 | dev_priv->saveDVOB = I915_READ(DVOB); | ||
109 | dev_priv->saveDVOC = I915_READ(DVOC); | ||
110 | |||
111 | dvo->dev_ops->save(dvo); | ||
112 | } | ||
113 | |||
114 | static void intel_dvo_restore(struct drm_connector *connector) | ||
115 | { | ||
116 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
117 | struct intel_output *intel_output = to_intel_output(connector); | ||
118 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
119 | |||
120 | dvo->dev_ops->restore(dvo); | ||
121 | |||
122 | I915_WRITE(DVOA, dev_priv->saveDVOA); | ||
123 | I915_WRITE(DVOB, dev_priv->saveDVOB); | ||
124 | I915_WRITE(DVOC, dev_priv->saveDVOC); | ||
125 | } | ||
126 | |||
127 | static int intel_dvo_mode_valid(struct drm_connector *connector, | ||
128 | struct drm_display_mode *mode) | ||
129 | { | ||
130 | struct intel_output *intel_output = to_intel_output(connector); | ||
131 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
132 | |||
133 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
134 | return MODE_NO_DBLESCAN; | ||
135 | |||
136 | /* XXX: Validate clock range */ | ||
137 | |||
138 | if (dvo->panel_fixed_mode) { | ||
139 | if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay) | ||
140 | return MODE_PANEL; | ||
141 | if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay) | ||
142 | return MODE_PANEL; | ||
143 | } | ||
144 | |||
145 | return dvo->dev_ops->mode_valid(dvo, mode); | ||
146 | } | ||
147 | |||
148 | static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | ||
149 | struct drm_display_mode *mode, | ||
150 | struct drm_display_mode *adjusted_mode) | ||
151 | { | ||
152 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
153 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
154 | |||
155 | /* If we have timings from the BIOS for the panel, put them in | ||
156 | * to the adjusted mode. The CRTC will be set up for this mode, | ||
157 | * with the panel scaling set up to source from the H/VDisplay | ||
158 | * of the original mode. | ||
159 | */ | ||
160 | if (dvo->panel_fixed_mode != NULL) { | ||
161 | #define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x | ||
162 | C(hdisplay); | ||
163 | C(hsync_start); | ||
164 | C(hsync_end); | ||
165 | C(htotal); | ||
166 | C(vdisplay); | ||
167 | C(vsync_start); | ||
168 | C(vsync_end); | ||
169 | C(vtotal); | ||
170 | C(clock); | ||
171 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
172 | #undef C | ||
173 | } | ||
174 | |||
175 | if (dvo->dev_ops->mode_fixup) | ||
176 | return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode); | ||
177 | |||
178 | return true; | ||
179 | } | ||
180 | |||
181 | static void intel_dvo_mode_set(struct drm_encoder *encoder, | ||
182 | struct drm_display_mode *mode, | ||
183 | struct drm_display_mode *adjusted_mode) | ||
184 | { | ||
185 | struct drm_device *dev = encoder->dev; | ||
186 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
187 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
188 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
189 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
190 | int pipe = intel_crtc->pipe; | ||
191 | u32 dvo_val; | ||
192 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; | ||
193 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
194 | |||
195 | switch (dvo_reg) { | ||
196 | case DVOA: | ||
197 | default: | ||
198 | dvo_srcdim_reg = DVOA_SRCDIM; | ||
199 | break; | ||
200 | case DVOB: | ||
201 | dvo_srcdim_reg = DVOB_SRCDIM; | ||
202 | break; | ||
203 | case DVOC: | ||
204 | dvo_srcdim_reg = DVOC_SRCDIM; | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | dvo->dev_ops->mode_set(dvo, mode, adjusted_mode); | ||
209 | |||
210 | /* Save the data order, since I don't know what it should be set to. */ | ||
211 | dvo_val = I915_READ(dvo_reg) & | ||
212 | (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); | ||
213 | dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | | ||
214 | DVO_BLANK_ACTIVE_HIGH; | ||
215 | |||
216 | if (pipe == 1) | ||
217 | dvo_val |= DVO_PIPE_B_SELECT; | ||
218 | dvo_val |= DVO_PIPE_STALL; | ||
219 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
220 | dvo_val |= DVO_HSYNC_ACTIVE_HIGH; | ||
221 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
222 | dvo_val |= DVO_VSYNC_ACTIVE_HIGH; | ||
223 | |||
224 | I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED); | ||
225 | |||
226 | /*I915_WRITE(DVOB_SRCDIM, | ||
227 | (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | | ||
228 | (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ | ||
229 | I915_WRITE(dvo_srcdim_reg, | ||
230 | (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | | ||
231 | (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); | ||
232 | /*I915_WRITE(DVOB, dvo_val);*/ | ||
233 | I915_WRITE(dvo_reg, dvo_val); | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * Detect the output connection on our DVO device. | ||
238 | * | ||
239 | * Unimplemented. | ||
240 | */ | ||
241 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | ||
242 | { | ||
243 | struct intel_output *intel_output = to_intel_output(connector); | ||
244 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
245 | |||
246 | return dvo->dev_ops->detect(dvo); | ||
247 | } | ||
248 | |||
249 | static int intel_dvo_get_modes(struct drm_connector *connector) | ||
250 | { | ||
251 | struct intel_output *intel_output = to_intel_output(connector); | ||
252 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
253 | |||
254 | /* We should probably have an i2c driver get_modes function for those | ||
255 | * devices which will have a fixed set of modes determined by the chip | ||
256 | * (TV-out, for example), but for now with just TMDS and LVDS, | ||
257 | * that's not the case. | ||
258 | */ | ||
259 | intel_ddc_get_modes(intel_output); | ||
260 | if (!list_empty(&connector->probed_modes)) | ||
261 | return 1; | ||
262 | |||
263 | |||
264 | if (dvo->panel_fixed_mode != NULL) { | ||
265 | struct drm_display_mode *mode; | ||
266 | mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode); | ||
267 | if (mode) { | ||
268 | drm_mode_probed_add(connector, mode); | ||
269 | return 1; | ||
270 | } | ||
271 | } | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static void intel_dvo_destroy(struct drm_connector *connector) | ||
276 | { | ||
277 | struct intel_output *intel_output = to_intel_output(connector); | ||
278 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
279 | |||
280 | if (dvo) { | ||
281 | if (dvo->dev_ops->destroy) | ||
282 | dvo->dev_ops->destroy(dvo); | ||
283 | if (dvo->panel_fixed_mode) | ||
284 | kfree(dvo->panel_fixed_mode); | ||
285 | /* no need to free; dvo points into intel_dvo_devices[] now */ | ||
286 | //kfree(dvo); | ||
287 | } | ||
288 | if (intel_output->i2c_bus) | ||
289 | intel_i2c_destroy(intel_output->i2c_bus); | ||
290 | if (intel_output->ddc_bus) | ||
291 | intel_i2c_destroy(intel_output->ddc_bus); | ||
292 | drm_sysfs_connector_remove(connector); | ||
293 | drm_connector_cleanup(connector); | ||
294 | kfree(intel_output); | ||
295 | } | ||
296 | |||
297 | #ifdef RANDR_GET_CRTC_INTERFACE | ||
298 | static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) | ||
299 | { | ||
300 | struct drm_device *dev = connector->dev; | ||
301 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
302 | struct intel_output *intel_output = to_intel_output(connector); | ||
303 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
304 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); | ||
305 | |||
306 | return intel_pipe_to_crtc(pScrn, pipe); | ||
307 | } | ||
308 | #endif | ||
309 | |||
310 | static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { | ||
311 | .dpms = intel_dvo_dpms, | ||
312 | .mode_fixup = intel_dvo_mode_fixup, | ||
313 | .prepare = intel_encoder_prepare, | ||
314 | .mode_set = intel_dvo_mode_set, | ||
315 | .commit = intel_encoder_commit, | ||
316 | }; | ||
317 | |||
318 | static const struct drm_connector_funcs intel_dvo_connector_funcs = { | ||
319 | .save = intel_dvo_save, | ||
320 | .restore = intel_dvo_restore, | ||
321 | .detect = intel_dvo_detect, | ||
322 | .destroy = intel_dvo_destroy, | ||
323 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
324 | }; | ||
325 | |||
326 | static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { | ||
327 | .mode_valid = intel_dvo_mode_valid, | ||
328 | .get_modes = intel_dvo_get_modes, | ||
329 | .best_encoder = intel_best_encoder, | ||
330 | }; | ||
331 | |||
332 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) | ||
333 | { | ||
334 | drm_encoder_cleanup(encoder); | ||
335 | } | ||
336 | |||
337 | static const struct drm_encoder_funcs intel_dvo_enc_funcs = { | ||
338 | .destroy = intel_dvo_enc_destroy, | ||
339 | }; | ||
340 | |||
341 | |||
342 | /** | ||
343 | * Attempts to get a fixed panel timing for LVDS (currently only the i830). | ||
344 | * | ||
345 | * Other chips with DVO LVDS will need to extend this to deal with the LVDS | ||
346 | * chip being on DVOB/C and having multiple pipes. | ||
347 | */ | ||
348 | static struct drm_display_mode * | ||
349 | intel_dvo_get_current_mode(struct drm_connector *connector) | ||
350 | { | ||
351 | struct drm_device *dev = connector->dev; | ||
352 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
353 | struct intel_output *intel_output = to_intel_output(connector); | ||
354 | struct intel_dvo_device *dvo = intel_output->dev_priv; | ||
355 | uint32_t dvo_reg = dvo->dvo_reg; | ||
356 | uint32_t dvo_val = I915_READ(dvo_reg); | ||
357 | struct drm_display_mode *mode = NULL; | ||
358 | |||
359 | /* If the DVO port is active, that'll be the LVDS, so we can pull out | ||
360 | * its timings to get how the BIOS set up the panel. | ||
361 | */ | ||
362 | if (dvo_val & DVO_ENABLE) { | ||
363 | struct drm_crtc *crtc; | ||
364 | int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; | ||
365 | |||
366 | crtc = intel_get_crtc_from_pipe(dev, pipe); | ||
367 | if (crtc) { | ||
368 | mode = intel_crtc_mode_get(dev, crtc); | ||
369 | |||
370 | if (mode) { | ||
371 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
372 | if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) | ||
373 | mode->flags |= DRM_MODE_FLAG_PHSYNC; | ||
374 | if (dvo_val & DVO_VSYNC_ACTIVE_HIGH) | ||
375 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | ||
376 | } | ||
377 | } | ||
378 | } | ||
379 | return mode; | ||
380 | } | ||
381 | |||
382 | void intel_dvo_init(struct drm_device *dev) | ||
383 | { | ||
384 | struct intel_output *intel_output; | ||
385 | struct intel_dvo_device *dvo; | ||
386 | struct intel_i2c_chan *i2cbus = NULL; | ||
387 | int ret = 0; | ||
388 | int i; | ||
389 | int gpio_inited = 0; | ||
390 | int encoder_type = DRM_MODE_ENCODER_NONE; | ||
391 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | ||
392 | if (!intel_output) | ||
393 | return; | ||
394 | |||
395 | /* Set up the DDC bus */ | ||
396 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | ||
397 | if (!intel_output->ddc_bus) | ||
398 | goto free_intel; | ||
399 | |||
400 | /* Now, try to find a controller */ | ||
401 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | ||
402 | struct drm_connector *connector = &intel_output->base; | ||
403 | int gpio; | ||
404 | |||
405 | dvo = &intel_dvo_devices[i]; | ||
406 | |||
407 | /* Allow the I2C driver info to specify the GPIO to be used in | ||
408 | * special cases, but otherwise default to what's defined | ||
409 | * in the spec. | ||
410 | */ | ||
411 | if (dvo->gpio != 0) | ||
412 | gpio = dvo->gpio; | ||
413 | else if (dvo->type == INTEL_DVO_CHIP_LVDS) | ||
414 | gpio = GPIOB; | ||
415 | else | ||
416 | gpio = GPIOE; | ||
417 | |||
418 | /* Set up the I2C bus necessary for the chip we're probing. | ||
419 | * It appears that everything is on GPIOE except for panels | ||
420 | * on i830 laptops, which are on GPIOB (DVOA). | ||
421 | */ | ||
422 | if (gpio_inited != gpio) { | ||
423 | if (i2cbus != NULL) | ||
424 | intel_i2c_destroy(i2cbus); | ||
425 | if (!(i2cbus = intel_i2c_create(dev, gpio, | ||
426 | gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { | ||
427 | continue; | ||
428 | } | ||
429 | gpio_inited = gpio; | ||
430 | } | ||
431 | |||
432 | if (dvo->dev_ops != NULL) | ||
433 | ret = dvo->dev_ops->init(dvo, i2cbus); | ||
434 | else | ||
435 | ret = false; | ||
436 | |||
437 | if (!ret) | ||
438 | continue; | ||
439 | |||
440 | intel_output->type = INTEL_OUTPUT_DVO; | ||
441 | switch (dvo->type) { | ||
442 | case INTEL_DVO_CHIP_TMDS: | ||
443 | drm_connector_init(dev, connector, | ||
444 | &intel_dvo_connector_funcs, | ||
445 | DRM_MODE_CONNECTOR_DVII); | ||
446 | encoder_type = DRM_MODE_ENCODER_TMDS; | ||
447 | break; | ||
448 | case INTEL_DVO_CHIP_LVDS: | ||
449 | drm_connector_init(dev, connector, | ||
450 | &intel_dvo_connector_funcs, | ||
451 | DRM_MODE_CONNECTOR_LVDS); | ||
452 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
453 | break; | ||
454 | } | ||
455 | |||
456 | drm_connector_helper_add(connector, | ||
457 | &intel_dvo_connector_helper_funcs); | ||
458 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
459 | connector->interlace_allowed = false; | ||
460 | connector->doublescan_allowed = false; | ||
461 | |||
462 | intel_output->dev_priv = dvo; | ||
463 | intel_output->i2c_bus = i2cbus; | ||
464 | |||
465 | drm_encoder_init(dev, &intel_output->enc, | ||
466 | &intel_dvo_enc_funcs, encoder_type); | ||
467 | drm_encoder_helper_add(&intel_output->enc, | ||
468 | &intel_dvo_helper_funcs); | ||
469 | |||
470 | drm_mode_connector_attach_encoder(&intel_output->base, | ||
471 | &intel_output->enc); | ||
472 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { | ||
473 | /* For our LVDS chipsets, we should hopefully be able | ||
474 | * to dig the fixed panel mode out of the BIOS data. | ||
475 | * However, it's in a different format from the BIOS | ||
476 | * data on chipsets with integrated LVDS (stored in AIM | ||
477 | * headers, likely), so for now, just get the current | ||
478 | * mode being output through DVO. | ||
479 | */ | ||
480 | dvo->panel_fixed_mode = | ||
481 | intel_dvo_get_current_mode(connector); | ||
482 | dvo->panel_wants_dither = true; | ||
483 | } | ||
484 | |||
485 | drm_sysfs_connector_add(connector); | ||
486 | return; | ||
487 | } | ||
488 | |||
489 | intel_i2c_destroy(intel_output->ddc_bus); | ||
490 | /* Didn't find a chip, so tear down. */ | ||
491 | if (i2cbus != NULL) | ||
492 | intel_i2c_destroy(i2cbus); | ||
493 | free_intel: | ||
494 | kfree(intel_output); | ||
495 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c new file mode 100644 index 000000000000..afd1217b8a02 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -0,0 +1,925 @@ | |||
1 | /* | ||
2 | * Copyright © 2007 David Airlie | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * David Airlie | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/tty.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/sysrq.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/fb.h> | ||
37 | #include <linux/init.h> | ||
38 | |||
39 | #include "drmP.h" | ||
40 | #include "drm.h" | ||
41 | #include "drm_crtc.h" | ||
42 | #include "intel_drv.h" | ||
43 | #include "i915_drm.h" | ||
44 | #include "i915_drv.h" | ||
45 | |||
46 | struct intelfb_par { | ||
47 | struct drm_device *dev; | ||
48 | struct drm_display_mode *our_mode; | ||
49 | struct intel_framebuffer *intel_fb; | ||
50 | int crtc_count; | ||
51 | /* crtc currently bound to this */ | ||
52 | uint32_t crtc_ids[2]; | ||
53 | }; | ||
54 | |||
55 | static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green, | ||
56 | unsigned blue, unsigned transp, | ||
57 | struct fb_info *info) | ||
58 | { | ||
59 | struct intelfb_par *par = info->par; | ||
60 | struct drm_device *dev = par->dev; | ||
61 | struct drm_crtc *crtc; | ||
62 | int i; | ||
63 | |||
64 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
65 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
66 | struct drm_mode_set *modeset = &intel_crtc->mode_set; | ||
67 | struct drm_framebuffer *fb = modeset->fb; | ||
68 | |||
69 | for (i = 0; i < par->crtc_count; i++) | ||
70 | if (crtc->base.id == par->crtc_ids[i]) | ||
71 | break; | ||
72 | |||
73 | if (i == par->crtc_count) | ||
74 | continue; | ||
75 | |||
76 | |||
77 | if (regno > 255) | ||
78 | return 1; | ||
79 | |||
80 | if (fb->depth == 8) { | ||
81 | intel_crtc_fb_gamma_set(crtc, red, green, blue, regno); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | if (regno < 16) { | ||
86 | switch (fb->depth) { | ||
87 | case 15: | ||
88 | fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | | ||
89 | ((green & 0xf800) >> 6) | | ||
90 | ((blue & 0xf800) >> 11); | ||
91 | break; | ||
92 | case 16: | ||
93 | fb->pseudo_palette[regno] = (red & 0xf800) | | ||
94 | ((green & 0xfc00) >> 5) | | ||
95 | ((blue & 0xf800) >> 11); | ||
96 | break; | ||
97 | case 24: | ||
98 | case 32: | ||
99 | fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | | ||
100 | (green & 0xff00) | | ||
101 | ((blue & 0xff00) >> 8); | ||
102 | break; | ||
103 | } | ||
104 | } | ||
105 | } | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static int intelfb_check_var(struct fb_var_screeninfo *var, | ||
110 | struct fb_info *info) | ||
111 | { | ||
112 | struct intelfb_par *par = info->par; | ||
113 | struct intel_framebuffer *intel_fb = par->intel_fb; | ||
114 | struct drm_framebuffer *fb = &intel_fb->base; | ||
115 | int depth; | ||
116 | |||
117 | if (var->pixclock == -1 || !var->pixclock) | ||
118 | return -EINVAL; | ||
119 | |||
120 | /* Need to resize the fb object !!! */ | ||
121 | if (var->xres > fb->width || var->yres > fb->height) { | ||
122 | DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height); | ||
123 | DRM_ERROR("Need resizing code.\n"); | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | switch (var->bits_per_pixel) { | ||
128 | case 16: | ||
129 | depth = (var->green.length == 6) ? 16 : 15; | ||
130 | break; | ||
131 | case 32: | ||
132 | depth = (var->transp.length > 0) ? 32 : 24; | ||
133 | break; | ||
134 | default: | ||
135 | depth = var->bits_per_pixel; | ||
136 | break; | ||
137 | } | ||
138 | |||
139 | switch (depth) { | ||
140 | case 8: | ||
141 | var->red.offset = 0; | ||
142 | var->green.offset = 0; | ||
143 | var->blue.offset = 0; | ||
144 | var->red.length = 8; | ||
145 | var->green.length = 8; | ||
146 | var->blue.length = 8; | ||
147 | var->transp.length = 0; | ||
148 | var->transp.offset = 0; | ||
149 | break; | ||
150 | case 15: | ||
151 | var->red.offset = 10; | ||
152 | var->green.offset = 5; | ||
153 | var->blue.offset = 0; | ||
154 | var->red.length = 5; | ||
155 | var->green.length = 5; | ||
156 | var->blue.length = 5; | ||
157 | var->transp.length = 1; | ||
158 | var->transp.offset = 15; | ||
159 | break; | ||
160 | case 16: | ||
161 | var->red.offset = 11; | ||
162 | var->green.offset = 5; | ||
163 | var->blue.offset = 0; | ||
164 | var->red.length = 5; | ||
165 | var->green.length = 6; | ||
166 | var->blue.length = 5; | ||
167 | var->transp.length = 0; | ||
168 | var->transp.offset = 0; | ||
169 | break; | ||
170 | case 24: | ||
171 | var->red.offset = 16; | ||
172 | var->green.offset = 8; | ||
173 | var->blue.offset = 0; | ||
174 | var->red.length = 8; | ||
175 | var->green.length = 8; | ||
176 | var->blue.length = 8; | ||
177 | var->transp.length = 0; | ||
178 | var->transp.offset = 0; | ||
179 | break; | ||
180 | case 32: | ||
181 | var->red.offset = 16; | ||
182 | var->green.offset = 8; | ||
183 | var->blue.offset = 0; | ||
184 | var->red.length = 8; | ||
185 | var->green.length = 8; | ||
186 | var->blue.length = 8; | ||
187 | var->transp.length = 8; | ||
188 | var->transp.offset = 24; | ||
189 | break; | ||
190 | default: | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | /* this will let fbcon do the mode init */ | ||
198 | /* FIXME: take mode config lock? */ | ||
199 | static int intelfb_set_par(struct fb_info *info) | ||
200 | { | ||
201 | struct intelfb_par *par = info->par; | ||
202 | struct drm_device *dev = par->dev; | ||
203 | struct fb_var_screeninfo *var = &info->var; | ||
204 | int i; | ||
205 | |||
206 | DRM_DEBUG("%d %d\n", var->xres, var->pixclock); | ||
207 | |||
208 | if (var->pixclock != -1) { | ||
209 | |||
210 | DRM_ERROR("PIXEL CLCOK SET\n"); | ||
211 | return -EINVAL; | ||
212 | } else { | ||
213 | struct drm_crtc *crtc; | ||
214 | int ret; | ||
215 | |||
216 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
217 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
218 | |||
219 | for (i = 0; i < par->crtc_count; i++) | ||
220 | if (crtc->base.id == par->crtc_ids[i]) | ||
221 | break; | ||
222 | |||
223 | if (i == par->crtc_count) | ||
224 | continue; | ||
225 | |||
226 | if (crtc->fb == intel_crtc->mode_set.fb) { | ||
227 | mutex_lock(&dev->mode_config.mutex); | ||
228 | ret = crtc->funcs->set_config(&intel_crtc->mode_set); | ||
229 | mutex_unlock(&dev->mode_config.mutex); | ||
230 | if (ret) | ||
231 | return ret; | ||
232 | } | ||
233 | } | ||
234 | return 0; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static int intelfb_pan_display(struct fb_var_screeninfo *var, | ||
239 | struct fb_info *info) | ||
240 | { | ||
241 | struct intelfb_par *par = info->par; | ||
242 | struct drm_device *dev = par->dev; | ||
243 | struct drm_mode_set *modeset; | ||
244 | struct drm_crtc *crtc; | ||
245 | struct intel_crtc *intel_crtc; | ||
246 | int ret = 0; | ||
247 | int i; | ||
248 | |||
249 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
250 | for (i = 0; i < par->crtc_count; i++) | ||
251 | if (crtc->base.id == par->crtc_ids[i]) | ||
252 | break; | ||
253 | |||
254 | if (i == par->crtc_count) | ||
255 | continue; | ||
256 | |||
257 | intel_crtc = to_intel_crtc(crtc); | ||
258 | modeset = &intel_crtc->mode_set; | ||
259 | |||
260 | modeset->x = var->xoffset; | ||
261 | modeset->y = var->yoffset; | ||
262 | |||
263 | if (modeset->num_connectors) { | ||
264 | mutex_lock(&dev->mode_config.mutex); | ||
265 | ret = crtc->funcs->set_config(modeset); | ||
266 | mutex_unlock(&dev->mode_config.mutex); | ||
267 | if (!ret) { | ||
268 | info->var.xoffset = var->xoffset; | ||
269 | info->var.yoffset = var->yoffset; | ||
270 | } | ||
271 | } | ||
272 | } | ||
273 | |||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | static void intelfb_on(struct fb_info *info) | ||
278 | { | ||
279 | struct intelfb_par *par = info->par; | ||
280 | struct drm_device *dev = par->dev; | ||
281 | struct drm_crtc *crtc; | ||
282 | struct drm_encoder *encoder; | ||
283 | int i; | ||
284 | |||
285 | /* | ||
286 | * For each CRTC bound to this fb, turn the CRTC back on, then | ||
287 | * turn on all of its associated encoders. | ||
288 | */ | ||
289 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
290 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
291 | |||
292 | for (i = 0; i < par->crtc_count; i++) | ||
293 | if (crtc->base.id == par->crtc_ids[i]) | ||
294 | break; | ||
295 | |||
296 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
297 | |||
298 | /* Found a CRTC on this fb, now find encoders */ | ||
299 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
300 | if (encoder->crtc == crtc) { | ||
301 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
302 | encoder_funcs = encoder->helper_private; | ||
303 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | } | ||
308 | |||
309 | static void intelfb_off(struct fb_info *info, int dpms_mode) | ||
310 | { | ||
311 | struct intelfb_par *par = info->par; | ||
312 | struct drm_device *dev = par->dev; | ||
313 | struct drm_crtc *crtc; | ||
314 | struct drm_encoder *encoder; | ||
315 | int i; | ||
316 | |||
317 | /* | ||
318 | * For each CRTC in this fb, find all associated encoders | ||
319 | * and turn them off, then turn off the CRTC. | ||
320 | */ | ||
321 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
322 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
323 | |||
324 | for (i = 0; i < par->crtc_count; i++) | ||
325 | if (crtc->base.id == par->crtc_ids[i]) | ||
326 | break; | ||
327 | |||
328 | /* Found a CRTC on this fb, now find encoders */ | ||
329 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
330 | if (encoder->crtc == crtc) { | ||
331 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
332 | encoder_funcs = encoder->helper_private; | ||
333 | encoder_funcs->dpms(encoder, dpms_mode); | ||
334 | } | ||
335 | } | ||
336 | if (dpms_mode == DRM_MODE_DPMS_OFF) | ||
337 | crtc_funcs->dpms(crtc, dpms_mode); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static int intelfb_blank(int blank, struct fb_info *info) | ||
342 | { | ||
343 | switch (blank) { | ||
344 | case FB_BLANK_UNBLANK: | ||
345 | intelfb_on(info); | ||
346 | break; | ||
347 | case FB_BLANK_NORMAL: | ||
348 | intelfb_off(info, DRM_MODE_DPMS_STANDBY); | ||
349 | break; | ||
350 | case FB_BLANK_HSYNC_SUSPEND: | ||
351 | intelfb_off(info, DRM_MODE_DPMS_STANDBY); | ||
352 | break; | ||
353 | case FB_BLANK_VSYNC_SUSPEND: | ||
354 | intelfb_off(info, DRM_MODE_DPMS_SUSPEND); | ||
355 | break; | ||
356 | case FB_BLANK_POWERDOWN: | ||
357 | intelfb_off(info, DRM_MODE_DPMS_OFF); | ||
358 | break; | ||
359 | } | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static struct fb_ops intelfb_ops = { | ||
364 | .owner = THIS_MODULE, | ||
365 | .fb_check_var = intelfb_check_var, | ||
366 | .fb_set_par = intelfb_set_par, | ||
367 | .fb_setcolreg = intelfb_setcolreg, | ||
368 | .fb_fillrect = cfb_fillrect, | ||
369 | .fb_copyarea = cfb_copyarea, | ||
370 | .fb_imageblit = cfb_imageblit, | ||
371 | .fb_pan_display = intelfb_pan_display, | ||
372 | .fb_blank = intelfb_blank, | ||
373 | }; | ||
374 | |||
375 | /** | ||
376 | * Currently it is assumed that the old framebuffer is reused. | ||
377 | * | ||
378 | * LOCKING | ||
379 | * caller should hold the mode config lock. | ||
380 | * | ||
381 | */ | ||
382 | int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc) | ||
383 | { | ||
384 | struct fb_info *info; | ||
385 | struct drm_framebuffer *fb; | ||
386 | struct drm_display_mode *mode = crtc->desired_mode; | ||
387 | |||
388 | fb = crtc->fb; | ||
389 | if (!fb) | ||
390 | return 1; | ||
391 | |||
392 | info = fb->fbdev; | ||
393 | if (!info) | ||
394 | return 1; | ||
395 | |||
396 | if (!mode) | ||
397 | return 1; | ||
398 | |||
399 | info->var.xres = mode->hdisplay; | ||
400 | info->var.right_margin = mode->hsync_start - mode->hdisplay; | ||
401 | info->var.hsync_len = mode->hsync_end - mode->hsync_start; | ||
402 | info->var.left_margin = mode->htotal - mode->hsync_end; | ||
403 | info->var.yres = mode->vdisplay; | ||
404 | info->var.lower_margin = mode->vsync_start - mode->vdisplay; | ||
405 | info->var.vsync_len = mode->vsync_end - mode->vsync_start; | ||
406 | info->var.upper_margin = mode->vtotal - mode->vsync_end; | ||
407 | info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; | ||
408 | /* avoid overflow */ | ||
409 | info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | EXPORT_SYMBOL(intelfb_resize); | ||
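As an aside on the margin math in intelfb_resize() above, here is a minimal sketch with example timings; the 1024x768-style numbers are illustrative values, not taken from this patch:

/* Illustrative only: the front/back porch arithmetic used by
 * intelfb_resize(), evaluated for example 1024x768-style timings. */
static void example_margins(void)
{
        int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344;
        int right_margin = hsync_start - hdisplay;   /* front porch = 24  */
        int hsync_len    = hsync_end - hsync_start;  /* sync width  = 136 */
        int left_margin  = htotal - hsync_end;       /* back porch  = 160 */

        (void)right_margin; (void)hsync_len; (void)left_margin;
}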
414 | |||
415 | static struct drm_mode_set kernelfb_mode; | ||
416 | |||
417 | static int intelfb_panic(struct notifier_block *n, unsigned long unused, | ||
418 | void *panic_str) | ||
419 | { | ||
420 | DRM_ERROR("panic occurred, switching back to text console\n"); | ||
421 | |||
422 | intelfb_restore(); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | static struct notifier_block paniced = { | ||
427 | .notifier_call = intelfb_panic, | ||
428 | }; | ||
429 | |||
430 | static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | ||
431 | uint32_t fb_height, uint32_t surface_width, | ||
432 | uint32_t surface_height, | ||
433 | struct intel_framebuffer **intel_fb_p) | ||
434 | { | ||
435 | struct fb_info *info; | ||
436 | struct intelfb_par *par; | ||
437 | struct drm_framebuffer *fb; | ||
438 | struct intel_framebuffer *intel_fb; | ||
439 | struct drm_mode_fb_cmd mode_cmd; | ||
440 | struct drm_gem_object *fbo = NULL; | ||
441 | struct drm_i915_gem_object *obj_priv; | ||
442 | struct device *device = &dev->pdev->dev; | ||
443 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; | ||
444 | |||
445 | mode_cmd.width = surface_width; | ||
446 | mode_cmd.height = surface_height; | ||
447 | |||
448 | mode_cmd.bpp = 32; | ||
449 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); | ||
450 | mode_cmd.depth = 24; | ||
451 | |||
452 | size = mode_cmd.pitch * mode_cmd.height; | ||
453 | size = ALIGN(size, PAGE_SIZE); | ||
454 | fbo = drm_gem_object_alloc(dev, size); | ||
455 | if (!fbo) { | ||
456 | printk(KERN_ERR "failed to allocate framebuffer\n"); | ||
457 | ret = -ENOMEM; | ||
458 | goto out; | ||
459 | } | ||
460 | obj_priv = fbo->driver_private; | ||
461 | |||
462 | mutex_lock(&dev->struct_mutex); | ||
463 | |||
464 | ret = i915_gem_object_pin(fbo, PAGE_SIZE); | ||
465 | if (ret) { | ||
466 | DRM_ERROR("failed to pin fb: %d\n", ret); | ||
467 | goto out_unref; | ||
468 | } | ||
469 | |||
470 | /* Flush everything out, we'll be doing GTT only from now on */ | ||
471 | i915_gem_object_set_to_gtt_domain(fbo, 1); | ||
472 | |||
473 | ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); | ||
474 | if (ret) { | ||
475 | DRM_ERROR("failed to allocate fb.\n"); | ||
476 | goto out_unref; | ||
477 | } | ||
478 | |||
479 | list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); | ||
480 | |||
481 | intel_fb = to_intel_framebuffer(fb); | ||
482 | *intel_fb_p = intel_fb; | ||
483 | |||
484 | info = framebuffer_alloc(sizeof(struct intelfb_par), device); | ||
485 | if (!info) { | ||
486 | ret = -ENOMEM; | ||
487 | goto out_unref; | ||
488 | } | ||
489 | |||
490 | par = info->par; | ||
491 | |||
492 | strcpy(info->fix.id, "inteldrmfb"); | ||
493 | info->fix.type = FB_TYPE_PACKED_PIXELS; | ||
494 | info->fix.visual = FB_VISUAL_TRUECOLOR; | ||
495 | info->fix.type_aux = 0; | ||
496 | info->fix.xpanstep = 1; /* doing it in hw */ | ||
497 | info->fix.ypanstep = 1; /* doing it in hw */ | ||
498 | info->fix.ywrapstep = 0; | ||
499 | info->fix.accel = FB_ACCEL_I830; | ||
500 | info->fix.type_aux = 0; | ||
501 | |||
502 | info->flags = FBINFO_DEFAULT; | ||
503 | |||
504 | info->fbops = &intelfb_ops; | ||
505 | |||
506 | info->fix.line_length = fb->pitch; | ||
507 | info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; | ||
508 | info->fix.smem_len = size; | ||
509 | |||
510 | info->flags = FBINFO_DEFAULT; | ||
511 | |||
512 | info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, | ||
513 | size); | ||
514 | if (!info->screen_base) { | ||
515 | ret = -ENOSPC; | ||
516 | goto out_unref; | ||
517 | } | ||
518 | info->screen_size = size; | ||
519 | |||
520 | // memset(info->screen_base, 0, size); | ||
521 | |||
522 | info->pseudo_palette = fb->pseudo_palette; | ||
523 | info->var.xres_virtual = fb->width; | ||
524 | info->var.yres_virtual = fb->height; | ||
525 | info->var.bits_per_pixel = fb->bits_per_pixel; | ||
526 | info->var.xoffset = 0; | ||
527 | info->var.yoffset = 0; | ||
528 | info->var.activate = FB_ACTIVATE_NOW; | ||
529 | info->var.height = -1; | ||
530 | info->var.width = -1; | ||
531 | |||
532 | info->var.xres = fb_width; | ||
533 | info->var.yres = fb_height; | ||
534 | |||
535 | /* FIXME: we really shouldn't expose mmio space at all */ | ||
536 | info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); | ||
537 | info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar); | ||
538 | |||
539 | info->pixmap.size = 64*1024; | ||
540 | info->pixmap.buf_align = 8; | ||
541 | info->pixmap.access_align = 32; | ||
542 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
543 | info->pixmap.scan_align = 1; | ||
544 | |||
545 | switch(fb->depth) { | ||
546 | case 8: | ||
547 | info->var.red.offset = 0; | ||
548 | info->var.green.offset = 0; | ||
549 | info->var.blue.offset = 0; | ||
550 | info->var.red.length = 8; /* 8bit DAC */ | ||
551 | info->var.green.length = 8; | ||
552 | info->var.blue.length = 8; | ||
553 | info->var.transp.offset = 0; | ||
554 | info->var.transp.length = 0; | ||
555 | break; | ||
556 | case 15: | ||
557 | info->var.red.offset = 10; | ||
558 | info->var.green.offset = 5; | ||
559 | info->var.blue.offset = 0; | ||
560 | info->var.red.length = 5; | ||
561 | info->var.green.length = 5; | ||
562 | info->var.blue.length = 5; | ||
563 | info->var.transp.offset = 15; | ||
564 | info->var.transp.length = 1; | ||
565 | break; | ||
566 | case 16: | ||
567 | info->var.red.offset = 11; | ||
568 | info->var.green.offset = 5; | ||
569 | info->var.blue.offset = 0; | ||
570 | info->var.red.length = 5; | ||
571 | info->var.green.length = 6; | ||
572 | info->var.blue.length = 5; | ||
573 | info->var.transp.offset = 0; | ||
574 | break; | ||
575 | case 24: | ||
576 | info->var.red.offset = 16; | ||
577 | info->var.green.offset = 8; | ||
578 | info->var.blue.offset = 0; | ||
579 | info->var.red.length = 8; | ||
580 | info->var.green.length = 8; | ||
581 | info->var.blue.length = 8; | ||
582 | info->var.transp.offset = 0; | ||
583 | info->var.transp.length = 0; | ||
584 | break; | ||
585 | case 32: | ||
586 | info->var.red.offset = 16; | ||
587 | info->var.green.offset = 8; | ||
588 | info->var.blue.offset = 0; | ||
589 | info->var.red.length = 8; | ||
590 | info->var.green.length = 8; | ||
591 | info->var.blue.length = 8; | ||
592 | info->var.transp.offset = 24; | ||
593 | info->var.transp.length = 8; | ||
594 | break; | ||
595 | default: | ||
596 | break; | ||
597 | } | ||
598 | |||
599 | fb->fbdev = info; | ||
600 | |||
601 | par->intel_fb = intel_fb; | ||
602 | par->dev = dev; | ||
603 | |||
604 | /* To allow resizing without swapping buffers */ | ||
605 | printk(KERN_INFO "allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, | ||
606 | intel_fb->base.height, obj_priv->gtt_offset, fbo); | ||
607 | |||
608 | mutex_unlock(&dev->struct_mutex); | ||
609 | return 0; | ||
610 | |||
611 | out_unref: | ||
612 | drm_gem_object_unreference(fbo); | ||
613 | mutex_unlock(&dev->struct_mutex); | ||
614 | out: | ||
615 | return ret; | ||
616 | } | ||
617 | |||
618 | static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc) | ||
619 | { | ||
620 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
621 | struct intel_framebuffer *intel_fb; | ||
622 | struct drm_framebuffer *fb; | ||
623 | struct drm_connector *connector; | ||
624 | struct fb_info *info; | ||
625 | struct intelfb_par *par; | ||
626 | struct drm_mode_set *modeset; | ||
627 | unsigned int width, height; | ||
628 | int new_fb = 0; | ||
629 | int ret, i, conn_count; | ||
630 | |||
631 | if (!drm_helper_crtc_in_use(crtc)) | ||
632 | return 0; | ||
633 | |||
634 | if (!crtc->desired_mode) | ||
635 | return 0; | ||
636 | |||
637 | width = crtc->desired_mode->hdisplay; | ||
638 | height = crtc->desired_mode->vdisplay; | ||
639 | |||
640 | /* is there an fb bound to this crtc already */ | ||
641 | if (!intel_crtc->mode_set.fb) { | ||
642 | ret = intelfb_create(dev, width, height, width, height, &intel_fb); | ||
643 | if (ret) | ||
644 | return -EINVAL; | ||
645 | new_fb = 1; | ||
646 | } else { | ||
647 | fb = intel_crtc->mode_set.fb; | ||
648 | intel_fb = to_intel_framebuffer(fb); | ||
649 | if ((intel_fb->base.width < width) || (intel_fb->base.height < height)) | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | |||
653 | info = intel_fb->base.fbdev; | ||
654 | par = info->par; | ||
655 | |||
656 | modeset = &intel_crtc->mode_set; | ||
657 | modeset->fb = &intel_fb->base; | ||
658 | conn_count = 0; | ||
659 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
660 | if (connector->encoder) | ||
661 | if (connector->encoder->crtc == modeset->crtc) { | ||
662 | modeset->connectors[conn_count] = connector; | ||
663 | conn_count++; | ||
664 | if (conn_count > INTELFB_CONN_LIMIT) | ||
665 | BUG(); | ||
666 | } | ||
667 | } | ||
668 | |||
669 | for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) | ||
670 | modeset->connectors[i] = NULL; | ||
671 | |||
672 | par->crtc_ids[0] = crtc->base.id; | ||
673 | |||
674 | modeset->num_connectors = conn_count; | ||
675 | if (modeset->mode != modeset->crtc->desired_mode) | ||
676 | modeset->mode = modeset->crtc->desired_mode; | ||
677 | |||
678 | par->crtc_count = 1; | ||
679 | |||
680 | if (new_fb) { | ||
681 | info->var.pixclock = -1; | ||
682 | if (register_framebuffer(info) < 0) | ||
683 | return -EINVAL; | ||
684 | } else | ||
685 | intelfb_set_par(info); | ||
686 | |||
687 | printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, | ||
688 | info->fix.id); | ||
689 | |||
690 | /* Switch back to kernel console on panic */ | ||
691 | kernelfb_mode = *modeset; | ||
692 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); | ||
693 | printk(KERN_INFO "registered panic notifier\n"); | ||
694 | |||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | static int intelfb_multi_fb_probe(struct drm_device *dev) | ||
699 | { | ||
700 | |||
701 | struct drm_crtc *crtc; | ||
702 | int ret = 0; | ||
703 | |||
704 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
705 | ret = intelfb_multi_fb_probe_crtc(dev, crtc); | ||
706 | if (ret) | ||
707 | return ret; | ||
708 | } | ||
709 | return ret; | ||
710 | } | ||
711 | |||
712 | static int intelfb_single_fb_probe(struct drm_device *dev) | ||
713 | { | ||
714 | struct drm_crtc *crtc; | ||
715 | struct drm_connector *connector; | ||
716 | unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; | ||
717 | unsigned int surface_width = 0, surface_height = 0; | ||
718 | int new_fb = 0; | ||
719 | int crtc_count = 0; | ||
720 | int ret, i, conn_count = 0; | ||
721 | struct intel_framebuffer *intel_fb; | ||
722 | struct fb_info *info; | ||
723 | struct intelfb_par *par; | ||
724 | struct drm_mode_set *modeset = NULL; | ||
725 | |||
726 | DRM_DEBUG("\n"); | ||
727 | |||
728 | /* Get a count of crtcs now in use and new min/maxes width/heights */ | ||
729 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
730 | if (!drm_helper_crtc_in_use(crtc)) | ||
731 | continue; | ||
732 | |||
733 | crtc_count++; | ||
734 | if (!crtc->desired_mode) | ||
735 | continue; | ||
736 | |||
737 | /* Smallest mode determines console size... */ | ||
738 | if (crtc->desired_mode->hdisplay < fb_width) | ||
739 | fb_width = crtc->desired_mode->hdisplay; | ||
740 | |||
741 | if (crtc->desired_mode->vdisplay < fb_height) | ||
742 | fb_height = crtc->desired_mode->vdisplay; | ||
743 | |||
744 | /* ... but largest for memory allocation dimensions */ | ||
745 | if (crtc->desired_mode->hdisplay > surface_width) | ||
746 | surface_width = crtc->desired_mode->hdisplay; | ||
747 | |||
748 | if (crtc->desired_mode->vdisplay > surface_height) | ||
749 | surface_height = crtc->desired_mode->vdisplay; | ||
750 | } | ||
751 | |||
752 | if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { | ||
753 | /* hmm everyone went away - assume VGA cable just fell out | ||
754 | and will come back later. */ | ||
755 | DRM_DEBUG("no CRTCs available?\n"); | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | //fail | ||
760 | /* Find the fb for our new config */ | ||
761 | if (list_empty(&dev->mode_config.fb_kernel_list)) { | ||
762 | DRM_DEBUG("creating new fb (console size %dx%d, " | ||
763 | "buffer size %dx%d)\n", fb_width, fb_height, | ||
764 | surface_width, surface_height); | ||
765 | ret = intelfb_create(dev, fb_width, fb_height, surface_width, | ||
766 | surface_height, &intel_fb); | ||
767 | if (ret) | ||
768 | return -EINVAL; | ||
769 | new_fb = 1; | ||
770 | } else { | ||
771 | struct drm_framebuffer *fb; | ||
772 | |||
773 | fb = list_first_entry(&dev->mode_config.fb_kernel_list, | ||
774 | struct drm_framebuffer, filp_head); | ||
775 | intel_fb = to_intel_framebuffer(fb); | ||
776 | |||
777 | /* if someone hotplugs something bigger than we have already | ||
778 | * allocated, we are pwned. As really we can't resize an | ||
779 | * fbdev that is in the wild currently due to fbdev not really | ||
780 | * being designed for the lower layers moving stuff around | ||
781 | * under it. | ||
782 | * - so in the grand style of things - punt. | ||
783 | */ | ||
784 | if ((fb->width < surface_width) || | ||
785 | (fb->height < surface_height)) { | ||
786 | DRM_ERROR("fb not large enough for console\n"); | ||
787 | return -EINVAL; | ||
788 | } | ||
789 | } | ||
790 | // fail | ||
791 | |||
792 | info = intel_fb->base.fbdev; | ||
793 | par = info->par; | ||
794 | |||
795 | crtc_count = 0; | ||
796 | /* | ||
797 | * For each CRTC, set up the connector list for the CRTC's mode | ||
798 | * set configuration. | ||
799 | */ | ||
800 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
801 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
802 | |||
803 | modeset = &intel_crtc->mode_set; | ||
804 | modeset->fb = &intel_fb->base; | ||
805 | conn_count = 0; | ||
806 | list_for_each_entry(connector, &dev->mode_config.connector_list, | ||
807 | head) { | ||
808 | if (!connector->encoder) | ||
809 | continue; | ||
810 | |||
811 | if (connector->encoder->crtc == modeset->crtc) { | ||
812 | modeset->connectors[conn_count++] = connector; | ||
813 | if (conn_count > INTELFB_CONN_LIMIT) | ||
814 | BUG(); | ||
815 | } | ||
816 | } | ||
817 | |||
818 | /* Zero out remaining connector pointers */ | ||
819 | for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) | ||
820 | modeset->connectors[i] = NULL; | ||
821 | |||
822 | par->crtc_ids[crtc_count++] = crtc->base.id; | ||
823 | |||
824 | modeset->num_connectors = conn_count; | ||
825 | if (modeset->mode != modeset->crtc->desired_mode) | ||
826 | modeset->mode = modeset->crtc->desired_mode; | ||
827 | } | ||
828 | par->crtc_count = crtc_count; | ||
829 | |||
830 | if (new_fb) { | ||
831 | info->var.pixclock = -1; | ||
832 | if (register_framebuffer(info) < 0) | ||
833 | return -EINVAL; | ||
834 | } else | ||
835 | intelfb_set_par(info); | ||
836 | |||
837 | printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, | ||
838 | info->fix.id); | ||
839 | |||
840 | /* Switch back to kernel console on panic */ | ||
841 | kernelfb_mode = *modeset; | ||
842 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); | ||
843 | printk(KERN_INFO "registered panic notifier\n"); | ||
844 | |||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | /** | ||
849 | * intelfb_restore - restore the framebuffer console (kernel) config | ||
850 | * | ||
851 | * Restores the kernel's fbcon mode, used for lastclose & panic paths. | ||
852 | */ | ||
853 | void intelfb_restore(void) | ||
854 | { | ||
855 | drm_crtc_helper_set_config(&kernelfb_mode); | ||
856 | } | ||
857 | |||
858 | static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) | ||
859 | { | ||
860 | intelfb_restore(); | ||
861 | } | ||
862 | |||
863 | static struct sysrq_key_op sysrq_intelfb_restore_op = { | ||
864 | .handler = intelfb_sysrq, | ||
865 | .help_msg = "force fb", | ||
866 | .action_msg = "force restore of fb console", | ||
867 | }; | ||
868 | |||
869 | int intelfb_probe(struct drm_device *dev) | ||
870 | { | ||
871 | int ret; | ||
872 | |||
873 | DRM_DEBUG("\n"); | ||
874 | |||
875 | /* something has changed in the lower levels of hell - deal with it | ||
876 | here */ | ||
877 | |||
878 | /* two modes : a) 1 fb to rule all crtcs. | ||
879 | b) one fb per crtc. | ||
880 | two actions 1) new connected device | ||
881 | 2) device removed. | ||
882 | case a/1 : if the fb surface isn't big enough - resize the surface fb. | ||
883 | if the fb size isn't big enough - resize fb into surface. | ||
884 | if everything big enough configure the new crtc/etc. | ||
885 | case a/2 : undo the configuration | ||
886 | possibly resize down the fb to fit the new configuration. | ||
887 | case b/1 : see if it is on a new crtc - setup a new fb and add it. | ||
888 | case b/2 : teardown the new fb. | ||
889 | */ | ||
890 | |||
891 | /* mode a first */ | ||
892 | /* search for an fb */ | ||
893 | if (i915_fbpercrtc == 1) { | ||
894 | ret = intelfb_multi_fb_probe(dev); | ||
895 | } else { | ||
896 | ret = intelfb_single_fb_probe(dev); | ||
897 | } | ||
898 | |||
899 | register_sysrq_key('g', &sysrq_intelfb_restore_op); | ||
900 | |||
901 | return ret; | ||
902 | } | ||
903 | EXPORT_SYMBOL(intelfb_probe); | ||
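As a rough sketch of how the two exported entry points in this file pair up over a device's lifetime (the wrapper names and the teardown walk over fb_kernel_list are illustrative assumptions, not code from this patch):

/* Hedged sketch: bring up the console fb(s) at load time and tear them
 * down again later.  Wrapper names are hypothetical. */
static int example_fbcon_setup(struct drm_device *dev)
{
        return intelfb_probe(dev);
}

static void example_fbcon_teardown(struct drm_device *dev)
{
        struct drm_framebuffer *fb, *tmp;

        list_for_each_entry_safe(fb, tmp, &dev->mode_config.fb_kernel_list,
                                 filp_head)
                intelfb_remove(dev, fb);
}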
904 | |||
905 | int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | ||
906 | { | ||
907 | struct fb_info *info; | ||
908 | |||
909 | if (!fb) | ||
910 | return -EINVAL; | ||
911 | |||
912 | info = fb->fbdev; | ||
913 | |||
914 | if (info) { | ||
915 | unregister_framebuffer(info); | ||
916 | iounmap(info->screen_base); | ||
917 | framebuffer_release(info); | ||
918 | } | ||
919 | |||
920 | atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); | ||
921 | memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set)); | ||
922 | return 0; | ||
923 | } | ||
924 | EXPORT_SYMBOL(intelfb_remove); | ||
925 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c new file mode 100644 index 000000000000..b06a4a3ff08d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -0,0 +1,280 @@ | |||
1 | /* | ||
2 | * Copyright 2006 Dave Airlie <airlied@linux.ie> | ||
3 | * Copyright © 2006-2009 Intel Corporation | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: | ||
25 | * Eric Anholt <eric@anholt.net> | ||
26 | * Jesse Barnes <jesse.barnes@intel.com> | ||
27 | */ | ||
28 | |||
29 | #include <linux/i2c.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include "drmP.h" | ||
32 | #include "drm.h" | ||
33 | #include "drm_crtc.h" | ||
34 | #include "intel_drv.h" | ||
35 | #include "i915_drm.h" | ||
36 | #include "i915_drv.h" | ||
37 | |||
38 | struct intel_hdmi_priv { | ||
39 | u32 sdvox_reg; | ||
40 | u32 save_SDVOX; | ||
41 | int has_hdmi_sink; | ||
42 | }; | ||
43 | |||
44 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | ||
45 | struct drm_display_mode *mode, | ||
46 | struct drm_display_mode *adjusted_mode) | ||
47 | { | ||
48 | struct drm_device *dev = encoder->dev; | ||
49 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
50 | struct drm_crtc *crtc = encoder->crtc; | ||
51 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
52 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
53 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
54 | u32 sdvox; | ||
55 | |||
56 | sdvox = SDVO_ENCODING_HDMI | | ||
57 | SDVO_BORDER_ENABLE | | ||
58 | SDVO_VSYNC_ACTIVE_HIGH | | ||
59 | SDVO_HSYNC_ACTIVE_HIGH; | ||
60 | |||
61 | if (hdmi_priv->has_hdmi_sink) | ||
62 | sdvox |= SDVO_AUDIO_ENABLE; | ||
63 | |||
64 | if (intel_crtc->pipe == 1) | ||
65 | sdvox |= SDVO_PIPE_B_SELECT; | ||
66 | |||
67 | I915_WRITE(hdmi_priv->sdvox_reg, sdvox); | ||
68 | POSTING_READ(hdmi_priv->sdvox_reg); | ||
69 | } | ||
70 | |||
71 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | ||
72 | { | ||
73 | struct drm_device *dev = encoder->dev; | ||
74 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
75 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
76 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
77 | u32 temp; | ||
78 | |||
79 | if (mode != DRM_MODE_DPMS_ON) { | ||
80 | temp = I915_READ(hdmi_priv->sdvox_reg); | ||
81 | I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); | ||
82 | } else { | ||
83 | temp = I915_READ(hdmi_priv->sdvox_reg); | ||
84 | I915_WRITE(hdmi_priv->sdvox_reg, temp | SDVO_ENABLE); | ||
85 | } | ||
86 | POSTING_READ(hdmi_priv->sdvox_reg); | ||
87 | } | ||
88 | |||
89 | static void intel_hdmi_save(struct drm_connector *connector) | ||
90 | { | ||
91 | struct drm_device *dev = connector->dev; | ||
92 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
93 | struct intel_output *intel_output = to_intel_output(connector); | ||
94 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
95 | |||
96 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); | ||
97 | } | ||
98 | |||
99 | static void intel_hdmi_restore(struct drm_connector *connector) | ||
100 | { | ||
101 | struct drm_device *dev = connector->dev; | ||
102 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
103 | struct intel_output *intel_output = to_intel_output(connector); | ||
104 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
105 | |||
106 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); | ||
107 | POSTING_READ(hdmi_priv->sdvox_reg); | ||
108 | } | ||
109 | |||
110 | static int intel_hdmi_mode_valid(struct drm_connector *connector, | ||
111 | struct drm_display_mode *mode) | ||
112 | { | ||
113 | if (mode->clock > 165000) | ||
114 | return MODE_CLOCK_HIGH; | ||
115 | if (mode->clock < 20000) | ||
116 | return MODE_CLOCK_LOW; | ||
117 | |||
118 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
119 | return MODE_NO_DBLESCAN; | ||
120 | |||
121 | return MODE_OK; | ||
122 | } | ||
123 | |||
124 | static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | ||
125 | struct drm_display_mode *mode, | ||
126 | struct drm_display_mode *adjusted_mode) | ||
127 | { | ||
128 | return true; | ||
129 | } | ||
130 | |||
131 | static enum drm_connector_status | ||
132 | intel_hdmi_detect(struct drm_connector *connector) | ||
133 | { | ||
134 | struct drm_device *dev = connector->dev; | ||
135 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
136 | struct intel_output *intel_output = to_intel_output(connector); | ||
137 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
138 | u32 temp, bit; | ||
139 | |||
140 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
141 | |||
142 | I915_WRITE(PORT_HOTPLUG_EN, | ||
143 | temp | | ||
144 | HDMIB_HOTPLUG_INT_EN | | ||
145 | HDMIC_HOTPLUG_INT_EN | | ||
146 | HDMID_HOTPLUG_INT_EN); | ||
147 | |||
148 | POSTING_READ(PORT_HOTPLUG_EN); | ||
149 | |||
150 | switch (hdmi_priv->sdvox_reg) { | ||
151 | case SDVOB: | ||
152 | bit = HDMIB_HOTPLUG_INT_STATUS; | ||
153 | break; | ||
154 | case SDVOC: | ||
155 | bit = HDMIC_HOTPLUG_INT_STATUS; | ||
156 | break; | ||
157 | default: | ||
158 | return connector_status_unknown; | ||
159 | } | ||
160 | |||
161 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) | ||
162 | return connector_status_connected; | ||
163 | else | ||
164 | return connector_status_disconnected; | ||
165 | } | ||
166 | |||
167 | static int intel_hdmi_get_modes(struct drm_connector *connector) | ||
168 | { | ||
169 | struct intel_output *intel_output = to_intel_output(connector); | ||
170 | |||
171 | /* We should parse the EDID data and find out if it's an HDMI sink so | ||
172 | * we can send audio to it. | ||
173 | */ | ||
174 | |||
175 | return intel_ddc_get_modes(intel_output); | ||
176 | } | ||
177 | |||
178 | static void intel_hdmi_destroy(struct drm_connector *connector) | ||
179 | { | ||
180 | struct intel_output *intel_output = to_intel_output(connector); | ||
181 | |||
182 | if (intel_output->i2c_bus) | ||
183 | intel_i2c_destroy(intel_output->i2c_bus); | ||
184 | drm_sysfs_connector_remove(connector); | ||
185 | drm_connector_cleanup(connector); | ||
186 | kfree(intel_output); | ||
187 | } | ||
188 | |||
189 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | ||
190 | .dpms = intel_hdmi_dpms, | ||
191 | .mode_fixup = intel_hdmi_mode_fixup, | ||
192 | .prepare = intel_encoder_prepare, | ||
193 | .mode_set = intel_hdmi_mode_set, | ||
194 | .commit = intel_encoder_commit, | ||
195 | }; | ||
196 | |||
197 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | ||
198 | .save = intel_hdmi_save, | ||
199 | .restore = intel_hdmi_restore, | ||
200 | .detect = intel_hdmi_detect, | ||
201 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
202 | .destroy = intel_hdmi_destroy, | ||
203 | }; | ||
204 | |||
205 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { | ||
206 | .get_modes = intel_hdmi_get_modes, | ||
207 | .mode_valid = intel_hdmi_mode_valid, | ||
208 | .best_encoder = intel_best_encoder, | ||
209 | }; | ||
210 | |||
211 | static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) | ||
212 | { | ||
213 | drm_encoder_cleanup(encoder); | ||
214 | } | ||
215 | |||
216 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | ||
217 | .destroy = intel_hdmi_enc_destroy, | ||
218 | }; | ||
219 | |||
220 | |||
221 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | ||
222 | { | ||
223 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
224 | struct drm_connector *connector; | ||
225 | struct intel_output *intel_output; | ||
226 | struct intel_hdmi_priv *hdmi_priv; | ||
227 | |||
228 | intel_output = kcalloc(sizeof(struct intel_output) + | ||
229 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | ||
230 | if (!intel_output) | ||
231 | return; | ||
232 | hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); | ||
233 | |||
234 | connector = &intel_output->base; | ||
235 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | ||
236 | DRM_MODE_CONNECTOR_DVID); | ||
237 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | ||
238 | |||
239 | intel_output->type = INTEL_OUTPUT_HDMI; | ||
240 | |||
241 | connector->interlace_allowed = 0; | ||
242 | connector->doublescan_allowed = 0; | ||
243 | |||
244 | /* Set up the DDC bus. */ | ||
245 | if (sdvox_reg == SDVOB) | ||
246 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | ||
247 | else | ||
248 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | ||
249 | |||
250 | if (!intel_output->ddc_bus) | ||
251 | goto err_connector; | ||
252 | |||
253 | hdmi_priv->sdvox_reg = sdvox_reg; | ||
254 | intel_output->dev_priv = hdmi_priv; | ||
255 | |||
256 | drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, | ||
257 | DRM_MODE_ENCODER_TMDS); | ||
258 | drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); | ||
259 | |||
260 | drm_mode_connector_attach_encoder(&intel_output->base, | ||
261 | &intel_output->enc); | ||
262 | drm_sysfs_connector_add(connector); | ||
263 | |||
264 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | ||
265 | * 0xd. Failure to do so will result in spurious interrupts being | ||
266 | * generated on the port when a cable is not attached. | ||
267 | */ | ||
268 | if (IS_G4X(dev) && !IS_GM45(dev)) { | ||
269 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); | ||
270 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | ||
271 | } | ||
272 | |||
273 | return; | ||
274 | |||
275 | err_connector: | ||
276 | drm_connector_cleanup(connector); | ||
277 | kfree(intel_output); | ||
278 | |||
279 | return; | ||
280 | } | ||
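For context, a hedged sketch of how this init routine would be invoked from the driver's output setup; the call site itself is an assumption, since only intel_hdmi_init() is defined in this patch:

/* Illustrative only: register an HDMI connector/encoder on the SDVOB port. */
static void example_hdmi_setup(struct drm_device *dev)
{
        intel_hdmi_init(dev, SDVOB);
}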
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c new file mode 100644 index 000000000000..5ee9d4c25753 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | ||
3 | * Copyright © 2006-2008 Intel Corporation | ||
4 | * Jesse Barnes <jesse.barnes@intel.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | * Authors: | ||
26 | * Eric Anholt <eric@anholt.net> | ||
27 | */ | ||
28 | #include <linux/i2c.h> | ||
29 | #include <linux/i2c-id.h> | ||
30 | #include <linux/i2c-algo-bit.h> | ||
31 | #include "drmP.h" | ||
32 | #include "drm.h" | ||
33 | #include "intel_drv.h" | ||
34 | #include "i915_drm.h" | ||
35 | #include "i915_drv.h" | ||
36 | |||
37 | /* | ||
38 | * Intel GPIO access functions | ||
39 | */ | ||
40 | |||
41 | #define I2C_RISEFALL_TIME 20 | ||
42 | |||
43 | static int get_clock(void *data) | ||
44 | { | ||
45 | struct intel_i2c_chan *chan = data; | ||
46 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | ||
47 | u32 val; | ||
48 | |||
49 | val = I915_READ(chan->reg); | ||
50 | return ((val & GPIO_CLOCK_VAL_IN) != 0); | ||
51 | } | ||
52 | |||
53 | static int get_data(void *data) | ||
54 | { | ||
55 | struct intel_i2c_chan *chan = data; | ||
56 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | ||
57 | u32 val; | ||
58 | |||
59 | val = I915_READ(chan->reg); | ||
60 | return ((val & GPIO_DATA_VAL_IN) != 0); | ||
61 | } | ||
62 | |||
63 | static void set_clock(void *data, int state_high) | ||
64 | { | ||
65 | struct intel_i2c_chan *chan = data; | ||
66 | struct drm_device *dev = chan->drm_dev; | ||
67 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | ||
68 | u32 reserved = 0, clock_bits; | ||
69 | |||
70 | /* On most chips, these bits must be preserved in software. */ | ||
71 | if (!IS_I830(dev) && !IS_845G(dev)) | ||
72 | reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | | ||
73 | GPIO_CLOCK_PULLUP_DISABLE); | ||
74 | |||
75 | if (state_high) | ||
76 | clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; | ||
77 | else | ||
78 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | | ||
79 | GPIO_CLOCK_VAL_MASK; | ||
80 | I915_WRITE(chan->reg, reserved | clock_bits); | ||
81 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ | ||
82 | } | ||
83 | |||
84 | static void set_data(void *data, int state_high) | ||
85 | { | ||
86 | struct intel_i2c_chan *chan = data; | ||
87 | struct drm_device *dev = chan->drm_dev; | ||
88 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | ||
89 | u32 reserved = 0, data_bits; | ||
90 | |||
91 | /* On most chips, these bits must be preserved in software. */ | ||
92 | if (!IS_I830(dev) && !IS_845G(dev)) | ||
93 | reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | | ||
94 | GPIO_CLOCK_PULLUP_DISABLE); | ||
95 | |||
96 | if (state_high) | ||
97 | data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; | ||
98 | else | ||
99 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | | ||
100 | GPIO_DATA_VAL_MASK; | ||
101 | |||
102 | I915_WRITE(chan->reg, reserved | data_bits); | ||
103 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg | ||
108 | * @dev: DRM device | ||
110 | * @reg: GPIO reg to use | ||
111 | * @name: name for this bus | ||
112 | * | ||
113 | * Creates and registers a new i2c bus with the Linux i2c layer, for use | ||
114 | * in output probing and control (e.g. DDC or SDVO control functions). | ||
115 | * | ||
116 | * Possible values for @reg include: | ||
117 | * %GPIOA | ||
118 | * %GPIOB | ||
119 | * %GPIOC | ||
120 | * %GPIOD | ||
121 | * %GPIOE | ||
122 | * %GPIOF | ||
123 | * %GPIOG | ||
124 | * %GPIOH | ||
125 | * see PRM for details on how these different busses are used. | ||
126 | */ | ||
127 | struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | ||
128 | const char *name) | ||
129 | { | ||
130 | struct intel_i2c_chan *chan; | ||
131 | |||
132 | chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); | ||
133 | if (!chan) | ||
134 | goto out_free; | ||
135 | |||
136 | chan->drm_dev = dev; | ||
137 | chan->reg = reg; | ||
138 | snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); | ||
139 | chan->adapter.owner = THIS_MODULE; | ||
140 | chan->adapter.algo_data = &chan->algo; | ||
141 | chan->adapter.dev.parent = &dev->pdev->dev; | ||
142 | chan->algo.setsda = set_data; | ||
143 | chan->algo.setscl = set_clock; | ||
144 | chan->algo.getsda = get_data; | ||
145 | chan->algo.getscl = get_clock; | ||
146 | chan->algo.udelay = 20; | ||
147 | chan->algo.timeout = usecs_to_jiffies(2200); | ||
148 | chan->algo.data = chan; | ||
149 | |||
150 | i2c_set_adapdata(&chan->adapter, chan); | ||
151 | |||
152 | if (i2c_bit_add_bus(&chan->adapter)) | ||
153 | goto out_free; | ||
154 | |||
155 | /* JJJ: raise SCL and SDA? */ | ||
156 | set_data(chan, 1); | ||
157 | set_clock(chan, 1); | ||
158 | udelay(20); | ||
159 | |||
160 | return chan; | ||
161 | |||
162 | out_free: | ||
163 | kfree(chan); | ||
164 | return NULL; | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * intel_i2c_destroy - unregister and free i2c bus resources | ||
170 | * @chan: channel to free | ||
170 | * | ||
171 | * Unregister the adapter from the i2c layer, then free the structure. | ||
172 | */ | ||
173 | void intel_i2c_destroy(struct intel_i2c_chan *chan) | ||
174 | { | ||
175 | if (!chan) | ||
176 | return; | ||
177 | |||
178 | i2c_del_adapter(&chan->adapter); | ||
179 | kfree(chan); | ||
180 | } | ||
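A brief usage sketch for the two helpers above, mirroring how the HDMI code earlier in this patch creates its DDC bus; the GPIO register and bus name here are example choices:

/* Illustrative only: create a bit-banged i2c bus on one of the GPIO
 * registers listed in the kernel-doc above, then tear it down again. */
static int example_ddc_setup(struct drm_device *dev)
{
        struct intel_i2c_chan *ddc;

        ddc = intel_i2c_create(dev, GPIOD, "EXAMPLEDDC");
        if (!ddc)
                return -ENOMEM;

        /* ... EDID/DDC transfers would go through &ddc->adapter here ... */

        intel_i2c_destroy(ddc);
        return 0;
}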
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c new file mode 100644 index 000000000000..6d4f91265354 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -0,0 +1,522 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2007 Intel Corporation | ||
3 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: | ||
25 | * Eric Anholt <eric@anholt.net> | ||
26 | * Dave Airlie <airlied@linux.ie> | ||
27 | * Jesse Barnes <jesse.barnes@intel.com> | ||
28 | */ | ||
29 | |||
30 | #include <linux/dmi.h> | ||
31 | #include <linux/i2c.h> | ||
32 | #include "drmP.h" | ||
33 | #include "drm.h" | ||
34 | #include "drm_crtc.h" | ||
35 | #include "drm_edid.h" | ||
36 | #include "intel_drv.h" | ||
37 | #include "i915_drm.h" | ||
38 | #include "i915_drv.h" | ||
39 | |||
40 | /** | ||
41 | * Sets the backlight level. | ||
42 | * | ||
43 | * \param level backlight level, from 0 to intel_lvds_get_max_backlight(). | ||
44 | */ | ||
45 | static void intel_lvds_set_backlight(struct drm_device *dev, int level) | ||
46 | { | ||
47 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
48 | u32 blc_pwm_ctl; | ||
49 | |||
50 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
51 | I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | | ||
52 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); | ||
53 | } | ||
54 | |||
55 | /** | ||
56 | * Returns the maximum level of the backlight duty cycle field. | ||
57 | */ | ||
58 | static u32 intel_lvds_get_max_backlight(struct drm_device *dev) | ||
59 | { | ||
60 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
61 | |||
62 | return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
63 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | ||
64 | } | ||
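Taken together, the two helpers above express backlight levels relative to the hardware maximum; a minimal sketch follows (the half-brightness choice is just an example):

/* Illustrative only: drive the panel at roughly half of the maximum
 * duty cycle reported by BLC_PWM_CTL. */
static void example_half_brightness(struct drm_device *dev)
{
        u32 max = intel_lvds_get_max_backlight(dev);

        intel_lvds_set_backlight(dev, max / 2);
}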
65 | |||
66 | /** | ||
67 | * Sets the power state for the panel. | ||
68 | */ | ||
69 | static void intel_lvds_set_power(struct drm_device *dev, bool on) | ||
70 | { | ||
71 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
72 | u32 pp_status; | ||
73 | |||
74 | if (on) { | ||
75 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | ||
76 | POWER_TARGET_ON); | ||
77 | do { | ||
78 | pp_status = I915_READ(PP_STATUS); | ||
79 | } while ((pp_status & PP_ON) == 0); | ||
80 | |||
81 | intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); | ||
82 | } else { | ||
83 | intel_lvds_set_backlight(dev, 0); | ||
84 | |||
85 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & | ||
86 | ~POWER_TARGET_ON); | ||
87 | do { | ||
88 | pp_status = I915_READ(PP_STATUS); | ||
89 | } while (pp_status & PP_ON); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) | ||
94 | { | ||
95 | struct drm_device *dev = encoder->dev; | ||
96 | |||
97 | if (mode == DRM_MODE_DPMS_ON) | ||
98 | intel_lvds_set_power(dev, true); | ||
99 | else | ||
100 | intel_lvds_set_power(dev, false); | ||
101 | |||
102 | /* XXX: We never power down the LVDS pairs. */ | ||
103 | } | ||
104 | |||
105 | static void intel_lvds_save(struct drm_connector *connector) | ||
106 | { | ||
107 | struct drm_device *dev = connector->dev; | ||
108 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
109 | |||
110 | dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS); | ||
111 | dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS); | ||
112 | dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); | ||
113 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); | ||
114 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | ||
115 | dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & | ||
116 | BACKLIGHT_DUTY_CYCLE_MASK); | ||
117 | |||
118 | /* | ||
119 | * If the light is off at server startup, just make it full brightness | ||
120 | */ | ||
121 | if (dev_priv->backlight_duty_cycle == 0) | ||
122 | dev_priv->backlight_duty_cycle = | ||
123 | intel_lvds_get_max_backlight(dev); | ||
124 | } | ||
125 | |||
126 | static void intel_lvds_restore(struct drm_connector *connector) | ||
127 | { | ||
128 | struct drm_device *dev = connector->dev; | ||
129 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
130 | |||
131 | I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); | ||
132 | I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); | ||
133 | I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); | ||
134 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); | ||
135 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); | ||
136 | if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) | ||
137 | intel_lvds_set_power(dev, true); | ||
138 | else | ||
139 | intel_lvds_set_power(dev, false); | ||
140 | } | ||
141 | |||
142 | static int intel_lvds_mode_valid(struct drm_connector *connector, | ||
143 | struct drm_display_mode *mode) | ||
144 | { | ||
145 | struct drm_device *dev = connector->dev; | ||
146 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
147 | struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; | ||
148 | |||
149 | if (fixed_mode) { | ||
150 | if (mode->hdisplay > fixed_mode->hdisplay) | ||
151 | return MODE_PANEL; | ||
152 | if (mode->vdisplay > fixed_mode->vdisplay) | ||
153 | return MODE_PANEL; | ||
154 | } | ||
155 | |||
156 | return MODE_OK; | ||
157 | } | ||
158 | |||
159 | static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | ||
160 | struct drm_display_mode *mode, | ||
161 | struct drm_display_mode *adjusted_mode) | ||
162 | { | ||
163 | struct drm_device *dev = encoder->dev; | ||
164 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
165 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
166 | struct drm_encoder *tmp_encoder; | ||
167 | |||
168 | /* Should never happen!! */ | ||
169 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { | ||
170 | printk(KERN_ERR "Can't support LVDS on pipe A\n"); | ||
171 | return false; | ||
172 | } | ||
173 | |||
174 | /* Should never happen!! */ | ||
175 | list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { | ||
176 | if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { | ||
177 | printk(KERN_ERR "Can't enable LVDS and another " | ||
178 | "encoder on the same pipe\n"); | ||
179 | return false; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * If we have timings from the BIOS for the panel, put them in | ||
185 | * to the adjusted mode. The CRTC will be set up for this mode, | ||
186 | * with the panel scaling set up to source from the H/VDisplay | ||
187 | * of the original mode. | ||
188 | */ | ||
189 | if (dev_priv->panel_fixed_mode != NULL) { | ||
190 | adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; | ||
191 | adjusted_mode->hsync_start = | ||
192 | dev_priv->panel_fixed_mode->hsync_start; | ||
193 | adjusted_mode->hsync_end = | ||
194 | dev_priv->panel_fixed_mode->hsync_end; | ||
195 | adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; | ||
196 | adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; | ||
197 | adjusted_mode->vsync_start = | ||
198 | dev_priv->panel_fixed_mode->vsync_start; | ||
199 | adjusted_mode->vsync_end = | ||
200 | dev_priv->panel_fixed_mode->vsync_end; | ||
201 | adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; | ||
202 | adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; | ||
203 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * XXX: It would be nice to support lower refresh rates on the | ||
208 | * panels to reduce power consumption, and perhaps match the | ||
209 | * user's requested refresh rate. | ||
210 | */ | ||
211 | |||
212 | return true; | ||
213 | } | ||
214 | |||
215 | static void intel_lvds_prepare(struct drm_encoder *encoder) | ||
216 | { | ||
217 | struct drm_device *dev = encoder->dev; | ||
218 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
219 | |||
220 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | ||
221 | dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & | ||
222 | BACKLIGHT_DUTY_CYCLE_MASK); | ||
223 | |||
224 | intel_lvds_set_power(dev, false); | ||
225 | } | ||
226 | |||
227 | static void intel_lvds_commit(struct drm_encoder *encoder) | ||
228 | { | ||
229 | struct drm_device *dev = encoder->dev; | ||
230 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
231 | |||
232 | if (dev_priv->backlight_duty_cycle == 0) | ||
233 | dev_priv->backlight_duty_cycle = | ||
234 | intel_lvds_get_max_backlight(dev); | ||
235 | |||
236 | intel_lvds_set_power(dev, true); | ||
237 | } | ||
238 | |||
239 | static void intel_lvds_mode_set(struct drm_encoder *encoder, | ||
240 | struct drm_display_mode *mode, | ||
241 | struct drm_display_mode *adjusted_mode) | ||
242 | { | ||
243 | struct drm_device *dev = encoder->dev; | ||
244 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
245 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
246 | u32 pfit_control; | ||
247 | |||
248 | /* | ||
249 | * The LVDS pin pair will already have been turned on in the | ||
250 | * intel_crtc_mode_set since it has a large impact on the DPLL | ||
251 | * settings. | ||
252 | */ | ||
253 | |||
254 | /* | ||
255 | * Enable automatic panel scaling so that non-native modes fill the | ||
256 | * screen. Should be enabled before the pipe is enabled, according to | ||
257 | * register description and PRM. | ||
258 | */ | ||
259 | if (mode->hdisplay != adjusted_mode->hdisplay || | ||
260 | mode->vdisplay != adjusted_mode->vdisplay) | ||
261 | pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | | ||
262 | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | | ||
263 | HORIZ_INTERP_BILINEAR); | ||
264 | else | ||
265 | pfit_control = 0; | ||
266 | |||
267 | if (!IS_I965G(dev)) { | ||
268 | if (dev_priv->panel_wants_dither) | ||
269 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
270 | } else | ||
272 | pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; | ||
273 | |||
274 | I915_WRITE(PFIT_CONTROL, pfit_control); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * Detect the LVDS connection. | ||
279 | * | ||
280 | * This always returns connector_status_connected. This connector should only have | ||
281 | * been set up if the LVDS was actually connected anyway. | ||
282 | */ | ||
283 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) | ||
284 | { | ||
285 | return connector_status_connected; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. | ||
290 | */ | ||
291 | static int intel_lvds_get_modes(struct drm_connector *connector) | ||
292 | { | ||
293 | struct drm_device *dev = connector->dev; | ||
294 | struct intel_output *intel_output = to_intel_output(connector); | ||
295 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
296 | int ret = 0; | ||
297 | |||
298 | ret = intel_ddc_get_modes(intel_output); | ||
299 | |||
300 | if (ret) | ||
301 | return ret; | ||
302 | |||
303 | /* Didn't get an EDID, so set wide sync ranges so that all probed | ||
304 | * modes get handed to the mode_valid callback for checking. | ||
305 | */ | ||
307 | connector->display_info.min_vfreq = 0; | ||
308 | connector->display_info.max_vfreq = 200; | ||
309 | connector->display_info.min_hfreq = 0; | ||
310 | connector->display_info.max_hfreq = 200; | ||
311 | |||
312 | if (dev_priv->panel_fixed_mode != NULL) { | ||
313 | struct drm_display_mode *mode; | ||
314 | |||
315 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | ||
316 | drm_mode_probed_add(connector, mode); | ||
317 | |||
318 | return 1; | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * intel_lvds_destroy - unregister and free LVDS structures | ||
326 | * @connector: connector to free | ||
327 | * | ||
328 | * Unregister the DDC bus for this connector then free the driver private | ||
329 | * structure. | ||
330 | */ | ||
331 | static void intel_lvds_destroy(struct drm_connector *connector) | ||
332 | { | ||
333 | struct intel_output *intel_output = to_intel_output(connector); | ||
334 | |||
335 | if (intel_output->ddc_bus) | ||
336 | intel_i2c_destroy(intel_output->ddc_bus); | ||
337 | drm_sysfs_connector_remove(connector); | ||
338 | drm_connector_cleanup(connector); | ||
339 | kfree(connector); | ||
340 | } | ||
341 | |||
342 | static int intel_lvds_set_property(struct drm_connector *connector, | ||
343 | struct drm_property *property, | ||
344 | uint64_t value) | ||
345 | { | ||
346 | struct drm_device *dev = connector->dev; | ||
347 | |||
348 | if (property == dev->mode_config.dpms_property && connector->encoder) | ||
349 | intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf)); | ||
350 | |||
351 | return 0; | ||
352 | } | ||
353 | |||
354 | static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { | ||
355 | .dpms = intel_lvds_dpms, | ||
356 | .mode_fixup = intel_lvds_mode_fixup, | ||
357 | .prepare = intel_lvds_prepare, | ||
358 | .mode_set = intel_lvds_mode_set, | ||
359 | .commit = intel_lvds_commit, | ||
360 | }; | ||
361 | |||
362 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { | ||
363 | .get_modes = intel_lvds_get_modes, | ||
364 | .mode_valid = intel_lvds_mode_valid, | ||
365 | .best_encoder = intel_best_encoder, | ||
366 | }; | ||
367 | |||
368 | static const struct drm_connector_funcs intel_lvds_connector_funcs = { | ||
369 | .save = intel_lvds_save, | ||
370 | .restore = intel_lvds_restore, | ||
371 | .detect = intel_lvds_detect, | ||
372 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
373 | .set_property = intel_lvds_set_property, | ||
374 | .destroy = intel_lvds_destroy, | ||
375 | }; | ||
376 | |||
377 | |||
378 | static void intel_lvds_enc_destroy(struct drm_encoder *encoder) | ||
379 | { | ||
380 | drm_encoder_cleanup(encoder); | ||
381 | } | ||
382 | |||
383 | static const struct drm_encoder_funcs intel_lvds_enc_funcs = { | ||
384 | .destroy = intel_lvds_enc_destroy, | ||
385 | }; | ||
386 | |||
387 | |||
388 | |||
389 | /** | ||
390 | * intel_lvds_init - setup LVDS connectors on this device | ||
391 | * @dev: drm device | ||
392 | * | ||
393 | * Create the connector, register the LVDS DDC bus, and try to figure out what | ||
394 | * modes we can display on the LVDS panel (if present). | ||
395 | */ | ||
396 | void intel_lvds_init(struct drm_device *dev) | ||
397 | { | ||
398 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
399 | struct intel_output *intel_output; | ||
400 | struct drm_connector *connector; | ||
401 | struct drm_encoder *encoder; | ||
402 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | ||
403 | struct drm_crtc *crtc; | ||
404 | u32 lvds; | ||
405 | int pipe; | ||
406 | |||
407 | /* Blacklist machines that we know falsely report LVDS. */ | ||
408 | /* FIXME: add a check for the Aopen Mini PC */ | ||
409 | |||
410 | /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */ | ||
411 | if (dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") || | ||
412 | dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) { | ||
413 | DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n"); | ||
414 | return; | ||
415 | } | ||
416 | |||
417 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | ||
418 | if (!intel_output) { | ||
419 | return; | ||
420 | } | ||
421 | |||
422 | connector = &intel_output->base; | ||
423 | encoder = &intel_output->enc; | ||
424 | drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, | ||
425 | DRM_MODE_CONNECTOR_LVDS); | ||
426 | |||
427 | drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, | ||
428 | DRM_MODE_ENCODER_LVDS); | ||
429 | |||
430 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | ||
431 | intel_output->type = INTEL_OUTPUT_LVDS; | ||
432 | |||
433 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | ||
434 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | ||
435 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
436 | connector->interlace_allowed = false; | ||
437 | connector->doublescan_allowed = false; | ||
438 | |||
439 | |||
440 | /* | ||
441 | * LVDS discovery: | ||
442 | * 1) check for EDID on DDC | ||
443 | * 2) check for VBT data | ||
444 | * 3) check to see if LVDS is already on | ||
445 | * if none of the above, no panel | ||
446 | * 4) make sure lid is open | ||
447 | * if closed, act like it's not there for now | ||
448 | */ | ||
449 | |||
450 | /* Set up the DDC bus. */ | ||
451 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); | ||
452 | if (!intel_output->ddc_bus) { | ||
453 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | ||
454 | "failed.\n"); | ||
455 | goto failed; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * Attempt to get the fixed panel mode from DDC. Assume that the | ||
460 | * preferred mode is the right one. | ||
461 | */ | ||
462 | intel_ddc_get_modes(intel_output); | ||
463 | |||
464 | list_for_each_entry(scan, &connector->probed_modes, head) { | ||
465 | mutex_lock(&dev->mode_config.mutex); | ||
466 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { | ||
467 | dev_priv->panel_fixed_mode = | ||
468 | drm_mode_duplicate(dev, scan); | ||
469 | mutex_unlock(&dev->mode_config.mutex); | ||
470 | goto out; | ||
471 | } | ||
472 | mutex_unlock(&dev->mode_config.mutex); | ||
473 | } | ||
474 | |||
475 | /* Failed to get EDID, what about VBT? */ | ||
476 | if (dev_priv->vbt_mode) { | ||
477 | mutex_lock(&dev->mode_config.mutex); | ||
478 | dev_priv->panel_fixed_mode = | ||
479 | drm_mode_duplicate(dev, dev_priv->vbt_mode); | ||
480 | mutex_unlock(&dev->mode_config.mutex); | ||
481 | if (dev_priv->panel_fixed_mode) { | ||
482 | dev_priv->panel_fixed_mode->type |= | ||
483 | DRM_MODE_TYPE_PREFERRED; | ||
484 | drm_mode_probed_add(connector, | ||
485 | dev_priv->panel_fixed_mode); | ||
486 | goto out; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * If we didn't get EDID, try checking if the panel is already turned | ||
492 | * on. If so, assume that whatever is currently programmed is the | ||
493 | * correct mode. | ||
494 | */ | ||
495 | lvds = I915_READ(LVDS); | ||
496 | pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; | ||
497 | crtc = intel_get_crtc_from_pipe(dev, pipe); | ||
498 | |||
499 | if (crtc && (lvds & LVDS_PORT_EN)) { | ||
500 | dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); | ||
501 | if (dev_priv->panel_fixed_mode) { | ||
502 | dev_priv->panel_fixed_mode->type |= | ||
503 | DRM_MODE_TYPE_PREFERRED; | ||
504 | goto out; | ||
505 | } | ||
506 | } | ||
507 | |||
508 | /* If we still don't have a mode after all that, give up. */ | ||
509 | if (!dev_priv->panel_fixed_mode) | ||
510 | goto failed; | ||
511 | |||
512 | out: | ||
513 | drm_sysfs_connector_add(connector); | ||
514 | return; | ||
515 | |||
516 | failed: | ||
517 | DRM_DEBUG("No LVDS modes found, disabling.\n"); | ||
518 | if (intel_output->ddc_bus) | ||
519 | intel_i2c_destroy(intel_output->ddc_bus); | ||
520 | drm_connector_cleanup(connector); | ||
521 | kfree(connector); | ||
522 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c new file mode 100644 index 000000000000..e42019e5d661 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> | ||
3 | * Copyright (c) 2007 Intel Corporation | ||
4 | * Jesse Barnes <jesse.barnes@intel.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #include <linux/i2c.h> | ||
27 | #include <linux/fb.h> | ||
28 | #include "drmP.h" | ||
29 | #include "intel_drv.h" | ||
30 | |||
31 | /** | ||
32 | * intel_ddc_probe - check whether a sink acks a read at the EDID address | ||
33 | * @intel_output: output whose DDC bus should be probed | ||
34 | */ | ||
35 | bool intel_ddc_probe(struct intel_output *intel_output) | ||
36 | { | ||
37 | u8 out_buf[] = { 0x0, 0x0}; | ||
38 | u8 buf[2]; | ||
39 | int ret; | ||
40 | struct i2c_msg msgs[] = { | ||
41 | { | ||
42 | .addr = 0x50, | ||
43 | .flags = 0, | ||
44 | .len = 1, | ||
45 | .buf = out_buf, | ||
46 | }, | ||
47 | { | ||
48 | .addr = 0x50, | ||
49 | .flags = I2C_M_RD, | ||
50 | .len = 1, | ||
51 | .buf = buf, | ||
52 | } | ||
53 | }; | ||
54 | |||
55 | ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); | ||
56 | if (ret == 2) | ||
57 | return true; | ||
58 | |||
59 | return false; | ||
60 | } | ||
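Note: 0x50 is the standard DDC slave address at which a sink exposes its EDID, so the one-byte write/read pair above is just a cheap presence check. A hedged sketch (not part of the patch, assuming the same adapter and a 128-byte EDID base block at offset 0) of fetching the whole block with the same i2c_transfer() interface:

/* Sketch only: read the 128-byte EDID base block from DDC address 0x50. */
static int read_edid_base_block(struct i2c_adapter *adapter, u8 *edid)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
}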
61 | |||
62 | /** | ||
63 | * intel_ddc_get_modes - get modelist from monitor | ||
64 | * @intel_output: output whose DDC bus carries the EDID | ||
65 | * | ||
66 | * Fetch the EDID over @intel_output's DDC bus and add the probed modes. | ||
67 | */ | ||
68 | int intel_ddc_get_modes(struct intel_output *intel_output) | ||
69 | { | ||
70 | struct edid *edid; | ||
71 | int ret = 0; | ||
72 | |||
73 | edid = drm_get_edid(&intel_output->base, | ||
74 | &intel_output->ddc_bus->adapter); | ||
75 | if (edid) { | ||
76 | drm_mode_connector_update_edid_property(&intel_output->base, | ||
77 | edid); | ||
78 | ret = drm_add_edid_modes(&intel_output->base, edid); | ||
79 | kfree(edid); | ||
80 | } | ||
81 | |||
82 | return ret; | ||
83 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c new file mode 100644 index 000000000000..a30508b639ba --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -0,0 +1,1802 @@ | |||
1 | /* | ||
2 | * Copyright 2006 Dave Airlie <airlied@linux.ie> | ||
3 | * Copyright © 2006-2007 Intel Corporation | ||
4 | * Jesse Barnes <jesse.barnes@intel.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | * Authors: | ||
26 | * Eric Anholt <eric@anholt.net> | ||
27 | */ | ||
28 | #include <linux/i2c.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include "drmP.h" | ||
31 | #include "drm.h" | ||
32 | #include "drm_crtc.h" | ||
33 | #include "intel_drv.h" | ||
34 | #include "i915_drm.h" | ||
35 | #include "i915_drv.h" | ||
36 | #include "intel_sdvo_regs.h" | ||
37 | |||
38 | #undef SDVO_DEBUG | ||
39 | |||
40 | struct intel_sdvo_priv { | ||
41 | struct intel_i2c_chan *i2c_bus; | ||
42 | int slaveaddr; | ||
43 | |||
44 | /* Register for the SDVO device: SDVOB or SDVOC */ | ||
45 | int output_device; | ||
46 | |||
47 | /* Active outputs controlled by this SDVO output */ | ||
48 | uint16_t controlled_output; | ||
49 | |||
50 | /* | ||
51 | * Capabilities of the SDVO device returned by | ||
52 | * intel_sdvo_get_capabilities() | ||
53 | */ | ||
54 | struct intel_sdvo_caps caps; | ||
55 | |||
56 | /* Pixel clock limitations reported by the SDVO device, in kHz */ | ||
57 | int pixel_clock_min, pixel_clock_max; | ||
58 | |||
59 | /** | ||
60 | * This is set if we're going to treat the device as TV-out. | ||
61 | * | ||
62 | * While we have these nice friendly flags for output types that ought | ||
63 | * to decide this for us, the S-Video output on our HDMI+S-Video card | ||
64 | * shows up as RGB1 (VGA). | ||
65 | */ | ||
66 | bool is_tv; | ||
67 | |||
68 | /** | ||
69 | * This is set if we treat the device as HDMI, instead of DVI. | ||
70 | */ | ||
71 | bool is_hdmi; | ||
72 | |||
73 | /** | ||
74 | * Returned SDTV resolutions allowed for the current format, if the | ||
75 | * device reported it. | ||
76 | */ | ||
77 | struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; | ||
78 | |||
79 | /** | ||
80 | * Current selected TV format. | ||
81 | * | ||
82 | * This is stored in the same structure that's passed to the device, for | ||
83 | * convenience. | ||
84 | */ | ||
85 | struct intel_sdvo_tv_format tv_format; | ||
86 | |||
87 | /* | ||
88 | * supported encoding mode, used to determine whether HDMI is | ||
89 | * supported | ||
90 | */ | ||
91 | struct intel_sdvo_encode encode; | ||
92 | |||
93 | /* DDC bus used by this SDVO output */ | ||
94 | uint8_t ddc_bus; | ||
95 | |||
96 | int save_sdvo_mult; | ||
97 | u16 save_active_outputs; | ||
98 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; | ||
99 | struct intel_sdvo_dtd save_output_dtd[16]; | ||
100 | u32 save_SDVOX; | ||
101 | }; | ||
102 | |||
103 | /** | ||
104 | * Writes the SDVOB or SDVOC with the given value, but always writes both | ||
105 | * SDVOB and SDVOC to work around apparent hardware issues (according to | ||
106 | * comments in the BIOS). | ||
107 | */ | ||
108 | static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | ||
109 | { | ||
110 | struct drm_device *dev = intel_output->base.dev; | ||
111 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
112 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
113 | u32 bval = val, cval = val; | ||
114 | int i; | ||
115 | |||
116 | if (sdvo_priv->output_device == SDVOB) { | ||
117 | cval = I915_READ(SDVOC); | ||
118 | } else { | ||
119 | bval = I915_READ(SDVOB); | ||
120 | } | ||
121 | /* | ||
122 | * Write the registers twice for luck. Sometimes, | ||
123 | * writing them only once doesn't appear to 'stick'. | ||
124 | * The BIOS does this too. Yay, magic | ||
125 | */ | ||
126 | for (i = 0; i < 2; i++) | ||
127 | { | ||
128 | I915_WRITE(SDVOB, bval); | ||
129 | I915_READ(SDVOB); | ||
130 | I915_WRITE(SDVOC, cval); | ||
131 | I915_READ(SDVOC); | ||
132 | } | ||
133 | } | ||
134 | |||
135 | static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | ||
136 | u8 *ch) | ||
137 | { | ||
138 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
139 | u8 out_buf[2]; | ||
140 | u8 buf[2]; | ||
141 | int ret; | ||
142 | |||
143 | struct i2c_msg msgs[] = { | ||
144 | { | ||
145 | .addr = sdvo_priv->i2c_bus->slave_addr, | ||
146 | .flags = 0, | ||
147 | .len = 1, | ||
148 | .buf = out_buf, | ||
149 | }, | ||
150 | { | ||
151 | .addr = sdvo_priv->i2c_bus->slave_addr, | ||
152 | .flags = I2C_M_RD, | ||
153 | .len = 1, | ||
154 | .buf = buf, | ||
155 | } | ||
156 | }; | ||
157 | |||
158 | out_buf[0] = addr; | ||
159 | out_buf[1] = 0; | ||
160 | |||
161 | if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) | ||
162 | { | ||
163 | *ch = buf[0]; | ||
164 | return true; | ||
165 | } | ||
166 | |||
167 | DRM_DEBUG("i2c transfer returned %d\n", ret); | ||
168 | return false; | ||
169 | } | ||
170 | |||
171 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | ||
172 | u8 ch) | ||
173 | { | ||
174 | u8 out_buf[2]; | ||
175 | struct i2c_msg msgs[] = { | ||
176 | { | ||
177 | .addr = intel_output->i2c_bus->slave_addr, | ||
178 | .flags = 0, | ||
179 | .len = 2, | ||
180 | .buf = out_buf, | ||
181 | } | ||
182 | }; | ||
183 | |||
184 | out_buf[0] = addr; | ||
185 | out_buf[1] = ch; | ||
186 | |||
187 | if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) | ||
188 | { | ||
189 | return true; | ||
190 | } | ||
191 | return false; | ||
192 | } | ||
193 | |||
194 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} | ||
195 | /** Mapping of command numbers to names, for debug output */ | ||
196 | static const struct _sdvo_cmd_name { | ||
197 | u8 cmd; | ||
198 | char *name; | ||
199 | } sdvo_cmd_names[] = { | ||
200 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), | ||
201 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), | ||
202 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), | ||
203 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), | ||
204 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), | ||
205 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), | ||
206 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), | ||
207 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), | ||
208 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), | ||
209 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), | ||
210 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), | ||
211 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), | ||
212 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), | ||
213 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), | ||
214 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), | ||
215 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), | ||
216 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), | ||
217 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), | ||
218 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), | ||
219 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), | ||
220 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), | ||
221 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), | ||
222 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), | ||
223 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), | ||
224 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), | ||
225 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), | ||
226 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), | ||
227 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), | ||
228 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), | ||
229 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), | ||
230 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), | ||
231 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), | ||
232 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), | ||
233 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), | ||
234 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), | ||
235 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), | ||
236 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), | ||
237 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), | ||
238 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), | ||
239 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), | ||
240 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), | ||
241 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), | ||
242 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), | ||
243 | /* HDMI op code */ | ||
244 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), | ||
245 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), | ||
246 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), | ||
247 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), | ||
248 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), | ||
249 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), | ||
250 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), | ||
251 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), | ||
252 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), | ||
253 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), | ||
254 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), | ||
255 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), | ||
256 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), | ||
257 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), | ||
258 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), | ||
259 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), | ||
260 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), | ||
261 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), | ||
262 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), | ||
263 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | ||
264 | }; | ||
265 | |||
266 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") | ||
267 | #define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) | ||
268 | |||
269 | #ifdef SDVO_DEBUG | ||
270 | static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | ||
271 | void *args, int args_len) | ||
272 | { | ||
273 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
274 | int i; | ||
275 | |||
276 | DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); | ||
277 | for (i = 0; i < args_len; i++) | ||
278 | printk("%02X ", ((u8 *)args)[i]); | ||
279 | for (; i < 8; i++) | ||
280 | printk(" "); | ||
281 | for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { | ||
282 | if (cmd == sdvo_cmd_names[i].cmd) { | ||
283 | printk("(%s)", sdvo_cmd_names[i].name); | ||
284 | break; | ||
285 | } | ||
286 | } | ||
287 | if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) | ||
288 | printk("(%02X)",cmd); | ||
289 | printk("\n"); | ||
290 | } | ||
291 | #else | ||
292 | #define intel_sdvo_debug_write(o, c, a, l) | ||
293 | #endif | ||
294 | |||
295 | static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, | ||
296 | void *args, int args_len) | ||
297 | { | ||
298 | int i; | ||
299 | |||
300 | intel_sdvo_debug_write(intel_output, cmd, args, args_len); | ||
301 | |||
302 | for (i = 0; i < args_len; i++) { | ||
303 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, | ||
304 | ((u8*)args)[i]); | ||
305 | } | ||
306 | |||
307 | intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); | ||
308 | } | ||
309 | |||
310 | #ifdef SDVO_DEBUG | ||
311 | static const char *cmd_status_names[] = { | ||
312 | "Power on", | ||
313 | "Success", | ||
314 | "Not supported", | ||
315 | "Invalid arg", | ||
316 | "Pending", | ||
317 | "Target not specified", | ||
318 | "Scaling not supported" | ||
319 | }; | ||
320 | |||
321 | static void intel_sdvo_debug_response(struct intel_output *intel_output, | ||
322 | void *response, int response_len, | ||
323 | u8 status) | ||
324 | { | ||
325 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
326 | int i; | ||
327 | |||
327 | DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv)); | ||
328 | for (i = 0; i < response_len; i++) | ||
329 | printk("%02X ", ((u8 *)response)[i]); | ||
330 | for (; i < 8; i++) | ||
331 | printk(" "); | ||
332 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) | ||
333 | printk("(%s)", cmd_status_names[status]); | ||
334 | else | ||
335 | printk("(??? %d)", status); | ||
336 | printk("\n"); | ||
337 | } | ||
338 | #else | ||
339 | #define intel_sdvo_debug_response(o, r, l, s) | ||
340 | #endif | ||
341 | |||
342 | static u8 intel_sdvo_read_response(struct intel_output *intel_output, | ||
343 | void *response, int response_len) | ||
344 | { | ||
345 | int i; | ||
346 | u8 status; | ||
347 | u8 retry = 50; | ||
348 | |||
349 | while (retry--) { | ||
350 | /* Read the command response */ | ||
351 | for (i = 0; i < response_len; i++) { | ||
352 | intel_sdvo_read_byte(intel_output, | ||
353 | SDVO_I2C_RETURN_0 + i, | ||
354 | &((u8 *)response)[i]); | ||
355 | } | ||
356 | |||
357 | /* read the return status */ | ||
358 | intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, | ||
359 | &status); | ||
360 | |||
361 | intel_sdvo_debug_response(intel_output, response, response_len, | ||
362 | status); | ||
363 | if (status != SDVO_CMD_STATUS_PENDING) | ||
364 | return status; | ||
365 | |||
366 | mdelay(50); | ||
367 | } | ||
368 | |||
369 | return status; | ||
370 | } | ||
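Note: every SDVO command in this file follows the same write-then-poll shape: write the argument bytes and the opcode, then poll the status register until it stops reporting PENDING (here up to 50 retries with mdelay(50), roughly 2.5 seconds worst case). A minimal wrapper sketch, assuming only the two helpers above (not part of the patch):

/* Sketch only: issue one SDVO command and wait for its reply. */
static bool sdvo_run_cmd(struct intel_output *output, u8 cmd,
			 void *args, int args_len,
			 void *reply, int reply_len)
{
	intel_sdvo_write_cmd(output, cmd, args, args_len);
	return intel_sdvo_read_response(output, reply, reply_len) ==
	       SDVO_CMD_STATUS_SUCCESS;
}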
371 | |||
372 | static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | ||
373 | { | ||
374 | if (mode->clock >= 100000) | ||
375 | return 1; | ||
376 | else if (mode->clock >= 50000) | ||
377 | return 2; | ||
378 | else | ||
379 | return 4; | ||
380 | } | ||
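Note: the multiplier appears intended to keep the effective SDVO link clock at or above the 100 MHz threshold by repeating each pixel 1, 2 or 4 times; mode_fixup below multiplies adjusted_mode->clock by the same factor so the DPLL is programmed for the multiplied rate. A small worked sketch under that assumption (not part of the patch):

/* Sketch only: link clock implied by the thresholds above.
 * 65 MHz (1024x768@60)    -> 2x -> 130 MHz
 * 25.175 MHz (640x480@60) -> 4x -> ~100.7 MHz */
static int sdvo_link_clock_khz(int dot_clock_khz)
{
	int mult = (dot_clock_khz >= 100000) ? 1 :
		   (dot_clock_khz >= 50000)  ? 2 : 4;
	return dot_clock_khz * mult;
}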
381 | |||
382 | /** | ||
383 | * Don't check status code from this as it switches the bus back to the | ||
384 | * SDVO chips which defeats the purpose of doing a bus switch in the first | ||
385 | * place. | ||
386 | */ | ||
387 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | ||
388 | u8 target) | ||
389 | { | ||
390 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); | ||
391 | } | ||
392 | |||
393 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | ||
394 | { | ||
395 | struct intel_sdvo_set_target_input_args targets = {0}; | ||
396 | u8 status; | ||
397 | |||
398 | if (target_0 && target_1) | ||
399 | return false; /* can't target both inputs at once */ | ||
400 | |||
401 | if (target_1) | ||
402 | targets.target_1 = 1; | ||
403 | |||
404 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, | ||
405 | sizeof(targets)); | ||
406 | |||
407 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
408 | |||
409 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
410 | } | ||
411 | |||
412 | /** | ||
413 | * Return whether each input is trained. | ||
414 | * | ||
415 | * This function is making an assumption about the layout of the response, | ||
416 | * which should be checked against the docs. | ||
417 | */ | ||
418 | static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) | ||
419 | { | ||
420 | struct intel_sdvo_get_trained_inputs_response response; | ||
421 | u8 status; | ||
422 | |||
423 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); | ||
424 | status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); | ||
425 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
426 | return false; | ||
427 | |||
428 | *input_1 = response.input0_trained; | ||
429 | *input_2 = response.input1_trained; | ||
430 | return true; | ||
431 | } | ||
432 | |||
433 | static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, | ||
434 | u16 *outputs) | ||
435 | { | ||
436 | u8 status; | ||
437 | |||
438 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); | ||
439 | status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); | ||
440 | |||
441 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
442 | } | ||
443 | |||
444 | static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, | ||
445 | u16 outputs) | ||
446 | { | ||
447 | u8 status; | ||
448 | |||
449 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, | ||
450 | sizeof(outputs)); | ||
451 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
452 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
453 | } | ||
454 | |||
455 | static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, | ||
456 | int mode) | ||
457 | { | ||
458 | u8 status, state = SDVO_ENCODER_STATE_ON; | ||
459 | |||
460 | switch (mode) { | ||
461 | case DRM_MODE_DPMS_ON: | ||
462 | state = SDVO_ENCODER_STATE_ON; | ||
463 | break; | ||
464 | case DRM_MODE_DPMS_STANDBY: | ||
465 | state = SDVO_ENCODER_STATE_STANDBY; | ||
466 | break; | ||
467 | case DRM_MODE_DPMS_SUSPEND: | ||
468 | state = SDVO_ENCODER_STATE_SUSPEND; | ||
469 | break; | ||
470 | case DRM_MODE_DPMS_OFF: | ||
471 | state = SDVO_ENCODER_STATE_OFF; | ||
472 | break; | ||
473 | } | ||
474 | |||
475 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, | ||
476 | sizeof(state)); | ||
477 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
478 | |||
479 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
480 | } | ||
481 | |||
482 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, | ||
483 | int *clock_min, | ||
484 | int *clock_max) | ||
485 | { | ||
486 | struct intel_sdvo_pixel_clock_range clocks; | ||
487 | u8 status; | ||
488 | |||
489 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | ||
490 | NULL, 0); | ||
491 | |||
492 | status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); | ||
493 | |||
494 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
495 | return false; | ||
496 | |||
497 | /* Convert the values from units of 10 kHz to kHz. */ | ||
498 | *clock_min = clocks.min * 10; | ||
499 | *clock_max = clocks.max * 10; | ||
500 | |||
501 | return true; | ||
502 | } | ||
503 | |||
504 | static bool intel_sdvo_set_target_output(struct intel_output *intel_output, | ||
505 | u16 outputs) | ||
506 | { | ||
507 | u8 status; | ||
508 | |||
509 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, | ||
510 | sizeof(outputs)); | ||
511 | |||
512 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
513 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
514 | } | ||
515 | |||
516 | static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | ||
517 | struct intel_sdvo_dtd *dtd) | ||
518 | { | ||
519 | u8 status; | ||
520 | |||
521 | intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); | ||
522 | status = intel_sdvo_read_response(intel_output, &dtd->part1, | ||
523 | sizeof(dtd->part1)); | ||
524 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
525 | return false; | ||
526 | |||
527 | intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); | ||
528 | status = intel_sdvo_read_response(intel_output, &dtd->part2, | ||
529 | sizeof(dtd->part2)); | ||
530 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
531 | return false; | ||
532 | |||
533 | return true; | ||
534 | } | ||
535 | |||
536 | static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, | ||
537 | struct intel_sdvo_dtd *dtd) | ||
538 | { | ||
539 | return intel_sdvo_get_timing(intel_output, | ||
540 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); | ||
541 | } | ||
542 | |||
543 | static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, | ||
544 | struct intel_sdvo_dtd *dtd) | ||
545 | { | ||
546 | return intel_sdvo_get_timing(intel_output, | ||
547 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); | ||
548 | } | ||
549 | |||
550 | static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, | ||
551 | struct intel_sdvo_dtd *dtd) | ||
552 | { | ||
553 | u8 status; | ||
554 | |||
555 | intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); | ||
556 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
557 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
558 | return false; | ||
559 | |||
560 | intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); | ||
561 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
562 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
563 | return false; | ||
564 | |||
565 | return true; | ||
566 | } | ||
567 | |||
568 | static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, | ||
569 | struct intel_sdvo_dtd *dtd) | ||
570 | { | ||
571 | return intel_sdvo_set_timing(intel_output, | ||
572 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); | ||
573 | } | ||
574 | |||
575 | static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, | ||
576 | struct intel_sdvo_dtd *dtd) | ||
577 | { | ||
578 | return intel_sdvo_set_timing(intel_output, | ||
579 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); | ||
580 | } | ||
581 | |||
582 | static bool | ||
583 | intel_sdvo_create_preferred_input_timing(struct intel_output *output, | ||
584 | uint16_t clock, | ||
585 | uint16_t width, | ||
586 | uint16_t height) | ||
587 | { | ||
588 | struct intel_sdvo_preferred_input_timing_args args; | ||
589 | uint8_t status; | ||
590 | |||
591 | args.clock = clock; | ||
592 | args.width = width; | ||
593 | args.height = height; | ||
594 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | ||
595 | &args, sizeof(args)); | ||
596 | status = intel_sdvo_read_response(output, NULL, 0); | ||
597 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
598 | return false; | ||
599 | |||
600 | return true; | ||
601 | } | ||
602 | |||
603 | static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | ||
604 | struct intel_sdvo_dtd *dtd) | ||
605 | { | ||
606 | u8 status; | ||
607 | |||
608 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | ||
609 | NULL, 0); | ||
610 | |||
611 | status = intel_sdvo_read_response(output, &dtd->part1, | ||
612 | sizeof(dtd->part1)); | ||
613 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
614 | return false; | ||
615 | |||
616 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | ||
617 | NULL, 0); | ||
618 | |||
619 | status = intel_sdvo_read_response(output, &dtd->part2, | ||
620 | sizeof(dtd->part2)); | ||
621 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
622 | return false; | ||
623 | |||
624 | return true; | ||
625 | } | ||
626 | |||
627 | static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | ||
628 | { | ||
629 | u8 response, status; | ||
630 | |||
631 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); | ||
632 | status = intel_sdvo_read_response(intel_output, &response, 1); | ||
633 | |||
634 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
635 | DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); | ||
636 | return SDVO_CLOCK_RATE_MULT_1X; | ||
637 | } else { | ||
638 | DRM_DEBUG("Current clock rate multiplier: %d\n", response); | ||
639 | } | ||
640 | |||
641 | return response; | ||
642 | } | ||
643 | |||
644 | static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) | ||
645 | { | ||
646 | u8 status; | ||
647 | |||
648 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); | ||
649 | status = intel_sdvo_read_response(intel_output, NULL, 0); | ||
650 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
651 | return false; | ||
652 | |||
653 | return true; | ||
654 | } | ||
655 | |||
656 | static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, | ||
657 | struct drm_display_mode *mode) | ||
658 | { | ||
659 | uint16_t width, height; | ||
660 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; | ||
661 | uint16_t h_sync_offset, v_sync_offset; | ||
662 | |||
663 | width = mode->crtc_hdisplay; | ||
664 | height = mode->crtc_vdisplay; | ||
665 | |||
666 | /* do some mode translations */ | ||
667 | h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; | ||
668 | h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; | ||
669 | |||
670 | v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; | ||
671 | v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
672 | |||
673 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; | ||
674 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; | ||
675 | |||
676 | dtd->part1.clock = mode->clock / 10; | ||
677 | dtd->part1.h_active = width & 0xff; | ||
678 | dtd->part1.h_blank = h_blank_len & 0xff; | ||
679 | dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | | ||
680 | ((h_blank_len >> 8) & 0xf); | ||
681 | dtd->part1.v_active = height & 0xff; | ||
682 | dtd->part1.v_blank = v_blank_len & 0xff; | ||
683 | dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | | ||
684 | ((v_blank_len >> 8) & 0xf); | ||
685 | |||
686 | dtd->part2.h_sync_off = h_sync_offset; | ||
687 | dtd->part2.h_sync_width = h_sync_len & 0xff; | ||
688 | dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | | ||
689 | (v_sync_len & 0xf); | ||
690 | dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | | ||
691 | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | | ||
692 | ((v_sync_len & 0x30) >> 4); | ||
693 | |||
694 | dtd->part2.dtd_flags = 0x18; /* digital separate sync */ | ||
695 | if (mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
696 | dtd->part2.dtd_flags |= 0x2; /* hsync positive */ | ||
697 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
698 | dtd->part2.dtd_flags |= 0x4; /* vsync positive */ | ||
699 | |||
700 | dtd->part2.sdvo_flags = 0; | ||
701 | dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; | ||
702 | dtd->part2.reserved = 0; | ||
703 | } | ||
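Note: the packing above follows the EDID detailed-timing layout: 12-bit active/blanking values are split into a low byte plus a high nibble stored in the shared *_high byte (upper nibble for the active value, lower nibble for the blanking value). A tiny round-trip sketch for the horizontal active width, under that assumption (not part of the patch):

/* Sketch only: 1920 = 0x780, so h_active = 0x80 and the upper nibble
 * of h_high carries 0x7. */
static void dtd_pack_width_example(void)
{
	u16 width = 1920;
	u8 h_active = width & 0xff;			/* 0x80 */
	u8 h_high   = ((width >> 8) & 0xf) << 4;	/* 0x70 */
	u16 unpacked = h_active | (((h_high >> 4) & 0xf) << 8);

	WARN_ON(unpacked != width);			/* round-trips to 1920 */
}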
704 | |||
705 | static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | ||
706 | struct intel_sdvo_dtd *dtd) | ||
707 | { | ||
708 | uint16_t width, height; | ||
709 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; | ||
710 | uint16_t h_sync_offset, v_sync_offset; | ||
711 | |||
712 | width = mode->crtc_hdisplay; | ||
713 | height = mode->crtc_vdisplay; | ||
714 | |||
715 | /* do some mode translations */ | ||
716 | h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; | ||
717 | h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; | ||
718 | |||
719 | v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; | ||
720 | v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
721 | |||
722 | h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; | ||
723 | v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; | ||
724 | |||
725 | mode->hdisplay = dtd->part1.h_active; | ||
726 | mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; | ||
727 | mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; | ||
728 | mode->hsync_start += (dtd->part2.sync_off_width_high & 0xa0) << 2; | ||
729 | mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; | ||
730 | mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; | ||
731 | mode->htotal = mode->hdisplay + dtd->part1.h_blank; | ||
732 | mode->htotal += (dtd->part1.h_high & 0xf) << 8; | ||
733 | |||
734 | mode->vdisplay = dtd->part1.v_active; | ||
735 | mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; | ||
736 | mode->vsync_start = mode->vdisplay; | ||
737 | mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; | ||
738 | mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0a) << 2; | ||
739 | mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; | ||
740 | mode->vsync_end = mode->vsync_start + | ||
741 | (dtd->part2.v_sync_off_width & 0xf); | ||
742 | mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; | ||
743 | mode->vtotal = mode->vdisplay + dtd->part1.v_blank; | ||
744 | mode->vtotal += (dtd->part1.v_high & 0xf) << 8; | ||
745 | |||
746 | mode->clock = dtd->part1.clock * 10; | ||
747 | |||
748 | mode->flags &= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); | ||
749 | if (dtd->part2.dtd_flags & 0x2) | ||
750 | mode->flags |= DRM_MODE_FLAG_PHSYNC; | ||
751 | if (dtd->part2.dtd_flags & 0x4) | ||
752 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | ||
753 | } | ||
754 | |||
755 | static bool intel_sdvo_get_supp_encode(struct intel_output *output, | ||
756 | struct intel_sdvo_encode *encode) | ||
757 | { | ||
758 | uint8_t status; | ||
759 | |||
760 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); | ||
761 | status = intel_sdvo_read_response(output, encode, sizeof(*encode)); | ||
762 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ | ||
763 | memset(encode, 0, sizeof(*encode)); | ||
764 | return false; | ||
765 | } | ||
766 | |||
767 | return true; | ||
768 | } | ||
769 | |||
770 | static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) | ||
771 | { | ||
772 | uint8_t status; | ||
773 | |||
774 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); | ||
775 | status = intel_sdvo_read_response(output, NULL, 0); | ||
776 | |||
777 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
778 | } | ||
779 | |||
780 | static bool intel_sdvo_set_colorimetry(struct intel_output *output, | ||
781 | uint8_t mode) | ||
782 | { | ||
783 | uint8_t status; | ||
784 | |||
785 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); | ||
786 | status = intel_sdvo_read_response(output, NULL, 0); | ||
787 | |||
788 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
789 | } | ||
790 | |||
791 | #if 0 | ||
792 | static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | ||
793 | { | ||
794 | int i, j; | ||
795 | uint8_t set_buf_index[2]; | ||
796 | uint8_t av_split; | ||
797 | uint8_t buf_size; | ||
798 | uint8_t buf[48]; | ||
799 | uint8_t *pos; | ||
800 | |||
801 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); | ||
802 | intel_sdvo_read_response(output, &av_split, 1); | ||
803 | |||
804 | for (i = 0; i <= av_split; i++) { | ||
805 | set_buf_index[0] = i; set_buf_index[1] = 0; | ||
806 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, | ||
807 | set_buf_index, 2); | ||
808 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); | ||
809 | intel_sdvo_read_response(output, &buf_size, 1); | ||
810 | |||
811 | pos = buf; | ||
812 | for (j = 0; j <= buf_size; j += 8) { | ||
813 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, | ||
814 | NULL, 0); | ||
815 | intel_sdvo_read_response(output, pos, 8); | ||
816 | pos += 8; | ||
817 | } | ||
818 | } | ||
819 | } | ||
820 | #endif | ||
821 | |||
822 | static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, | ||
823 | uint8_t *data, int8_t size, uint8_t tx_rate) | ||
824 | { | ||
825 | uint8_t set_buf_index[2]; | ||
826 | |||
827 | set_buf_index[0] = index; | ||
828 | set_buf_index[1] = 0; | ||
829 | |||
830 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); | ||
831 | |||
832 | for (; size > 0; size -= 8) { | ||
833 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); | ||
834 | data += 8; | ||
835 | } | ||
836 | |||
837 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | ||
838 | } | ||
839 | |||
840 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | ||
841 | { | ||
842 | uint8_t csum = 0; | ||
843 | int i; | ||
844 | |||
845 | for (i = 0; i < size; i++) | ||
846 | csum += data[i]; | ||
847 | |||
848 | return 0x100 - csum; | ||
849 | } | ||
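Note: this is the usual HDMI/CEA-861 infoframe checksum rule: the checksum byte is chosen so that header, payload and checksum sum to zero modulo 256 (the 0x100 - csum result truncates to a u8 on return). A minimal self-check sketch under that assumption (not part of the patch):

/* Sketch only: the byte sum of an infoframe plus its checksum wraps to 0. */
static bool infoframe_csum_ok(const uint8_t *frame, uint8_t size, uint8_t csum)
{
	uint8_t sum = csum;
	int i;

	for (i = 0; i < size; i++)
		sum += frame[i];

	return sum == 0;
}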
850 | |||
851 | #define DIP_TYPE_AVI 0x82 | ||
852 | #define DIP_VERSION_AVI 0x2 | ||
853 | #define DIP_LEN_AVI 13 | ||
854 | |||
855 | struct dip_infoframe { | ||
856 | uint8_t type; | ||
857 | uint8_t version; | ||
858 | uint8_t len; | ||
859 | uint8_t checksum; | ||
860 | union { | ||
861 | struct { | ||
862 | /* Packet Byte #1 */ | ||
863 | uint8_t S:2; | ||
864 | uint8_t B:2; | ||
865 | uint8_t A:1; | ||
866 | uint8_t Y:2; | ||
867 | uint8_t rsvd1:1; | ||
868 | /* Packet Byte #2 */ | ||
869 | uint8_t R:4; | ||
870 | uint8_t M:2; | ||
871 | uint8_t C:2; | ||
872 | /* Packet Byte #3 */ | ||
873 | uint8_t SC:2; | ||
874 | uint8_t Q:2; | ||
875 | uint8_t EC:3; | ||
876 | uint8_t ITC:1; | ||
877 | /* Packet Byte #4 */ | ||
878 | uint8_t VIC:7; | ||
879 | uint8_t rsvd2:1; | ||
880 | /* Packet Byte #5 */ | ||
881 | uint8_t PR:4; | ||
882 | uint8_t rsvd3:4; | ||
883 | /* Packet Byte #6~13 */ | ||
884 | uint16_t top_bar_end; | ||
885 | uint16_t bottom_bar_start; | ||
886 | uint16_t left_bar_end; | ||
887 | uint16_t right_bar_start; | ||
888 | } avi; | ||
889 | struct { | ||
890 | /* Packet Byte #1 */ | ||
891 | uint8_t channel_count:3; | ||
892 | uint8_t rsvd1:1; | ||
893 | uint8_t coding_type:4; | ||
894 | /* Packet Byte #2 */ | ||
895 | uint8_t sample_size:2; /* SS0, SS1 */ | ||
896 | uint8_t sample_frequency:3; | ||
897 | uint8_t rsvd2:3; | ||
898 | /* Packet Byte #3 */ | ||
899 | uint8_t coding_type_private:5; | ||
900 | uint8_t rsvd3:3; | ||
901 | /* Packet Byte #4 */ | ||
902 | uint8_t channel_allocation; | ||
903 | /* Packet Byte #5 */ | ||
904 | uint8_t rsvd4:3; | ||
905 | uint8_t level_shift:4; | ||
906 | uint8_t downmix_inhibit:1; | ||
907 | } audio; | ||
908 | uint8_t payload[28]; | ||
909 | } __attribute__ ((packed)) u; | ||
910 | } __attribute__((packed)); | ||
911 | |||
912 | static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | ||
913 | struct drm_display_mode * mode) | ||
914 | { | ||
915 | struct dip_infoframe avi_if = { | ||
916 | .type = DIP_TYPE_AVI, | ||
917 | .version = DIP_VERSION_AVI, | ||
918 | .len = DIP_LEN_AVI, | ||
919 | }; | ||
920 | |||
921 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | ||
922 | 4 + avi_if.len); | ||
923 | intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, | ||
924 | SDVO_HBUF_TX_VSYNC); | ||
925 | } | ||
926 | |||
927 | static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | ||
928 | struct drm_display_mode *mode, | ||
929 | struct drm_display_mode *adjusted_mode) | ||
930 | { | ||
931 | struct intel_output *output = enc_to_intel_output(encoder); | ||
932 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | ||
933 | |||
934 | if (!dev_priv->is_tv) { | ||
935 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | ||
936 | * SDVO device will be told of the multiplier during mode_set. | ||
937 | */ | ||
938 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
939 | } else { | ||
940 | struct intel_sdvo_dtd output_dtd; | ||
941 | bool success; | ||
942 | |||
943 | /* We need to construct preferred input timings based on our | ||
944 | * output timings. To do that, we have to set the output | ||
945 | * timings, even though this isn't really the right place in | ||
946 | * the sequence to do it. Oh well. | ||
947 | */ | ||
948 | |||
949 | |||
950 | /* Set output timings */ | ||
951 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | ||
952 | intel_sdvo_set_target_output(output, | ||
953 | dev_priv->controlled_output); | ||
954 | intel_sdvo_set_output_timing(output, &output_dtd); | ||
955 | |||
956 | /* Set the input timing to the screen. Assume always input 0. */ | ||
957 | intel_sdvo_set_target_input(output, true, false); | ||
958 | |||
959 | |||
960 | success = intel_sdvo_create_preferred_input_timing(output, | ||
961 | mode->clock / 10, | ||
962 | mode->hdisplay, | ||
963 | mode->vdisplay); | ||
964 | if (success) { | ||
965 | struct intel_sdvo_dtd input_dtd; | ||
966 | |||
967 | intel_sdvo_get_preferred_input_timing(output, | ||
968 | &input_dtd); | ||
969 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | ||
970 | |||
971 | } else { | ||
972 | return false; | ||
973 | } | ||
974 | } | ||
975 | return true; | ||
976 | } | ||
977 | |||
978 | static void intel_sdvo_mode_set(struct drm_encoder *encoder, | ||
979 | struct drm_display_mode *mode, | ||
980 | struct drm_display_mode *adjusted_mode) | ||
981 | { | ||
982 | struct drm_device *dev = encoder->dev; | ||
983 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
984 | struct drm_crtc *crtc = encoder->crtc; | ||
985 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
986 | struct intel_output *output = enc_to_intel_output(encoder); | ||
987 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
988 | u32 sdvox = 0; | ||
989 | int sdvo_pixel_multiply; | ||
990 | struct intel_sdvo_in_out_map in_out; | ||
991 | struct intel_sdvo_dtd input_dtd; | ||
992 | u8 status; | ||
993 | |||
994 | if (!mode) | ||
995 | return; | ||
996 | |||
997 | /* First, set the input mapping for the first input to our controlled | ||
998 | * output. This is only correct if we're a single-input device, in | ||
999 | * which case the first input is the output from the appropriate SDVO | ||
1000 | * channel on the motherboard. In a two-input device, the first input | ||
1001 | * will be SDVOB and the second SDVOC. | ||
1002 | */ | ||
1003 | in_out.in0 = sdvo_priv->controlled_output; | ||
1004 | in_out.in1 = 0; | ||
1005 | |||
1006 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, | ||
1007 | &in_out, sizeof(in_out)); | ||
1008 | status = intel_sdvo_read_response(output, NULL, 0); | ||
1009 | |||
1010 | if (sdvo_priv->is_hdmi) { | ||
1011 | intel_sdvo_set_avi_infoframe(output, mode); | ||
1012 | sdvox |= SDVO_AUDIO_ENABLE; | ||
1013 | } | ||
1014 | |||
1015 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); | ||
1016 | |||
1017 | /* If it's a TV, we already set the output timing in mode_fixup. | ||
1018 | * Otherwise, the output timing is equal to the input timing. | ||
1019 | */ | ||
1020 | if (!sdvo_priv->is_tv) { | ||
1021 | /* Set the output timing to the screen */ | ||
1022 | intel_sdvo_set_target_output(output, | ||
1023 | sdvo_priv->controlled_output); | ||
1024 | intel_sdvo_set_output_timing(output, &input_dtd); | ||
1025 | } | ||
1026 | |||
1027 | /* Set the input timing to the screen. Assume always input 0. */ | ||
1028 | intel_sdvo_set_target_input(output, true, false); | ||
1029 | |||
1030 | /* We would like to use intel_sdvo_create_preferred_input_timing() to | ||
1031 | * provide the device with a timing it can support, if it supports that | ||
1032 | * feature. However, presumably we would need to adjust the CRTC to | ||
1033 | * output the preferred timing, and we don't support that currently. | ||
1034 | */ | ||
1035 | #if 0 | ||
1036 | success = intel_sdvo_create_preferred_input_timing(output, clock, | ||
1037 | width, height); | ||
1038 | if (success) { | ||
1039 | struct intel_sdvo_dtd *input_dtd; | ||
1040 | |||
1041 | intel_sdvo_get_preferred_input_timing(output, &input_dtd); | ||
1042 | intel_sdvo_set_input_timing(output, &input_dtd); | ||
1043 | } | ||
1044 | #else | ||
1045 | intel_sdvo_set_input_timing(output, &input_dtd); | ||
1046 | #endif | ||
1047 | |||
1048 | switch (intel_sdvo_get_pixel_multiplier(mode)) { | ||
1049 | case 1: | ||
1050 | intel_sdvo_set_clock_rate_mult(output, | ||
1051 | SDVO_CLOCK_RATE_MULT_1X); | ||
1052 | break; | ||
1053 | case 2: | ||
1054 | intel_sdvo_set_clock_rate_mult(output, | ||
1055 | SDVO_CLOCK_RATE_MULT_2X); | ||
1056 | break; | ||
1057 | case 4: | ||
1058 | intel_sdvo_set_clock_rate_mult(output, | ||
1059 | SDVO_CLOCK_RATE_MULT_4X); | ||
1060 | break; | ||
1061 | } | ||
1062 | |||
1063 | /* Set the SDVO control regs. */ | ||
1064 | if (IS_I965G(dev)) { | ||
1065 | sdvox |= SDVO_BORDER_ENABLE | | ||
1066 | SDVO_VSYNC_ACTIVE_HIGH | | ||
1067 | SDVO_HSYNC_ACTIVE_HIGH; | ||
1068 | } else { | ||
1069 | sdvox |= I915_READ(sdvo_priv->output_device); | ||
1070 | switch (sdvo_priv->output_device) { | ||
1071 | case SDVOB: | ||
1072 | sdvox &= SDVOB_PRESERVE_MASK; | ||
1073 | break; | ||
1074 | case SDVOC: | ||
1075 | sdvox &= SDVOC_PRESERVE_MASK; | ||
1076 | break; | ||
1077 | } | ||
1078 | sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; | ||
1079 | } | ||
1080 | if (intel_crtc->pipe == 1) | ||
1081 | sdvox |= SDVO_PIPE_B_SELECT; | ||
1082 | |||
1083 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); | ||
1084 | if (IS_I965G(dev)) { | ||
1085 | /* done in crtc_mode_set as the dpll_md reg must be written early */ | ||
1086 | } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { | ||
1087 | /* done in crtc_mode_set as it lives inside the dpll register */ | ||
1088 | } else { | ||
1089 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; | ||
1090 | } | ||
1091 | |||
1092 | intel_sdvo_write_sdvox(output, sdvox); | ||
1093 | } | ||
1094 | |||
1095 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | ||
1096 | { | ||
1097 | struct drm_device *dev = encoder->dev; | ||
1098 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1099 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
1100 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1101 | u32 temp; | ||
1102 | |||
1103 | if (mode != DRM_MODE_DPMS_ON) { | ||
1104 | intel_sdvo_set_active_outputs(intel_output, 0); | ||
1105 | if (0) | ||
1106 | intel_sdvo_set_encoder_power_state(intel_output, mode); | ||
1107 | |||
1108 | if (mode == DRM_MODE_DPMS_OFF) { | ||
1109 | temp = I915_READ(sdvo_priv->output_device); | ||
1110 | if ((temp & SDVO_ENABLE) != 0) { | ||
1111 | intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); | ||
1112 | } | ||
1113 | } | ||
1114 | } else { | ||
1115 | bool input1, input2; | ||
1116 | int i; | ||
1117 | u8 status; | ||
1118 | |||
1119 | temp = I915_READ(sdvo_priv->output_device); | ||
1120 | if ((temp & SDVO_ENABLE) == 0) | ||
1121 | intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); | ||
1122 | for (i = 0; i < 2; i++) | ||
1123 | intel_wait_for_vblank(dev); | ||
1124 | |||
1125 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, | ||
1126 | &input2); | ||
1127 | |||
1128 | |||
1129 | /* Warn if the device reported failure to sync. | ||
1130 | * A lot of SDVO devices fail to notify of sync, but it's | ||
1131 | * a given that if the status is a success, we succeeded. | ||
1132 | */ | ||
1133 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { | ||
1134 | DRM_DEBUG("First %s output reported failure to sync\n", | ||
1135 | SDVO_NAME(sdvo_priv)); | ||
1136 | } | ||
1137 | |||
1138 | if (0) | ||
1139 | intel_sdvo_set_encoder_power_state(intel_output, mode); | ||
1140 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); | ||
1141 | } | ||
1142 | return; | ||
1143 | } | ||
1144 | |||
1145 | static void intel_sdvo_save(struct drm_connector *connector) | ||
1146 | { | ||
1147 | struct drm_device *dev = connector->dev; | ||
1148 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1149 | struct intel_output *intel_output = to_intel_output(connector); | ||
1150 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1151 | int o; | ||
1152 | |||
1153 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); | ||
1154 | intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); | ||
1155 | |||
1156 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | ||
1157 | intel_sdvo_set_target_input(intel_output, true, false); | ||
1158 | intel_sdvo_get_input_timing(intel_output, | ||
1159 | &sdvo_priv->save_input_dtd_1); | ||
1160 | } | ||
1161 | |||
1162 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | ||
1163 | intel_sdvo_set_target_input(intel_output, false, true); | ||
1164 | intel_sdvo_get_input_timing(intel_output, | ||
1165 | &sdvo_priv->save_input_dtd_2); | ||
1166 | } | ||
1167 | |||
1168 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | ||
1169 | { | ||
1170 | u16 this_output = (1 << o); | ||
1171 | if (sdvo_priv->caps.output_flags & this_output) | ||
1172 | { | ||
1173 | intel_sdvo_set_target_output(intel_output, this_output); | ||
1174 | intel_sdvo_get_output_timing(intel_output, | ||
1175 | &sdvo_priv->save_output_dtd[o]); | ||
1176 | } | ||
1177 | } | ||
1178 | if (sdvo_priv->is_tv) { | ||
1179 | /* XXX: Save TV format/enhancements. */ | ||
1180 | } | ||
1181 | |||
1182 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); | ||
1183 | } | ||
1184 | |||
1185 | static void intel_sdvo_restore(struct drm_connector *connector) | ||
1186 | { | ||
1187 | struct drm_device *dev = connector->dev; | ||
1188 | struct intel_output *intel_output = to_intel_output(connector); | ||
1189 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1190 | int o; | ||
1191 | int i; | ||
1192 | bool input1, input2; | ||
1193 | u8 status; | ||
1194 | |||
1195 | intel_sdvo_set_active_outputs(intel_output, 0); | ||
1196 | |||
1197 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | ||
1198 | { | ||
1199 | u16 this_output = (1 << o); | ||
1200 | if (sdvo_priv->caps.output_flags & this_output) { | ||
1201 | intel_sdvo_set_target_output(intel_output, this_output); | ||
1202 | intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); | ||
1203 | } | ||
1204 | } | ||
1205 | |||
1206 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | ||
1207 | intel_sdvo_set_target_input(intel_output, true, false); | ||
1208 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); | ||
1209 | } | ||
1210 | |||
1211 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | ||
1212 | intel_sdvo_set_target_input(intel_output, false, true); | ||
1213 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); | ||
1214 | } | ||
1215 | |||
1216 | intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); | ||
1217 | |||
1218 | if (sdvo_priv->is_tv) { | ||
1219 | /* XXX: Restore TV format/enhancements. */ | ||
1220 | } | ||
1221 | |||
1222 | intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); | ||
1223 | |||
1224 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) | ||
1225 | { | ||
1226 | for (i = 0; i < 2; i++) | ||
1227 | intel_wait_for_vblank(dev); | ||
1228 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); | ||
1229 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) | ||
1230 | DRM_DEBUG("First %s output reported failure to sync\n", | ||
1231 | SDVO_NAME(sdvo_priv)); | ||
1232 | } | ||
1233 | |||
1234 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); | ||
1235 | } | ||
1236 | |||
1237 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | ||
1238 | struct drm_display_mode *mode) | ||
1239 | { | ||
1240 | struct intel_output *intel_output = to_intel_output(connector); | ||
1241 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1242 | |||
1243 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
1244 | return MODE_NO_DBLESCAN; | ||
1245 | |||
1246 | if (sdvo_priv->pixel_clock_min > mode->clock) | ||
1247 | return MODE_CLOCK_LOW; | ||
1248 | |||
1249 | if (sdvo_priv->pixel_clock_max < mode->clock) | ||
1250 | return MODE_CLOCK_HIGH; | ||
1251 | |||
1252 | return MODE_OK; | ||
1253 | } | ||
1254 | |||
1255 | static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) | ||
1256 | { | ||
1257 | u8 status; | ||
1258 | |||
1259 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); | ||
1260 | status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); | ||
1261 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1262 | return false; | ||
1263 | |||
1264 | return true; | ||
1265 | } | ||
1266 | |||
1267 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | ||
1268 | { | ||
1269 | struct drm_connector *connector = NULL; | ||
1270 | struct intel_output *iout = NULL; | ||
1271 | struct intel_sdvo_priv *sdvo; | ||
1272 | |||
1273 | /* find the sdvo connector */ | ||
1274 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1275 | iout = to_intel_output(connector); | ||
1276 | |||
1277 | if (iout->type != INTEL_OUTPUT_SDVO) | ||
1278 | continue; | ||
1279 | |||
1280 | sdvo = iout->dev_priv; | ||
1281 | |||
1282 | if (sdvo->output_device == SDVOB && sdvoB) | ||
1283 | return connector; | ||
1284 | |||
1285 | if (sdvo->output_device == SDVOC && !sdvoB) | ||
1286 | return connector; | ||
1287 | |||
1288 | } | ||
1289 | |||
1290 | return NULL; | ||
1291 | } | ||
1292 | |||
1293 | int intel_sdvo_supports_hotplug(struct drm_connector *connector) | ||
1294 | { | ||
1295 | u8 response[2]; | ||
1296 | u8 status; | ||
1297 | struct intel_output *intel_output; | ||
1298 | DRM_DEBUG("\n"); | ||
1299 | |||
1300 | if (!connector) | ||
1301 | return 0; | ||
1302 | |||
1303 | intel_output = to_intel_output(connector); | ||
1304 | |||
1305 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1306 | status = intel_sdvo_read_response(intel_output, &response, 2); | ||
1307 | |||
1308 | if (response[0] != 0) | ||
1309 | return 1; | ||
1310 | |||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | ||
1315 | { | ||
1316 | u8 response[2]; | ||
1317 | u8 status; | ||
1318 | struct intel_output *intel_output = to_intel_output(connector); | ||
1319 | |||
1320 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1321 | intel_sdvo_read_response(intel_output, &response, 2); | ||
1322 | |||
1323 | if (on) { | ||
1324 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1325 | status = intel_sdvo_read_response(intel_output, &response, 2); | ||
1326 | |||
1327 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1328 | } else { | ||
1329 | response[0] = 0; | ||
1330 | response[1] = 0; | ||
1331 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1332 | } | ||
1333 | |||
1334 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1335 | intel_sdvo_read_response(intel_output, &response, 2); | ||
1336 | } | ||
1337 | |||
1338 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | ||
1339 | { | ||
1340 | u8 response[2]; | ||
1341 | u8 status; | ||
1342 | struct intel_output *intel_output = to_intel_output(connector); | ||
1343 | |||
1344 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | ||
1345 | status = intel_sdvo_read_response(intel_output, &response, 2); | ||
1346 | |||
1347 | DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); | ||
1348 | |||
1349 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1350 | return connector_status_unknown; | ||
1351 | |||
1352 | if ((response[0] != 0) || (response[1] != 0)) | ||
1353 | return connector_status_connected; | ||
1354 | else | ||
1355 | return connector_status_disconnected; | ||
1356 | } | ||
1357 | |||
1358 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | ||
1359 | { | ||
1360 | struct intel_output *intel_output = to_intel_output(connector); | ||
1361 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1362 | |||
1363 | /* set the bus switch and get the modes */ | ||
1364 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | ||
1365 | intel_ddc_get_modes(intel_output); | ||
1366 | |||
1367 | #if 0 | ||
1368 | struct drm_device *dev = encoder->dev; | ||
1369 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1370 | /* Mac mini hack. On this device, I get DDC through the analog, which | ||
1371 | * load-detects as disconnected. I fail to DDC through the SDVO DDC, | ||
1372 | * but it does load-detect as connected. So, just steal the DDC bits | ||
1373 | * from analog when we fail at finding it the right way. | ||
1374 | */ | ||
1375 | crt = xf86_config->output[0]; | ||
1376 | intel_output = crt->driver_private; | ||
1377 | if (intel_output->type == I830_OUTPUT_ANALOG && | ||
1378 | crt->funcs->detect(crt) == XF86OutputStatusDisconnected) { | ||
1379 | I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A"); | ||
1380 | edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus); | ||
1381 | xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true); | ||
1382 | } | ||
1383 | if (edid_mon) { | ||
1384 | xf86OutputSetEDID(output, edid_mon); | ||
1385 | modes = xf86OutputGetEDIDModes(output); | ||
1386 | } | ||
1387 | #endif | ||
1388 | } | ||
1389 | |||
1390 | /** | ||
1391 | * This function checks the current TV format, and chooses a default if | ||
1392 | * it hasn't been set. | ||
1393 | */ | ||
1394 | static void | ||
1395 | intel_sdvo_check_tv_format(struct intel_output *output) | ||
1396 | { | ||
1397 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | ||
1398 | struct intel_sdvo_tv_format format, unset; | ||
1399 | uint8_t status; | ||
1400 | |||
1401 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0); | ||
1402 | status = intel_sdvo_read_response(output, &format, sizeof(format)); | ||
1403 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1404 | return; | ||
1405 | |||
1406 | memset(&unset, 0, sizeof(unset)); | ||
1407 | if (memcmp(&format, &unset, sizeof(format))) { | ||
1408 | DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n", | ||
1409 | SDVO_NAME(dev_priv)); | ||
1410 | |||
1411 | format.ntsc_m = true; | ||
1412 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, NULL, 0); | ||
1413 | status = intel_sdvo_read_response(output, NULL, 0); | ||
1414 | } | ||
1415 | } | ||
1416 | |||
1417 | /* | ||
1418 | * Set of SDVO TV modes. | ||
1419 | * Note! This is in reply order (see loop in get_tv_modes). | ||
1420 | * XXX: all 60Hz refresh? | ||
1421 | */ | ||
1422 | struct drm_display_mode sdvo_tv_modes[] = { | ||
1423 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815680, 321, 384, 416, | ||
1424 | 200, 0, 232, 201, 233, 4196112, 0, | ||
1425 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1426 | { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814080, 321, 384, 416, | ||
1427 | 240, 0, 272, 241, 273, 4196112, 0, | ||
1428 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1429 | { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910080, 401, 464, 496, | ||
1430 | 300, 0, 332, 301, 333, 4196112, 0, | ||
1431 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1432 | { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913280, 641, 704, 736, | ||
1433 | 350, 0, 382, 351, 383, 4196112, 0, | ||
1434 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1435 | { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736, | ||
1436 | 400, 0, 432, 401, 433, 4196112, 0, | ||
1437 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1438 | { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736, | ||
1439 | 400, 0, 432, 401, 433, 4196112, 0, | ||
1440 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1441 | { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624000, 705, 768, 800, | ||
1442 | 480, 0, 512, 481, 513, 4196112, 0, | ||
1443 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1444 | { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232000, 705, 768, 800, | ||
1445 | 576, 0, 608, 577, 609, 4196112, 0, | ||
1446 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1447 | { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751680, 721, 784, 816, | ||
1448 | 350, 0, 382, 351, 383, 4196112, 0, | ||
1449 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1450 | { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199680, 721, 784, 816, | ||
1451 | 400, 0, 432, 401, 433, 4196112, 0, | ||
1452 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1453 | { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116480, 721, 784, 816, | ||
1454 | 480, 0, 512, 481, 513, 4196112, 0, | ||
1455 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1456 | { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054080, 721, 784, 816, | ||
1457 | 540, 0, 572, 541, 573, 4196112, 0, | ||
1458 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1459 | { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816640, 721, 784, 816, | ||
1460 | 576, 0, 608, 577, 609, 4196112, 0, | ||
1461 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1462 | { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570560, 769, 832, 864, | ||
1463 | 576, 0, 608, 577, 609, 4196112, 0, | ||
1464 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1465 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030080, 801, 864, 896, | ||
1466 | 600, 0, 632, 601, 633, 4196112, 0, | ||
1467 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1468 | { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581760, 833, 896, 928, | ||
1469 | 624, 0, 656, 625, 657, 4196112, 0, | ||
1470 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1471 | { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707040, 921, 984, 1016, | ||
1472 | 766, 0, 798, 767, 799, 4196112, 0, | ||
1473 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1474 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827200, 1025, 1088, 1120, | ||
1475 | 768, 0, 800, 769, 801, 4196112, 0, | ||
1476 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1477 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265920, 1281, 1344, 1376, | ||
1478 | 1024, 0, 1056, 1025, 1057, 4196112, 0, | ||
1479 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1480 | }; | ||
1481 | |||
1482 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | ||
1483 | { | ||
1484 | struct intel_output *output = to_intel_output(connector); | ||
1485 | uint32_t reply = 0; | ||
1486 | uint8_t status; | ||
1487 | int i = 0; | ||
1488 | |||
1489 | intel_sdvo_check_tv_format(output); | ||
1490 | |||
1491 | /* Read the list of supported input resolutions for the selected TV | ||
1492 | * format. | ||
1493 | */ | ||
1494 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, | ||
1495 | NULL, 0); | ||
1496 | status = intel_sdvo_read_response(output, &reply, 3); | ||
1497 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1498 | return; | ||
1499 | |||
1500 | for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) | ||
1501 | if (reply & (1 << i)) | ||
1502 | drm_mode_probed_add(connector, &sdvo_tv_modes[i]); | ||
1503 | } | ||
1504 | |||
1505 | static int intel_sdvo_get_modes(struct drm_connector *connector) | ||
1506 | { | ||
1507 | struct intel_output *output = to_intel_output(connector); | ||
1508 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
1509 | |||
1510 | if (sdvo_priv->is_tv) | ||
1511 | intel_sdvo_get_tv_modes(connector); | ||
1512 | else | ||
1513 | intel_sdvo_get_ddc_modes(connector); | ||
1514 | |||
1515 | if (list_empty(&connector->probed_modes)) | ||
1516 | return 0; | ||
1517 | return 1; | ||
1518 | } | ||
1519 | |||
1520 | static void intel_sdvo_destroy(struct drm_connector *connector) | ||
1521 | { | ||
1522 | struct intel_output *intel_output = to_intel_output(connector); | ||
1523 | |||
1524 | if (intel_output->i2c_bus) | ||
1525 | intel_i2c_destroy(intel_output->i2c_bus); | ||
1526 | drm_sysfs_connector_remove(connector); | ||
1527 | drm_connector_cleanup(connector); | ||
1528 | kfree(intel_output); | ||
1529 | } | ||
1530 | |||
1531 | static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { | ||
1532 | .dpms = intel_sdvo_dpms, | ||
1533 | .mode_fixup = intel_sdvo_mode_fixup, | ||
1534 | .prepare = intel_encoder_prepare, | ||
1535 | .mode_set = intel_sdvo_mode_set, | ||
1536 | .commit = intel_encoder_commit, | ||
1537 | }; | ||
1538 | |||
1539 | static const struct drm_connector_funcs intel_sdvo_connector_funcs = { | ||
1540 | .save = intel_sdvo_save, | ||
1541 | .restore = intel_sdvo_restore, | ||
1542 | .detect = intel_sdvo_detect, | ||
1543 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1544 | .destroy = intel_sdvo_destroy, | ||
1545 | }; | ||
1546 | |||
1547 | static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { | ||
1548 | .get_modes = intel_sdvo_get_modes, | ||
1549 | .mode_valid = intel_sdvo_mode_valid, | ||
1550 | .best_encoder = intel_best_encoder, | ||
1551 | }; | ||
1552 | |||
1553 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) | ||
1554 | { | ||
1555 | drm_encoder_cleanup(encoder); | ||
1556 | } | ||
1557 | |||
1558 | static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | ||
1559 | .destroy = intel_sdvo_enc_destroy, | ||
1560 | }; | ||
1561 | |||
1562 | |||
1563 | /** | ||
1564 | * Choose the appropriate DDC bus for the control bus switch command for this | ||
1565 | * SDVO output based on the controlled output. | ||
1566 | * | ||
1567 | * DDC bus number assignment is in a priority order of RGB outputs, then TMDS | ||
1568 | * outputs, then LVDS outputs. | ||
1569 | */ | ||
1570 | static void | ||
1571 | intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) | ||
1572 | { | ||
1573 | uint16_t mask = 0; | ||
1574 | unsigned int num_bits; | ||
1575 | |||
1576 | /* Make a mask of outputs less than or equal to our own priority in the | ||
1577 | * list. | ||
1578 | */ | ||
1579 | switch (dev_priv->controlled_output) { | ||
1580 | case SDVO_OUTPUT_LVDS1: | ||
1581 | mask |= SDVO_OUTPUT_LVDS1; | ||
1582 | case SDVO_OUTPUT_LVDS0: | ||
1583 | mask |= SDVO_OUTPUT_LVDS0; | ||
1584 | case SDVO_OUTPUT_TMDS1: | ||
1585 | mask |= SDVO_OUTPUT_TMDS1; | ||
1586 | case SDVO_OUTPUT_TMDS0: | ||
1587 | mask |= SDVO_OUTPUT_TMDS0; | ||
1588 | case SDVO_OUTPUT_RGB1: | ||
1589 | mask |= SDVO_OUTPUT_RGB1; | ||
1590 | case SDVO_OUTPUT_RGB0: | ||
1591 | mask |= SDVO_OUTPUT_RGB0; | ||
1592 | break; | ||
1593 | } | ||
1594 | |||
1595 | /* Count bits to find what number we are in the priority list. */ | ||
1596 | mask &= dev_priv->caps.output_flags; | ||
1597 | num_bits = hweight16(mask); | ||
1598 | if (num_bits > 3) { | ||
1599 | /* if more than 3 outputs, default to DDC bus 3 for now */ | ||
1600 | num_bits = 3; | ||
1601 | } | ||
1602 | |||
1603 | /* Corresponds to SDVO_CONTROL_BUS_DDCx */ | ||
1604 | dev_priv->ddc_bus = 1 << num_bits; | ||
1605 | } | ||
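A worked example of the fall-through mask above, written as a sketch rather than driver code: with capabilities advertising RGB0 and TMDS0 and TMDS0 as the controlled output, the switch accumulates TMDS0 | RGB1 | RGB0, the AND with the capability flags leaves two set bits, and the output lands on DDC bus 2 (an RGB output, being higher priority, would get DDC bus 1).

/* Sketch only; mirrors intel_sdvo_select_ddc_bus() for one concrete case. */
static u8 example_ddc_bus_for_tmds0(void)
{
	u16 caps_flags = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_TMDS0;
	u16 mask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB1 | SDVO_OUTPUT_RGB0;
	unsigned int num_bits = hweight16(mask & caps_flags);	/* == 2 */

	return 1 << num_bits;	/* 1 << 2 == SDVO_CONTROL_BUS_DDC2 */
}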
1606 | |||
1607 | static bool | ||
1608 | intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | ||
1609 | { | ||
1610 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
1611 | uint8_t status; | ||
1612 | |||
1613 | intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); | ||
1614 | |||
1615 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); | ||
1616 | status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); | ||
1617 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1618 | return false; | ||
1619 | return true; | ||
1620 | } | ||
1621 | |||
1622 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | ||
1623 | { | ||
1624 | struct drm_connector *connector; | ||
1625 | struct intel_output *intel_output; | ||
1626 | struct intel_sdvo_priv *sdvo_priv; | ||
1627 | struct intel_i2c_chan *i2cbus = NULL; | ||
1628 | int connector_type; | ||
1629 | u8 ch[0x40]; | ||
1630 | int i; | ||
1631 | int encoder_type, output_id; | ||
1632 | |||
1633 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | ||
1634 | if (!intel_output) { | ||
1635 | return false; | ||
1636 | } | ||
1637 | |||
1638 | connector = &intel_output->base; | ||
1639 | |||
1640 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | ||
1641 | DRM_MODE_CONNECTOR_Unknown); | ||
1642 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); | ||
1643 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); | ||
1644 | intel_output->type = INTEL_OUTPUT_SDVO; | ||
1645 | |||
1646 | connector->interlace_allowed = 0; | ||
1647 | connector->doublescan_allowed = 0; | ||
1648 | |||
1649 | /* setup the DDC bus. */ | ||
1650 | if (output_device == SDVOB) | ||
1651 | i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | ||
1652 | else | ||
1653 | i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | ||
1654 | |||
1655 | if (!i2cbus) | ||
1656 | goto err_connector; | ||
1657 | |||
1658 | sdvo_priv->i2c_bus = i2cbus; | ||
1659 | |||
1660 | if (output_device == SDVOB) { | ||
1661 | output_id = 1; | ||
1662 | sdvo_priv->i2c_bus->slave_addr = 0x38; | ||
1663 | } else { | ||
1664 | output_id = 2; | ||
1665 | sdvo_priv->i2c_bus->slave_addr = 0x39; | ||
1666 | } | ||
1667 | |||
1668 | sdvo_priv->output_device = output_device; | ||
1669 | intel_output->i2c_bus = i2cbus; | ||
1670 | intel_output->dev_priv = sdvo_priv; | ||
1671 | |||
1672 | |||
1673 | /* Read the regs to test if we can talk to the device */ | ||
1674 | for (i = 0; i < 0x40; i++) { | ||
1675 | if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { | ||
1676 | DRM_DEBUG("No SDVO device found on SDVO%c\n", | ||
1677 | output_device == SDVOB ? 'B' : 'C'); | ||
1678 | goto err_i2c; | ||
1679 | } | ||
1680 | } | ||
1681 | |||
1682 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | ||
1683 | |||
1684 | if (sdvo_priv->caps.output_flags & | ||
1685 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | ||
1686 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | ||
1687 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | ||
1688 | else | ||
1689 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | ||
1690 | |||
1691 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1692 | encoder_type = DRM_MODE_ENCODER_TMDS; | ||
1693 | connector_type = DRM_MODE_CONNECTOR_DVID; | ||
1694 | |||
1695 | if (intel_sdvo_get_supp_encode(intel_output, | ||
1696 | &sdvo_priv->encode) && | ||
1697 | intel_sdvo_get_digital_encoding_mode(intel_output) && | ||
1698 | sdvo_priv->is_hdmi) { | ||
1699 | /* enable hdmi encoding mode if supported */ | ||
1700 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | ||
1701 | intel_sdvo_set_colorimetry(intel_output, | ||
1702 | SDVO_COLORIMETRY_RGB256); | ||
1703 | connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
1704 | } | ||
1705 | } | ||
1706 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) | ||
1707 | { | ||
1708 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; | ||
1709 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1710 | encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
1711 | connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1712 | sdvo_priv->is_tv = true; | ||
1713 | intel_output->needs_tv_clock = true; | ||
1714 | } | ||
1715 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) | ||
1716 | { | ||
1717 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
1718 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1719 | encoder_type = DRM_MODE_ENCODER_DAC; | ||
1720 | connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1721 | } | ||
1722 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) | ||
1723 | { | ||
1724 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | ||
1725 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1726 | encoder_type = DRM_MODE_ENCODER_DAC; | ||
1727 | connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1728 | } | ||
1729 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) | ||
1730 | { | ||
1731 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | ||
1732 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1733 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1734 | connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1735 | } | ||
1736 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) | ||
1737 | { | ||
1738 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | ||
1739 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1740 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1741 | connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1742 | } | ||
1743 | else | ||
1744 | { | ||
1745 | unsigned char bytes[2]; | ||
1746 | |||
1747 | sdvo_priv->controlled_output = 0; | ||
1748 | memcpy (bytes, &sdvo_priv->caps.output_flags, 2); | ||
1749 | DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n", | ||
1750 | SDVO_NAME(sdvo_priv), | ||
1751 | bytes[0], bytes[1]); | ||
1752 | encoder_type = DRM_MODE_ENCODER_NONE; | ||
1753 | connector_type = DRM_MODE_CONNECTOR_Unknown; | ||
1754 | goto err_i2c; | ||
1755 | } | ||
1756 | |||
1757 | drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); | ||
1758 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); | ||
1759 | connector->connector_type = connector_type; | ||
1760 | |||
1761 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | ||
1762 | drm_sysfs_connector_add(connector); | ||
1763 | |||
1764 | intel_sdvo_select_ddc_bus(sdvo_priv); | ||
1765 | |||
1766 | /* Set the input timing to the screen. Assume always input 0. */ | ||
1767 | intel_sdvo_set_target_input(intel_output, true, false); | ||
1768 | |||
1769 | intel_sdvo_get_input_pixel_clock_range(intel_output, | ||
1770 | &sdvo_priv->pixel_clock_min, | ||
1771 | &sdvo_priv->pixel_clock_max); | ||
1772 | |||
1773 | |||
1774 | DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " | ||
1775 | "clock range %dMHz - %dMHz, " | ||
1776 | "input 1: %c, input 2: %c, " | ||
1777 | "output 1: %c, output 2: %c\n", | ||
1778 | SDVO_NAME(sdvo_priv), | ||
1779 | sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, | ||
1780 | sdvo_priv->caps.device_rev_id, | ||
1781 | sdvo_priv->pixel_clock_min / 1000, | ||
1782 | sdvo_priv->pixel_clock_max / 1000, | ||
1783 | (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', | ||
1784 | (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', | ||
1785 | /* check currently supported outputs */ | ||
1786 | sdvo_priv->caps.output_flags & | ||
1787 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', | ||
1788 | sdvo_priv->caps.output_flags & | ||
1789 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); | ||
1790 | |||
1791 | intel_output->ddc_bus = i2cbus; | ||
1792 | |||
1793 | return true; | ||
1794 | |||
1795 | err_i2c: | ||
1796 | intel_i2c_destroy(intel_output->i2c_bus); | ||
1797 | err_connector: | ||
1798 | drm_connector_cleanup(connector); | ||
1799 | kfree(intel_output); | ||
1800 | |||
1801 | return false; | ||
1802 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h new file mode 100644 index 000000000000..1117b9c151a6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
@@ -0,0 +1,719 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2007 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | */ | ||
26 | |||
27 | /** | ||
28 | * @file SDVO command definitions and structures. | ||
29 | */ | ||
30 | |||
31 | #define SDVO_OUTPUT_FIRST (0) | ||
32 | #define SDVO_OUTPUT_TMDS0 (1 << 0) | ||
33 | #define SDVO_OUTPUT_RGB0 (1 << 1) | ||
34 | #define SDVO_OUTPUT_CVBS0 (1 << 2) | ||
35 | #define SDVO_OUTPUT_SVID0 (1 << 3) | ||
36 | #define SDVO_OUTPUT_YPRPB0 (1 << 4) | ||
37 | #define SDVO_OUTPUT_SCART0 (1 << 5) | ||
38 | #define SDVO_OUTPUT_LVDS0 (1 << 6) | ||
39 | #define SDVO_OUTPUT_TMDS1 (1 << 8) | ||
40 | #define SDVO_OUTPUT_RGB1 (1 << 9) | ||
41 | #define SDVO_OUTPUT_CVBS1 (1 << 10) | ||
42 | #define SDVO_OUTPUT_SVID1 (1 << 11) | ||
43 | #define SDVO_OUTPUT_YPRPB1 (1 << 12) | ||
44 | #define SDVO_OUTPUT_SCART1 (1 << 13) | ||
45 | #define SDVO_OUTPUT_LVDS1 (1 << 14) | ||
46 | #define SDVO_OUTPUT_LAST (14) | ||
47 | |||
48 | struct intel_sdvo_caps { | ||
49 | u8 vendor_id; | ||
50 | u8 device_id; | ||
51 | u8 device_rev_id; | ||
52 | u8 sdvo_version_major; | ||
53 | u8 sdvo_version_minor; | ||
54 | unsigned int sdvo_inputs_mask:2; | ||
55 | unsigned int smooth_scaling:1; | ||
56 | unsigned int sharp_scaling:1; | ||
57 | unsigned int up_scaling:1; | ||
58 | unsigned int down_scaling:1; | ||
59 | unsigned int stall_support:1; | ||
60 | unsigned int pad:1; | ||
61 | u16 output_flags; | ||
62 | } __attribute__((packed)); | ||
63 | |||
64 | /** This matches the EDID DTD structure, more or less */ | ||
65 | struct intel_sdvo_dtd { | ||
66 | struct { | ||
67 | u16 clock; /**< pixel clock, in 10kHz units */ | ||
68 | u8 h_active; /**< lower 8 bits (pixels) */ | ||
69 | u8 h_blank; /**< lower 8 bits (pixels) */ | ||
70 | u8 h_high; /**< upper 4 bits each h_active, h_blank */ | ||
71 | u8 v_active; /**< lower 8 bits (lines) */ | ||
72 | u8 v_blank; /**< lower 8 bits (lines) */ | ||
73 | u8 v_high; /**< upper 4 bits each v_active, v_blank */ | ||
74 | } part1; | ||
75 | |||
76 | struct { | ||
77 | u8 h_sync_off; /**< lower 8 bits, from hblank start */ | ||
78 | u8 h_sync_width; /**< lower 8 bits (pixels) */ | ||
79 | /** lower 4 bits each vsync offset, vsync width */ | ||
80 | u8 v_sync_off_width; | ||
81 | /** | ||
82 | * 2 high bits of hsync offset, 2 high bits of hsync width, | ||
83 | * bits 4-5 of vsync offset, and 2 high bits of vsync width. | ||
84 | */ | ||
85 | u8 sync_off_width_high; | ||
86 | u8 dtd_flags; | ||
87 | u8 sdvo_flags; | ||
88 | /** bits 6-7 of vsync offset at bits 6-7 */ | ||
89 | u8 v_sync_off_high; | ||
90 | u8 reserved; | ||
91 | } part2; | ||
92 | } __attribute__((packed)); | ||
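As the field comments above describe, the 12-bit active and blanking values are split across a low byte and a shared high nibble. A minimal sketch of packing part1, with hypothetical width/blanking arguments (the driver's real mode-to-DTD conversion is not part of this hunk):

/* Sketch: packing 12-bit horizontal/vertical values into part1. */
static void example_pack_dtd_part1(struct intel_sdvo_dtd *dtd, u16 clock_10khz,
				   u16 width, u16 h_blank, u16 height, u16 v_blank)
{
	dtd->part1.clock = clock_10khz;		/* pixel clock, 10 kHz units */
	dtd->part1.h_active = width & 0xff;
	dtd->part1.h_blank = h_blank & 0xff;
	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | ((h_blank >> 8) & 0xf);
	dtd->part1.v_active = height & 0xff;
	dtd->part1.v_blank = v_blank & 0xff;
	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | ((v_blank >> 8) & 0xf);
}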
93 | |||
94 | struct intel_sdvo_pixel_clock_range { | ||
95 | u16 min; /**< pixel clock, in 10kHz units */ | ||
96 | u16 max; /**< pixel clock, in 10kHz units */ | ||
97 | } __attribute__((packed)); | ||
98 | |||
99 | struct intel_sdvo_preferred_input_timing_args { | ||
100 | u16 clock; | ||
101 | u16 width; | ||
102 | u16 height; | ||
103 | } __attribute__((packed)); | ||
104 | |||
105 | /* I2C registers for SDVO */ | ||
106 | #define SDVO_I2C_ARG_0 0x07 | ||
107 | #define SDVO_I2C_ARG_1 0x06 | ||
108 | #define SDVO_I2C_ARG_2 0x05 | ||
109 | #define SDVO_I2C_ARG_3 0x04 | ||
110 | #define SDVO_I2C_ARG_4 0x03 | ||
111 | #define SDVO_I2C_ARG_5 0x02 | ||
112 | #define SDVO_I2C_ARG_6 0x01 | ||
113 | #define SDVO_I2C_ARG_7 0x00 | ||
114 | #define SDVO_I2C_OPCODE 0x08 | ||
115 | #define SDVO_I2C_CMD_STATUS 0x09 | ||
116 | #define SDVO_I2C_RETURN_0 0x0a | ||
117 | #define SDVO_I2C_RETURN_1 0x0b | ||
118 | #define SDVO_I2C_RETURN_2 0x0c | ||
119 | #define SDVO_I2C_RETURN_3 0x0d | ||
120 | #define SDVO_I2C_RETURN_4 0x0e | ||
121 | #define SDVO_I2C_RETURN_5 0x0f | ||
122 | #define SDVO_I2C_RETURN_6 0x10 | ||
123 | #define SDVO_I2C_RETURN_7 0x11 | ||
124 | #define SDVO_I2C_VENDOR_BEGIN 0x20 | ||
125 | |||
126 | /* Status results */ | ||
127 | #define SDVO_CMD_STATUS_POWER_ON 0x0 | ||
128 | #define SDVO_CMD_STATUS_SUCCESS 0x1 | ||
129 | #define SDVO_CMD_STATUS_NOTSUPP 0x2 | ||
130 | #define SDVO_CMD_STATUS_INVALID_ARG 0x3 | ||
131 | #define SDVO_CMD_STATUS_PENDING 0x4 | ||
132 | #define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 | ||
133 | #define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 | ||
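The fixed offsets above carry a command's arguments, opcode, status byte, and return bytes across the control I2C link. A minimal sketch of the sequence performed by the driver's intel_sdvo_write_cmd()/intel_sdvo_read_response() (those functions are outside this hunk; sdvo_write_byte()/sdvo_read_byte() are hypothetical stand-ins for the raw byte transfers):

/* Sketch of the command flow implied by the register map above. */
static u8 example_sdvo_command(const u8 *args, int arg_len, u8 opcode,
			       u8 *ret, int ret_len)
{
	u8 status;
	int i;

	for (i = 0; i < arg_len; i++)			/* args fill ARG_0 downward */
		sdvo_write_byte(SDVO_I2C_ARG_0 - i, args[i]);
	sdvo_write_byte(SDVO_I2C_OPCODE, opcode);	/* issue the command */

	do {						/* poll until it completes */
		sdvo_read_byte(SDVO_I2C_CMD_STATUS, &status);
	} while (status == SDVO_CMD_STATUS_PENDING);

	for (i = 0; i < ret_len; i++)			/* results start at RETURN_0 */
		sdvo_read_byte(SDVO_I2C_RETURN_0 + i, &ret[i]);
	return status;
}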
134 | |||
135 | /* SDVO commands, argument/result registers */ | ||
136 | |||
137 | #define SDVO_CMD_RESET 0x01 | ||
138 | |||
139 | /** Returns a struct intel_sdvo_caps */ | ||
140 | #define SDVO_CMD_GET_DEVICE_CAPS 0x02 | ||
141 | |||
142 | #define SDVO_CMD_GET_FIRMWARE_REV 0x86 | ||
143 | # define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 | ||
144 | # define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 | ||
145 | # define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 | ||
146 | |||
147 | /** | ||
148 | * Reports which inputs are trained (managed to sync). | ||
149 | * | ||
150 | * Devices must have trained within 2 vsyncs of a mode change. | ||
151 | */ | ||
152 | #define SDVO_CMD_GET_TRAINED_INPUTS 0x03 | ||
153 | struct intel_sdvo_get_trained_inputs_response { | ||
154 | unsigned int input0_trained:1; | ||
155 | unsigned int input1_trained:1; | ||
156 | unsigned int pad:6; | ||
157 | } __attribute__((packed)); | ||
158 | |||
159 | /** Returns a struct intel_sdvo_output_flags of active outputs. */ | ||
160 | #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 | ||
161 | |||
162 | /** | ||
163 | * Sets the current set of active outputs. | ||
164 | * | ||
165 | * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP | ||
166 | * on multi-output devices. | ||
167 | */ | ||
168 | #define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 | ||
169 | |||
170 | /** | ||
171 | * Returns the current mapping of SDVO inputs to outputs on the device. | ||
172 | * | ||
173 | * Returns two struct intel_sdvo_output_flags structures. | ||
174 | */ | ||
175 | #define SDVO_CMD_GET_IN_OUT_MAP 0x06 | ||
176 | struct intel_sdvo_in_out_map { | ||
177 | u16 in0, in1; | ||
178 | }; | ||
179 | |||
180 | /** | ||
181 | * Sets the current mapping of SDVO inputs to outputs on the device. | ||
182 | * | ||
183 | * Takes two struct intel_sdvo_output_flags structures. | ||
184 | */ | ||
185 | #define SDVO_CMD_SET_IN_OUT_MAP 0x07 | ||
186 | |||
187 | /** | ||
188 | * Returns a struct intel_sdvo_output_flags of attached displays. | ||
189 | */ | ||
190 | #define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b | ||
191 | |||
192 | /** | ||
193 | * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging. | ||
194 | */ | ||
195 | #define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c | ||
196 | |||
197 | /** | ||
198 | * Takes a struct intel_sdvo_output_flags. | ||
199 | */ | ||
200 | #define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d | ||
201 | |||
202 | /** | ||
203 | * Returns a struct intel_sdvo_output_flags of displays with hot plug | ||
204 | * interrupts enabled. | ||
205 | */ | ||
206 | #define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e | ||
207 | |||
208 | #define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f | ||
209 | struct intel_sdvo_get_interrupt_event_source_response { | ||
210 | u16 interrupt_status; | ||
211 | unsigned int ambient_light_interrupt:1; | ||
212 | unsigned int hdmi_audio_encrypt_change:1; | ||
213 | unsigned int pad:6; | ||
214 | } __attribute__((packed)); | ||
215 | |||
216 | /** | ||
217 | * Selects which input is affected by future input commands. | ||
218 | * | ||
219 | * Commands affected include SET_INPUT_TIMINGS_PART[12], | ||
220 | * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12], | ||
221 | * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS. | ||
222 | */ | ||
223 | #define SDVO_CMD_SET_TARGET_INPUT 0x10 | ||
224 | struct intel_sdvo_set_target_input_args { | ||
225 | unsigned int target_1:1; | ||
226 | unsigned int pad:7; | ||
227 | } __attribute__((packed)); | ||
228 | |||
229 | /** | ||
230 | * Takes a struct intel_sdvo_output_flags of which outputs are targeted by | ||
231 | * future output commands. | ||
232 | * | ||
233 | * Affected commands include SET_OUTPUT_TIMINGS_PART[12], | ||
234 | * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE. | ||
235 | */ | ||
236 | #define SDVO_CMD_SET_TARGET_OUTPUT 0x11 | ||
237 | |||
238 | #define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12 | ||
239 | #define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13 | ||
240 | #define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14 | ||
241 | #define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15 | ||
242 | #define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16 | ||
243 | #define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17 | ||
244 | #define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18 | ||
245 | #define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19 | ||
246 | /* Part 1 */ | ||
247 | # define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0 | ||
248 | # define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1 | ||
249 | # define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2 | ||
250 | # define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3 | ||
251 | # define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4 | ||
252 | # define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5 | ||
253 | # define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6 | ||
254 | # define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7 | ||
255 | /* Part 2 */ | ||
256 | # define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0 | ||
257 | # define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1 | ||
258 | # define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2 | ||
259 | # define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3 | ||
260 | # define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4 | ||
261 | # define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7) | ||
262 | # define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5) | ||
263 | # define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3) | ||
264 | # define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1) | ||
265 | # define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5 | ||
266 | # define SDVO_DTD_SDVO_FLAG_STALL (1 << 7) | ||
267 | # define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6) | ||
268 | # define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6) | ||
269 | # define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4) | ||
270 | # define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4) | ||
271 | # define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4) | ||
272 | # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) | ||
273 | # define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 | ||
274 | |||
275 | /** | ||
276 | * Generates a DTD based on the given width, height, and flags. | ||
277 | * | ||
278 | * This will be supported by any device supporting scaling or interlaced | ||
279 | * modes. | ||
280 | */ | ||
281 | #define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a | ||
282 | # define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0 | ||
283 | # define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1 | ||
284 | # define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2 | ||
285 | # define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3 | ||
286 | # define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4 | ||
287 | # define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5 | ||
288 | # define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6 | ||
289 | # define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0) | ||
290 | # define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1) | ||
291 | |||
292 | #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b | ||
293 | #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c | ||
294 | |||
295 | /** Returns a struct intel_sdvo_pixel_clock_range */ | ||
296 | #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d | ||
297 | /** Returns a struct intel_sdvo_pixel_clock_range */ | ||
298 | #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e | ||
299 | |||
300 | /** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ | ||
301 | #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f | ||
302 | |||
303 | /** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ | ||
304 | #define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 | ||
305 | /** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ | ||
306 | #define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 | ||
307 | # define SDVO_CLOCK_RATE_MULT_1X (1 << 0) | ||
308 | # define SDVO_CLOCK_RATE_MULT_2X (1 << 1) | ||
309 | # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) | ||
310 | |||
311 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 | ||
312 | /** 5 bytes of bit flags for TV formats shared by all TV format functions */ | ||
313 | struct intel_sdvo_tv_format { | ||
314 | unsigned int ntsc_m:1; | ||
315 | unsigned int ntsc_j:1; | ||
316 | unsigned int ntsc_443:1; | ||
317 | unsigned int pal_b:1; | ||
318 | unsigned int pal_d:1; | ||
319 | unsigned int pal_g:1; | ||
320 | unsigned int pal_h:1; | ||
321 | unsigned int pal_i:1; | ||
322 | |||
323 | unsigned int pal_m:1; | ||
324 | unsigned int pal_n:1; | ||
325 | unsigned int pal_nc:1; | ||
326 | unsigned int pal_60:1; | ||
327 | unsigned int secam_b:1; | ||
328 | unsigned int secam_d:1; | ||
329 | unsigned int secam_g:1; | ||
330 | unsigned int secam_k:1; | ||
331 | |||
332 | unsigned int secam_k1:1; | ||
333 | unsigned int secam_l:1; | ||
334 | unsigned int secam_60:1; | ||
335 | unsigned int hdtv_std_smpte_240m_1080i_59:1; | ||
336 | unsigned int hdtv_std_smpte_240m_1080i_60:1; | ||
337 | unsigned int hdtv_std_smpte_260m_1080i_59:1; | ||
338 | unsigned int hdtv_std_smpte_260m_1080i_60:1; | ||
339 | unsigned int hdtv_std_smpte_274m_1080i_50:1; | ||
340 | |||
341 | unsigned int hdtv_std_smpte_274m_1080i_59:1; | ||
342 | unsigned int hdtv_std_smpte_274m_1080i_60:1; | ||
343 | unsigned int hdtv_std_smpte_274m_1080p_23:1; | ||
344 | unsigned int hdtv_std_smpte_274m_1080p_24:1; | ||
345 | unsigned int hdtv_std_smpte_274m_1080p_25:1; | ||
346 | unsigned int hdtv_std_smpte_274m_1080p_29:1; | ||
347 | unsigned int hdtv_std_smpte_274m_1080p_30:1; | ||
348 | unsigned int hdtv_std_smpte_274m_1080p_50:1; | ||
349 | |||
350 | unsigned int hdtv_std_smpte_274m_1080p_59:1; | ||
351 | unsigned int hdtv_std_smpte_274m_1080p_60:1; | ||
352 | unsigned int hdtv_std_smpte_295m_1080i_50:1; | ||
353 | unsigned int hdtv_std_smpte_295m_1080p_50:1; | ||
354 | unsigned int hdtv_std_smpte_296m_720p_59:1; | ||
355 | unsigned int hdtv_std_smpte_296m_720p_60:1; | ||
356 | unsigned int hdtv_std_smpte_296m_720p_50:1; | ||
357 | unsigned int hdtv_std_smpte_293m_480p_59:1; | ||
358 | |||
359 | unsigned int hdtv_std_smpte_170m_480i_59:1; | ||
360 | unsigned int hdtv_std_iturbt601_576i_50:1; | ||
361 | unsigned int hdtv_std_iturbt601_576p_50:1; | ||
362 | unsigned int hdtv_std_eia_7702a_480i_60:1; | ||
363 | unsigned int hdtv_std_eia_7702a_480p_60:1; | ||
364 | unsigned int pad:3; | ||
365 | } __attribute__((packed)); | ||
366 | |||
367 | #define SDVO_CMD_GET_TV_FORMAT 0x28 | ||
368 | |||
369 | #define SDVO_CMD_SET_TV_FORMAT 0x29 | ||
370 | |||
371 | /** Returns the resolutions that can be used with the given TV format */ | ||
372 | #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 | ||
373 | struct intel_sdvo_sdtv_resolution_request { | ||
374 | unsigned int ntsc_m:1; | ||
375 | unsigned int ntsc_j:1; | ||
376 | unsigned int ntsc_443:1; | ||
377 | unsigned int pal_b:1; | ||
378 | unsigned int pal_d:1; | ||
379 | unsigned int pal_g:1; | ||
380 | unsigned int pal_h:1; | ||
381 | unsigned int pal_i:1; | ||
382 | |||
383 | unsigned int pal_m:1; | ||
384 | unsigned int pal_n:1; | ||
385 | unsigned int pal_nc:1; | ||
386 | unsigned int pal_60:1; | ||
387 | unsigned int secam_b:1; | ||
388 | unsigned int secam_d:1; | ||
389 | unsigned int secam_g:1; | ||
390 | unsigned int secam_k:1; | ||
391 | |||
392 | unsigned int secam_k1:1; | ||
393 | unsigned int secam_l:1; | ||
394 | unsigned int secam_60:1; | ||
395 | unsigned int pad:5; | ||
396 | } __attribute__((packed)); | ||
397 | |||
398 | struct intel_sdvo_sdtv_resolution_reply { | ||
399 | unsigned int res_320x200:1; | ||
400 | unsigned int res_320x240:1; | ||
401 | unsigned int res_400x300:1; | ||
402 | unsigned int res_640x350:1; | ||
403 | unsigned int res_640x400:1; | ||
404 | unsigned int res_640x480:1; | ||
405 | unsigned int res_704x480:1; | ||
406 | unsigned int res_704x576:1; | ||
407 | |||
408 | unsigned int res_720x350:1; | ||
409 | unsigned int res_720x400:1; | ||
410 | unsigned int res_720x480:1; | ||
411 | unsigned int res_720x540:1; | ||
412 | unsigned int res_720x576:1; | ||
413 | unsigned int res_768x576:1; | ||
414 | unsigned int res_800x600:1; | ||
415 | unsigned int res_832x624:1; | ||
416 | |||
417 | unsigned int res_920x766:1; | ||
418 | unsigned int res_1024x768:1; | ||
419 | unsigned int res_1280x1024:1; | ||
420 | unsigned int pad:5; | ||
421 | } __attribute__((packed)); | ||
422 | |||
423 | /* Get supported resolutions with square pixel aspect ratio that can be | ||
424 | scaled for the requested HDTV format */ | ||
425 | #define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 | ||
426 | |||
427 | struct intel_sdvo_hdtv_resolution_request { | ||
428 | unsigned int hdtv_std_smpte_240m_1080i_59:1; | ||
429 | unsigned int hdtv_std_smpte_240m_1080i_60:1; | ||
430 | unsigned int hdtv_std_smpte_260m_1080i_59:1; | ||
431 | unsigned int hdtv_std_smpte_260m_1080i_60:1; | ||
432 | unsigned int hdtv_std_smpte_274m_1080i_50:1; | ||
433 | unsigned int hdtv_std_smpte_274m_1080i_59:1; | ||
434 | unsigned int hdtv_std_smpte_274m_1080i_60:1; | ||
435 | unsigned int hdtv_std_smpte_274m_1080p_23:1; | ||
436 | |||
437 | unsigned int hdtv_std_smpte_274m_1080p_24:1; | ||
438 | unsigned int hdtv_std_smpte_274m_1080p_25:1; | ||
439 | unsigned int hdtv_std_smpte_274m_1080p_29:1; | ||
440 | unsigned int hdtv_std_smpte_274m_1080p_30:1; | ||
441 | unsigned int hdtv_std_smpte_274m_1080p_50:1; | ||
442 | unsigned int hdtv_std_smpte_274m_1080p_59:1; | ||
443 | unsigned int hdtv_std_smpte_274m_1080p_60:1; | ||
444 | unsigned int hdtv_std_smpte_295m_1080i_50:1; | ||
445 | |||
446 | unsigned int hdtv_std_smpte_295m_1080p_50:1; | ||
447 | unsigned int hdtv_std_smpte_296m_720p_59:1; | ||
448 | unsigned int hdtv_std_smpte_296m_720p_60:1; | ||
449 | unsigned int hdtv_std_smpte_296m_720p_50:1; | ||
450 | unsigned int hdtv_std_smpte_293m_480p_59:1; | ||
451 | unsigned int hdtv_std_smpte_170m_480i_59:1; | ||
452 | unsigned int hdtv_std_iturbt601_576i_50:1; | ||
453 | unsigned int hdtv_std_iturbt601_576p_50:1; | ||
454 | |||
455 | unsigned int hdtv_std_eia_7702a_480i_60:1; | ||
456 | unsigned int hdtv_std_eia_7702a_480p_60:1; | ||
457 | unsigned int pad:6; | ||
458 | } __attribute__((packed)); | ||
459 | |||
460 | struct intel_sdvo_hdtv_resolution_reply { | ||
461 | unsigned int res_640x480:1; | ||
462 | unsigned int res_800x600:1; | ||
463 | unsigned int res_1024x768:1; | ||
464 | unsigned int res_1280x960:1; | ||
465 | unsigned int res_1400x1050:1; | ||
466 | unsigned int res_1600x1200:1; | ||
467 | unsigned int res_1920x1440:1; | ||
468 | unsigned int res_2048x1536:1; | ||
469 | |||
470 | unsigned int res_2560x1920:1; | ||
471 | unsigned int res_3200x2400:1; | ||
472 | unsigned int res_3840x2880:1; | ||
473 | unsigned int pad1:5; | ||
474 | |||
475 | unsigned int res_848x480:1; | ||
476 | unsigned int res_1064x600:1; | ||
477 | unsigned int res_1280x720:1; | ||
478 | unsigned int res_1360x768:1; | ||
479 | unsigned int res_1704x960:1; | ||
480 | unsigned int res_1864x1050:1; | ||
481 | unsigned int res_1920x1080:1; | ||
482 | unsigned int res_2128x1200:1; | ||
483 | |||
484 | unsigned int res_2560x1400:1; | ||
485 | unsigned int res_2728x1536:1; | ||
486 | unsigned int res_3408x1920:1; | ||
487 | unsigned int res_4264x2400:1; | ||
488 | unsigned int res_5120x2880:1; | ||
489 | unsigned int pad2:3; | ||
490 | |||
491 | unsigned int res_768x480:1; | ||
492 | unsigned int res_960x600:1; | ||
493 | unsigned int res_1152x720:1; | ||
494 | unsigned int res_1124x768:1; | ||
495 | unsigned int res_1536x960:1; | ||
496 | unsigned int res_1680x1050:1; | ||
497 | unsigned int res_1728x1080:1; | ||
498 | unsigned int res_1920x1200:1; | ||
499 | |||
500 | unsigned int res_2304x1440:1; | ||
501 | unsigned int res_2456x1536:1; | ||
502 | unsigned int res_3072x1920:1; | ||
503 | unsigned int res_3840x2400:1; | ||
504 | unsigned int res_4608x2880:1; | ||
505 | unsigned int pad3:3; | ||
506 | |||
507 | unsigned int res_1280x1024:1; | ||
508 | unsigned int pad4:7; | ||
509 | |||
510 | unsigned int res_1280x768:1; | ||
511 | unsigned int pad5:7; | ||
512 | } __attribute__((packed)); | ||
513 | |||
514 | /* Get supported power states: returns info for the encoder and monitor; relies | ||
515 | on the last SetTargetInput and SetTargetOutput calls */ | ||
516 | #define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a | ||
517 | /* Get power state: returns info for the encoder and monitor; relies on the | ||
518 | last SetTargetInput and SetTargetOutput calls */ | ||
519 | #define SDVO_CMD_GET_POWER_STATE 0x2b | ||
520 | #define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b | ||
521 | #define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c | ||
522 | # define SDVO_ENCODER_STATE_ON (1 << 0) | ||
523 | # define SDVO_ENCODER_STATE_STANDBY (1 << 1) | ||
524 | # define SDVO_ENCODER_STATE_SUSPEND (1 << 2) | ||
525 | # define SDVO_ENCODER_STATE_OFF (1 << 3) | ||
526 | # define SDVO_MONITOR_STATE_ON (1 << 4) | ||
527 | # define SDVO_MONITOR_STATE_STANDBY (1 << 5) | ||
528 | # define SDVO_MONITOR_STATE_SUSPEND (1 << 6) | ||
529 | # define SDVO_MONITOR_STATE_OFF (1 << 7) | ||
530 | |||
531 | #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d | ||
532 | #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e | ||
533 | #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f | ||
534 | /** | ||
535 | * The panel power sequencing parameters are in units of milliseconds. | ||
536 | * The high fields are bits 8:9 of the 10-bit values. | ||
537 | */ | ||
538 | struct sdvo_panel_power_sequencing { | ||
539 | u8 t0; | ||
540 | u8 t1; | ||
541 | u8 t2; | ||
542 | u8 t3; | ||
543 | u8 t4; | ||
544 | |||
545 | unsigned int t0_high:2; | ||
546 | unsigned int t1_high:2; | ||
547 | unsigned int t2_high:2; | ||
548 | unsigned int t3_high:2; | ||
549 | |||
550 | unsigned int t4_high:2; | ||
551 | unsigned int pad:6; | ||
552 | } __attribute__((packed)); | ||
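Each delay is a 10-bit millisecond value split between the byte fields and the 2-bit *_high fields. A one-line sketch of reassembling t0 (the pps pointer is hypothetical):

/* Sketch: t0 in milliseconds = high 2 bits (9:8) glued onto the low byte. */
static u16 example_pps_t0_ms(const struct sdvo_panel_power_sequencing *pps)
{
	return ((u16)pps->t0_high << 8) | pps->t0;
}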
553 | |||
554 | #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 | ||
555 | struct sdvo_max_backlight_reply { | ||
556 | u8 max_value; | ||
557 | u8 default_value; | ||
558 | } __attribute__((packed)); | ||
559 | |||
560 | #define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 | ||
561 | #define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 | ||
562 | |||
563 | #define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 | ||
564 | struct sdvo_get_ambient_light_reply { | ||
565 | u16 trip_low; | ||
566 | u16 trip_high; | ||
567 | u16 value; | ||
568 | } __attribute__((packed)); | ||
569 | #define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 | ||
570 | struct sdvo_set_ambient_light_reply { | ||
571 | u16 trip_low; | ||
572 | u16 trip_high; | ||
573 | unsigned int enable:1; | ||
574 | unsigned int pad:7; | ||
575 | } __attribute__((packed)); | ||
576 | |||
577 | /* Set display power state */ | ||
578 | #define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d | ||
579 | # define SDVO_DISPLAY_STATE_ON (1 << 0) | ||
580 | # define SDVO_DISPLAY_STATE_STANDBY (1 << 1) | ||
581 | # define SDVO_DISPLAY_STATE_SUSPEND (1 << 2) | ||
582 | # define SDVO_DISPLAY_STATE_OFF (1 << 3) | ||
583 | |||
584 | #define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 | ||
585 | struct intel_sdvo_enhancements_reply { | ||
586 | unsigned int flicker_filter:1; | ||
587 | unsigned int flicker_filter_adaptive:1; | ||
588 | unsigned int flicker_filter_2d:1; | ||
589 | unsigned int saturation:1; | ||
590 | unsigned int hue:1; | ||
591 | unsigned int brightness:1; | ||
592 | unsigned int contrast:1; | ||
593 | unsigned int overscan_h:1; | ||
594 | |||
595 | unsigned int overscan_v:1; | ||
596 | unsigned int position_h:1; | ||
597 | unsigned int position_v:1; | ||
598 | unsigned int sharpness:1; | ||
599 | unsigned int dot_crawl:1; | ||
600 | unsigned int dither:1; | ||
601 | unsigned int max_tv_chroma_filter:1; | ||
602 | unsigned int max_tv_luma_filter:1; | ||
603 | } __attribute__((packed)); | ||
604 | |||
605 | /* Picture enhancement limits below are dependent on the current TV format, | ||
606 | * and thus need to be queried and set after it. | ||
607 | */ | ||
608 | #define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d | ||
609 | #define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b | ||
610 | #define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52 | ||
611 | #define SDVO_CMD_GET_MAX_SATURATION 0x55 | ||
612 | #define SDVO_CMD_GET_MAX_HUE 0x58 | ||
613 | #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b | ||
614 | #define SDVO_CMD_GET_MAX_CONTRAST 0x5e | ||
615 | #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 | ||
616 | #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 | ||
617 | #define SDVO_CMD_GET_MAX_POSITION_H 0x67 | ||
618 | #define SDVO_CMD_GET_MAX_POSITION_V 0x6a | ||
619 | #define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d | ||
620 | #define SDVO_CMD_GET_MAX_TV_CHROMA 0x74 | ||
621 | #define SDVO_CMD_GET_MAX_TV_LUMA 0x77 | ||
622 | struct intel_sdvo_enhancement_limits_reply { | ||
623 | u16 max_value; | ||
624 | u16 default_value; | ||
625 | } __attribute__((packed)); | ||
626 | |||
627 | #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f | ||
628 | #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 | ||
629 | # define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0) | ||
630 | # define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0) | ||
631 | # define SDVO_LVDS_CONNECTOR_SPWG (0 << 2) | ||
632 | # define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2) | ||
633 | # define SDVO_LVDS_SINGLE_CHANNEL (0 << 4) | ||
634 | # define SDVO_LVDS_DUAL_CHANNEL (1 << 4) | ||
635 | |||
636 | #define SDVO_CMD_GET_FLICKER_FILTER 0x4e | ||
637 | #define SDVO_CMD_SET_FLICKER_FILTER 0x4f | ||
638 | #define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50 | ||
639 | #define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51 | ||
640 | #define SDVO_CMD_GET_2D_FLICKER_FITER 0x53 | ||
641 | #define SDVO_CMD_SET_2D_FLICKER_FITER 0x54 | ||
642 | #define SDVO_CMD_GET_SATURATION 0x56 | ||
643 | #define SDVO_CMD_SET_SATURATION 0x57 | ||
644 | #define SDVO_CMD_GET_HUE 0x59 | ||
645 | #define SDVO_CMD_SET_HUE 0x5a | ||
646 | #define SDVO_CMD_GET_BRIGHTNESS 0x5c | ||
647 | #define SDVO_CMD_SET_BRIGHTNESS 0x5d | ||
648 | #define SDVO_CMD_GET_CONTRAST 0x5f | ||
649 | #define SDVO_CMD_SET_CONTRAST 0x60 | ||
650 | #define SDVO_CMD_GET_OVERSCAN_H 0x62 | ||
651 | #define SDVO_CMD_SET_OVERSCAN_H 0x63 | ||
652 | #define SDVO_CMD_GET_OVERSCAN_V 0x65 | ||
653 | #define SDVO_CMD_SET_OVERSCAN_V 0x66 | ||
654 | #define SDVO_CMD_GET_POSITION_H 0x68 | ||
655 | #define SDVO_CMD_SET_POSITION_H 0x69 | ||
656 | #define SDVO_CMD_GET_POSITION_V 0x6b | ||
657 | #define SDVO_CMD_SET_POSITION_V 0x6c | ||
658 | #define SDVO_CMD_GET_SHARPNESS 0x6e | ||
659 | #define SDVO_CMD_SET_SHARPNESS 0x6f | ||
660 | #define SDVO_CMD_GET_TV_CHROMA 0x75 | ||
661 | #define SDVO_CMD_SET_TV_CHROMA 0x76 | ||
662 | #define SDVO_CMD_GET_TV_LUMA 0x78 | ||
663 | #define SDVO_CMD_SET_TV_LUMA 0x79 | ||
664 | struct intel_sdvo_enhancements_arg { | ||
665 | u16 value; | ||
666 | } __attribute__((packed)); | ||
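The enhancement controls above follow a common triplet: query the limit with GET_MAX_*, then GET/SET the 16-bit value. A minimal sketch for brightness, assuming the control was reported in SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS and reusing the command helpers from intel_sdvo.c:

/* Sketch: clamp to the reported maximum, then set the value. */
static bool example_set_brightness(struct intel_output *output, u16 value)
{
	struct intel_sdvo_enhancement_limits_reply limits;
	struct intel_sdvo_enhancements_arg arg;

	intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
	if (intel_sdvo_read_response(output, &limits, sizeof(limits)) !=
	    SDVO_CMD_STATUS_SUCCESS)
		return false;

	arg.value = value > limits.max_value ? limits.max_value : value;
	intel_sdvo_write_cmd(output, SDVO_CMD_SET_BRIGHTNESS, &arg, sizeof(arg));
	return intel_sdvo_read_response(output, NULL, 0) ==
	       SDVO_CMD_STATUS_SUCCESS;
}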
667 | |||
668 | #define SDVO_CMD_GET_DOT_CRAWL 0x70 | ||
669 | #define SDVO_CMD_SET_DOT_CRAWL 0x71 | ||
670 | # define SDVO_DOT_CRAWL_ON (1 << 0) | ||
671 | # define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1) | ||
672 | |||
673 | #define SDVO_CMD_GET_DITHER 0x72 | ||
674 | #define SDVO_CMD_SET_DITHER 0x73 | ||
675 | # define SDVO_DITHER_ON (1 << 0) | ||
676 | # define SDVO_DITHER_DEFAULT_ON (1 << 1) | ||
677 | |||
678 | #define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a | ||
679 | # define SDVO_CONTROL_BUS_PROM (1 << 0) | ||
680 | # define SDVO_CONTROL_BUS_DDC1 (1 << 1) | ||
681 | # define SDVO_CONTROL_BUS_DDC2 (1 << 2) | ||
682 | # define SDVO_CONTROL_BUS_DDC3 (1 << 3) | ||
683 | |||
684 | /* HDMI op codes */ | ||
685 | #define SDVO_CMD_GET_SUPP_ENCODE 0x9d | ||
686 | #define SDVO_CMD_GET_ENCODE 0x9e | ||
687 | #define SDVO_CMD_SET_ENCODE 0x9f | ||
688 | #define SDVO_ENCODE_DVI 0x0 | ||
689 | #define SDVO_ENCODE_HDMI 0x1 | ||
690 | #define SDVO_CMD_SET_PIXEL_REPLI 0x8b | ||
691 | #define SDVO_CMD_GET_PIXEL_REPLI 0x8c | ||
692 | #define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d | ||
693 | #define SDVO_CMD_SET_COLORIMETRY 0x8e | ||
694 | #define SDVO_COLORIMETRY_RGB256 0x0 | ||
695 | #define SDVO_COLORIMETRY_RGB220 0x1 | ||
696 | #define SDVO_COLORIMETRY_YCrCb422 0x3 | ||
697 | #define SDVO_COLORIMETRY_YCrCb444 0x4 | ||
698 | #define SDVO_CMD_GET_COLORIMETRY 0x8f | ||
699 | #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 | ||
700 | #define SDVO_CMD_SET_AUDIO_STAT 0x91 | ||
701 | #define SDVO_CMD_GET_AUDIO_STAT 0x92 | ||
702 | #define SDVO_CMD_SET_HBUF_INDEX 0x93 | ||
703 | #define SDVO_CMD_GET_HBUF_INDEX 0x94 | ||
704 | #define SDVO_CMD_GET_HBUF_INFO 0x95 | ||
705 | #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 | ||
706 | #define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97 | ||
707 | #define SDVO_CMD_SET_HBUF_DATA 0x98 | ||
708 | #define SDVO_CMD_GET_HBUF_DATA 0x99 | ||
709 | #define SDVO_CMD_SET_HBUF_TXRATE 0x9a | ||
710 | #define SDVO_CMD_GET_HBUF_TXRATE 0x9b | ||
711 | #define SDVO_HBUF_TX_DISABLED (0 << 6) | ||
712 | #define SDVO_HBUF_TX_ONCE (2 << 6) | ||
713 | #define SDVO_HBUF_TX_VSYNC (3 << 6) | ||
714 | #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c | ||
715 | |||
716 | struct intel_sdvo_encode { | ||
717 | u8 dvi_rev; | ||
718 | u8 hdmi_rev; | ||
719 | } __attribute__ ((packed)); | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c new file mode 100644 index 000000000000..fbb35dc56f5c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -0,0 +1,1725 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2008 Intel Corporation | ||
3 | * Jesse Barnes <jesse.barnes@intel.com> | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: | ||
25 | * Eric Anholt <eric@anholt.net> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | /** @file | ||
30 | * Integrated TV-out support for the 915GM and 945GM. | ||
31 | */ | ||
32 | |||
33 | #include "drmP.h" | ||
34 | #include "drm.h" | ||
35 | #include "drm_crtc.h" | ||
36 | #include "drm_edid.h" | ||
37 | #include "intel_drv.h" | ||
38 | #include "i915_drm.h" | ||
39 | #include "i915_drv.h" | ||
40 | |||
41 | enum tv_margin { | ||
42 | TV_MARGIN_LEFT, TV_MARGIN_TOP, | ||
43 | TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM | ||
44 | }; | ||
45 | |||
46 | /** Private structure for the integrated TV support */ | ||
47 | struct intel_tv_priv { | ||
48 | int type; | ||
49 | char *tv_format; | ||
50 | int margin[4]; | ||
51 | u32 save_TV_H_CTL_1; | ||
52 | u32 save_TV_H_CTL_2; | ||
53 | u32 save_TV_H_CTL_3; | ||
54 | u32 save_TV_V_CTL_1; | ||
55 | u32 save_TV_V_CTL_2; | ||
56 | u32 save_TV_V_CTL_3; | ||
57 | u32 save_TV_V_CTL_4; | ||
58 | u32 save_TV_V_CTL_5; | ||
59 | u32 save_TV_V_CTL_6; | ||
60 | u32 save_TV_V_CTL_7; | ||
61 | u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3; | ||
62 | |||
63 | u32 save_TV_CSC_Y; | ||
64 | u32 save_TV_CSC_Y2; | ||
65 | u32 save_TV_CSC_U; | ||
66 | u32 save_TV_CSC_U2; | ||
67 | u32 save_TV_CSC_V; | ||
68 | u32 save_TV_CSC_V2; | ||
69 | u32 save_TV_CLR_KNOBS; | ||
70 | u32 save_TV_CLR_LEVEL; | ||
71 | u32 save_TV_WIN_POS; | ||
72 | u32 save_TV_WIN_SIZE; | ||
73 | u32 save_TV_FILTER_CTL_1; | ||
74 | u32 save_TV_FILTER_CTL_2; | ||
75 | u32 save_TV_FILTER_CTL_3; | ||
76 | |||
77 | u32 save_TV_H_LUMA[60]; | ||
78 | u32 save_TV_H_CHROMA[60]; | ||
79 | u32 save_TV_V_LUMA[43]; | ||
80 | u32 save_TV_V_CHROMA[43]; | ||
81 | |||
82 | u32 save_TV_DAC; | ||
83 | u32 save_TV_CTL; | ||
84 | }; | ||
85 | |||
86 | struct video_levels { | ||
87 | int blank, black, burst; | ||
88 | }; | ||
89 | |||
90 | struct color_conversion { | ||
91 | u16 ry, gy, by, ay; | ||
92 | u16 ru, gu, bu, au; | ||
93 | u16 rv, gv, bv, av; | ||
94 | }; | ||
95 | |||
96 | static const u32 filter_table[] = { | ||
97 | 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, | ||
98 | 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, | ||
99 | 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, | ||
100 | 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, | ||
101 | 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, | ||
102 | 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, | ||
103 | 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, | ||
104 | 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, | ||
105 | 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, | ||
106 | 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, | ||
107 | 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, | ||
108 | 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, | ||
109 | 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, | ||
110 | 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, | ||
111 | 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, | ||
112 | 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, | ||
113 | 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, | ||
114 | 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, | ||
115 | 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, | ||
116 | 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, | ||
117 | 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, | ||
118 | 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, | ||
119 | 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, | ||
120 | 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, | ||
121 | 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, | ||
122 | 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, | ||
123 | 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, | ||
124 | 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, | ||
125 | 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, | ||
126 | 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, | ||
127 | 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, | ||
128 | 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, | ||
129 | 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, | ||
130 | 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, | ||
131 | 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, | ||
132 | 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, | ||
133 | 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, | ||
134 | 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, | ||
135 | 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, | ||
136 | 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, | ||
137 | 0x28003100, 0x28002F00, 0x00003100, 0x36403000, | ||
138 | 0x2D002CC0, 0x30003640, 0x2D0036C0, | ||
139 | 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, | ||
140 | 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, | ||
141 | 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, | ||
142 | 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, | ||
143 | 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, | ||
144 | 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, | ||
145 | 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, | ||
146 | 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, | ||
147 | 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, | ||
148 | 0x28003100, 0x28002F00, 0x00003100, | ||
149 | }; | ||
150 | |||
151 | /* | ||
152 | * Color conversion values have 3 separate fixed point formats: | ||
153 | * | ||
154 | * 10 bit fields (ay, au) | ||
155 | * 1.9 fixed point (b.bbbbbbbbb) | ||
156 | * 11 bit fields (ry, by, ru, gu, gv) | ||
157 | * exp.mantissa (ee.mmmmmmmmm) | ||
158 | * ee = 00 = 10^-1 (0.mmmmmmmmm) | ||
159 | * ee = 01 = 10^-2 (0.0mmmmmmmmm) | ||
160 | * ee = 10 = 10^-3 (0.00mmmmmmmmm) | ||
161 | * ee = 11 = 10^-4 (0.000mmmmmmmmm) | ||
162 | * 12 bit fields (gy, rv, bu) | ||
163 | * exp.mantissa (eee.mmmmmmmmm) | ||
164 | * eee = 000 = 10^-1 (0.mmmmmmmmm) | ||
165 | * eee = 001 = 10^-2 (0.0mmmmmmmmm) | ||
166 | * eee = 010 = 10^-3 (0.00mmmmmmmmm) | ||
167 | * eee = 011 = 10^-4 (0.000mmmmmmmmm) | ||
168 | * eee = 100 = reserved | ||
169 | * eee = 101 = reserved | ||
170 | * eee = 110 = reserved | ||
171 | * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation) | ||
172 | * | ||
173 | * Saturation and contrast are 8 bits, with their own representation: | ||
174 | * 8 bit field (saturation, contrast) | ||
175 | * exp.mantissa (ee.mmmmmm) | ||
176 | * ee = 00 = 10^-1 (0.mmmmmm) | ||
177 | * ee = 01 = 10^0 (m.mmmmm) | ||
178 | * ee = 10 = 10^1 (mm.mmmm) | ||
179 | * ee = 11 = 10^2 (mmm.mmm) | ||
180 | * | ||
181 | * Simple conversion function: | ||
182 | * | ||
183 | * static u32 | ||
184 | * float_to_csc_11(float f) | ||
185 | * { | ||
186 | * u32 exp; | ||
187 | * u32 mant; | ||
188 | * u32 ret; | ||
189 | * | ||
190 | * if (f < 0) | ||
191 | * f = -f; | ||
192 | * | ||
193 | * if (f >= 1) { | ||
194 | * exp = 0x7; | ||
195 | * mant = 1 << 8; | ||
196 | * } else { | ||
197 | * for (exp = 0; exp < 3 && f < 0.5; exp++) | ||
198 | * f *= 2.0; | ||
199 | * mant = (f * (1 << 9) + 0.5); | ||
200 | * if (mant >= (1 << 9)) | ||
201 | * mant = (1 << 9) - 1; | ||
202 | * } | ||
203 | * ret = (exp << 9) | mant; | ||
204 | * return ret; | ||
205 | * } | ||
206 | */ | ||
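/*
 * Editorial sketch, not part of this patch: the inverse of float_to_csc_11()
 * above, decoding an 11-bit CSC field into millionths so the tables below can
 * be sanity-checked.  E.g. ntsc_m_csc_composite.ry = 0x0332 decodes to
 * ~298828 (~0.299, the usual R->Y weight) and .by = 0x07d3 to ~0.114.
 * The 12-bit fields use a 3-bit exponent and are not handled here.
 */
static u32 csc_11_to_micro(u16 csc)
{
	u32 exp = (csc >> 9) & 0x3;	/* ee: number of extra halvings */
	u32 mant = csc & 0x1ff;		/* 9-bit mantissa, unit 1/512 */

	/* mant / (512 << exp), scaled by 1e6; fits in 32 bits since mant < 512 */
	return mant * 1000000u / (512u << exp);
}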
207 | |||
208 | /* | ||
209 | * Behold, magic numbers! If we plant them they might grow a big | ||
210 | * s-video cable to the sky... or something. | ||
211 | * | ||
212 | * Pre-converted to appropriate hex value. | ||
213 | */ | ||
214 | |||
215 | /* | ||
216 | * PAL & NTSC values for composite & s-video connections | ||
217 | */ | ||
218 | static const struct color_conversion ntsc_m_csc_composite = { | ||
219 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | ||
220 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | ||
221 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | ||
222 | }; | ||
223 | |||
224 | static const struct video_levels ntsc_m_levels_composite = { | ||
225 | .blank = 225, .black = 267, .burst = 113, | ||
226 | }; | ||
227 | |||
228 | static const struct color_conversion ntsc_m_csc_svideo = { | ||
229 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | ||
230 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | ||
231 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | ||
232 | }; | ||
233 | |||
234 | static const struct video_levels ntsc_m_levels_svideo = { | ||
235 | .blank = 266, .black = 316, .burst = 133, | ||
236 | }; | ||
237 | |||
238 | static const struct color_conversion ntsc_j_csc_composite = { | ||
239 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, | ||
240 | .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, | ||
241 | .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, | ||
242 | }; | ||
243 | |||
244 | static const struct video_levels ntsc_j_levels_composite = { | ||
245 | .blank = 225, .black = 225, .burst = 113, | ||
246 | }; | ||
247 | |||
248 | static const struct color_conversion ntsc_j_csc_svideo = { | ||
249 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, | ||
250 | .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, | ||
251 | .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, | ||
252 | }; | ||
253 | |||
254 | static const struct video_levels ntsc_j_levels_svideo = { | ||
255 | .blank = 266, .black = 266, .burst = 133, | ||
256 | }; | ||
257 | |||
258 | static const struct color_conversion pal_csc_composite = { | ||
259 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, | ||
260 | .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, | ||
261 | .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, | ||
262 | }; | ||
263 | |||
264 | static const struct video_levels pal_levels_composite = { | ||
265 | .blank = 237, .black = 237, .burst = 118, | ||
266 | }; | ||
267 | |||
268 | static const struct color_conversion pal_csc_svideo = { | ||
269 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, | ||
270 | .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, | ||
271 | .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, | ||
272 | }; | ||
273 | |||
274 | static const struct video_levels pal_levels_svideo = { | ||
275 | .blank = 280, .black = 280, .burst = 139, | ||
276 | }; | ||
277 | |||
278 | static const struct color_conversion pal_m_csc_composite = { | ||
279 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | ||
280 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | ||
281 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | ||
282 | }; | ||
283 | |||
284 | static const struct video_levels pal_m_levels_composite = { | ||
285 | .blank = 225, .black = 267, .burst = 113, | ||
286 | }; | ||
287 | |||
288 | static const struct color_conversion pal_m_csc_svideo = { | ||
289 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | ||
290 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | ||
291 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | ||
292 | }; | ||
293 | |||
294 | static const struct video_levels pal_m_levels_svideo = { | ||
295 | .blank = 266, .black = 316, .burst = 133, | ||
296 | }; | ||
297 | |||
298 | static const struct color_conversion pal_n_csc_composite = { | ||
299 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | ||
300 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | ||
301 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | ||
302 | }; | ||
303 | |||
304 | static const struct video_levels pal_n_levels_composite = { | ||
305 | .blank = 225, .black = 267, .burst = 118, | ||
306 | }; | ||
307 | |||
308 | static const struct color_conversion pal_n_csc_svideo = { | ||
309 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | ||
310 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | ||
311 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | ||
312 | }; | ||
313 | |||
314 | static const struct video_levels pal_n_levels_svideo = { | ||
315 | .blank = 266, .black = 316, .burst = 139, | ||
316 | }; | ||
317 | |||
318 | /* | ||
319 | * Component connections | ||
320 | */ | ||
321 | static const struct color_conversion sdtv_csc_yprpb = { | ||
322 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, | ||
323 | .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, | ||
324 | .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, | ||
325 | }; | ||
326 | |||
327 | static const struct color_conversion sdtv_csc_rgb = { | ||
328 | .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, | ||
329 | .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, | ||
330 | .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, | ||
331 | }; | ||
332 | |||
333 | static const struct color_conversion hdtv_csc_yprpb = { | ||
334 | .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, | ||
335 | .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, | ||
336 | .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, | ||
337 | }; | ||
338 | |||
339 | static const struct color_conversion hdtv_csc_rgb = { | ||
340 | .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, | ||
341 | .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, | ||
342 | .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, | ||
343 | }; | ||
344 | |||
345 | static const struct video_levels component_levels = { | ||
346 | .blank = 279, .black = 279, .burst = 0, | ||
347 | }; | ||
348 | |||
349 | |||
350 | struct tv_mode { | ||
351 | char *name; | ||
352 | int clock; | ||
353 | int refresh; /* in millihertz (for precision) */ | ||
354 | u32 oversample; | ||
355 | int hsync_end, hblank_start, hblank_end, htotal; | ||
356 | bool progressive, trilevel_sync, component_only; | ||
357 | int vsync_start_f1, vsync_start_f2, vsync_len; | ||
358 | bool veq_ena; | ||
359 | int veq_start_f1, veq_start_f2, veq_len; | ||
360 | int vi_end_f1, vi_end_f2, nbr_end; | ||
361 | bool burst_ena; | ||
362 | int hburst_start, hburst_len; | ||
363 | int vburst_start_f1, vburst_end_f1; | ||
364 | int vburst_start_f2, vburst_end_f2; | ||
365 | int vburst_start_f3, vburst_end_f3; | ||
366 | int vburst_start_f4, vburst_end_f4; | ||
367 | /* | ||
368 | * subcarrier programming | ||
369 | */ | ||
370 | int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc; | ||
371 | u32 sc_reset; | ||
372 | bool pal_burst; | ||
373 | /* | ||
374 | * blank/black levels | ||
375 | */ | ||
376 | const struct video_levels *composite_levels, *svideo_levels; | ||
377 | const struct color_conversion *composite_color, *svideo_color; | ||
378 | const u32 *filter_table; | ||
379 | int max_srcw; | ||
380 | }; | ||
381 | |||
382 | |||
383 | /* | ||
384 | * Sub carrier DDA | ||
385 | * | ||
386 | * I think this works as follows: | ||
387 | * | ||
388 | * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096 | ||
389 | * | ||
390 | * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value | ||
391 | * | ||
392 | * So, | ||
393 | * dda1_ideal = subcarrier/pixel * 4096 | ||
394 | * dda1_inc = floor (dda1_ideal) | ||
395 | * dda2 = dda1_ideal - dda1_inc | ||
396 | * | ||
397 | * then pick a ratio for dda2 that gives the closest approximation. If | ||
398 | * you can't get close enough, you can play with dda3 as well. This | ||
399 | * seems likely to happen when dda2 is small as the jumps would be larger | ||
400 | * | ||
401 | * To invert this, | ||
402 | * | ||
403 | * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size) | ||
404 | * | ||
405 | * The constants below were all computed using a 107.520MHz clock | ||
406 | */ | ||
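/*
 * Editorial sketch, not part of this patch: recompute the subcarrier
 * frequency a tv_mode's DDA constants actually program, per the formula
 * above (dda3 ignored; it is unused in the tables below).  For the NTSC-M
 * entry (clock 107520 kHz, dda1_inc 136, dda2_inc 7624, dda2_size 20013)
 * this yields ~3580000 Hz, matching its "desired 3.5800000" annotation.
 * Assumes do_div() from asm/div64.h is available.
 */
static unsigned long tv_mode_subcarrier_hz(const struct tv_mode *tv_mode)
{
	u64 hz;

	if (!tv_mode->dda2_size)	/* progressive modes carry no burst */
		return 0;

	/* clock is in kHz: Hz * (dda1_inc + dda2_inc / dda2_size) / 4096 */
	hz = (u64)tv_mode->clock * 1000 *
	     ((u64)tv_mode->dda1_inc * tv_mode->dda2_size + tv_mode->dda2_inc);
	do_div(hz, tv_mode->dda2_size * 4096);

	return (unsigned long)hz;
}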
407 | |||
408 | /** | ||
409 | * Register programming values for TV modes. | ||
410 | * | ||
411 | * These values account for -1s required. | ||
412 | */ | ||
413 | |||
414 | static const struct tv_mode tv_modes[] = { | ||
415 | { | ||
416 | .name = "NTSC-M", | ||
417 | .clock = 107520, | ||
418 | .refresh = 29970, | ||
419 | .oversample = TV_OVERSAMPLE_8X, | ||
420 | .component_only = 0, | ||
421 | /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ | ||
422 | |||
423 | .hsync_end = 64, .hblank_end = 124, | ||
424 | .hblank_start = 836, .htotal = 857, | ||
425 | |||
426 | .progressive = false, .trilevel_sync = false, | ||
427 | |||
428 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | ||
429 | .vsync_len = 6, | ||
430 | |||
431 | .veq_ena = true, .veq_start_f1 = 0, | ||
432 | .veq_start_f2 = 1, .veq_len = 18, | ||
433 | |||
434 | .vi_end_f1 = 20, .vi_end_f2 = 21, | ||
435 | .nbr_end = 240, | ||
436 | |||
437 | .burst_ena = true, | ||
438 | .hburst_start = 72, .hburst_len = 34, | ||
439 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, | ||
440 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, | ||
441 | .vburst_start_f3 = 9, .vburst_end_f3 = 240, | ||
442 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | ||
443 | |||
444 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | ||
445 | .dda1_inc = 136, | ||
446 | .dda2_inc = 7624, .dda2_size = 20013, | ||
447 | .dda3_inc = 0, .dda3_size = 0, | ||
448 | .sc_reset = TV_SC_RESET_EVERY_4, | ||
449 | .pal_burst = false, | ||
450 | |||
451 | .composite_levels = &ntsc_m_levels_composite, | ||
452 | .composite_color = &ntsc_m_csc_composite, | ||
453 | .svideo_levels = &ntsc_m_levels_svideo, | ||
454 | .svideo_color = &ntsc_m_csc_svideo, | ||
455 | |||
456 | .filter_table = filter_table, | ||
457 | }, | ||
458 | { | ||
459 | .name = "NTSC-443", | ||
460 | .clock = 107520, | ||
461 | .refresh = 29970, | ||
462 | .oversample = TV_OVERSAMPLE_8X, | ||
463 | .component_only = 0, | ||
464 | /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ | ||
465 | .hsync_end = 64, .hblank_end = 124, | ||
466 | .hblank_start = 836, .htotal = 857, | ||
467 | |||
468 | .progressive = false, .trilevel_sync = false, | ||
469 | |||
470 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | ||
471 | .vsync_len = 6, | ||
472 | |||
473 | .veq_ena = true, .veq_start_f1 = 0, | ||
474 | .veq_start_f2 = 1, .veq_len = 18, | ||
475 | |||
476 | .vi_end_f1 = 20, .vi_end_f2 = 21, | ||
477 | .nbr_end = 240, | ||
478 | |||
479 | .burst_ena = true, | ||
480 | .hburst_start = 72, .hburst_len = 34, | ||
481 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, | ||
482 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, | ||
483 | .vburst_start_f3 = 9, .vburst_end_f3 = 240, | ||
484 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | ||
485 | |||
486 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | ||
487 | .dda1_inc = 168, | ||
488 | .dda2_inc = 18557, .dda2_size = 20625, | ||
489 | .dda3_inc = 0, .dda3_size = 0, | ||
490 | .sc_reset = TV_SC_RESET_EVERY_8, | ||
491 | .pal_burst = true, | ||
492 | |||
493 | .composite_levels = &ntsc_m_levels_composite, | ||
494 | .composite_color = &ntsc_m_csc_composite, | ||
495 | .svideo_levels = &ntsc_m_levels_svideo, | ||
496 | .svideo_color = &ntsc_m_csc_svideo, | ||
497 | |||
498 | .filter_table = filter_table, | ||
499 | }, | ||
500 | { | ||
501 | .name = "NTSC-J", | ||
502 | .clock = 107520, | ||
503 | .refresh = 29970, | ||
504 | .oversample = TV_OVERSAMPLE_8X, | ||
505 | .component_only = 0, | ||
506 | |||
507 | /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ | ||
508 | .hsync_end = 64, .hblank_end = 124, | ||
509 | .hblank_start = 836, .htotal = 857, | ||
510 | |||
511 | .progressive = false, .trilevel_sync = false, | ||
512 | |||
513 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | ||
514 | .vsync_len = 6, | ||
515 | |||
516 | .veq_ena = true, .veq_start_f1 = 0, | ||
517 | .veq_start_f2 = 1, .veq_len = 18, | ||
518 | |||
519 | .vi_end_f1 = 20, .vi_end_f2 = 21, | ||
520 | .nbr_end = 240, | ||
521 | |||
522 | .burst_ena = true, | ||
523 | .hburst_start = 72, .hburst_len = 34, | ||
524 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, | ||
525 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, | ||
526 | .vburst_start_f3 = 9, .vburst_end_f3 = 240, | ||
527 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | ||
528 | |||
529 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | ||
530 | .dda1_inc = 136, | ||
531 | .dda2_inc = 7624, .dda2_size = 20013, | ||
532 | .dda3_inc = 0, .dda3_size = 0, | ||
533 | .sc_reset = TV_SC_RESET_EVERY_4, | ||
534 | .pal_burst = false, | ||
535 | |||
536 | .composite_levels = &ntsc_j_levels_composite, | ||
537 | .composite_color = &ntsc_j_csc_composite, | ||
538 | .svideo_levels = &ntsc_j_levels_svideo, | ||
539 | .svideo_color = &ntsc_j_csc_svideo, | ||
540 | |||
541 | .filter_table = filter_table, | ||
542 | }, | ||
543 | { | ||
544 | .name = "PAL-M", | ||
545 | .clock = 107520, | ||
546 | .refresh = 29970, | ||
547 | .oversample = TV_OVERSAMPLE_8X, | ||
548 | .component_only = 0, | ||
549 | |||
550 | /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ | ||
551 | .hsync_end = 64, .hblank_end = 124, | ||
552 | .hblank_start = 836, .htotal = 857, | ||
553 | |||
554 | .progressive = false, .trilevel_sync = false, | ||
555 | |||
556 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | ||
557 | .vsync_len = 6, | ||
558 | |||
559 | .veq_ena = true, .veq_start_f1 = 0, | ||
560 | .veq_start_f2 = 1, .veq_len = 18, | ||
561 | |||
562 | .vi_end_f1 = 20, .vi_end_f2 = 21, | ||
563 | .nbr_end = 240, | ||
564 | |||
565 | .burst_ena = true, | ||
566 | .hburst_start = 72, .hburst_len = 34, | ||
567 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, | ||
568 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, | ||
569 | .vburst_start_f3 = 9, .vburst_end_f3 = 240, | ||
570 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | ||
571 | |||
572 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | ||
573 | .dda1_inc = 136, | ||
574 | .dda2_inc = 7624, .dda2_size = 20013, | ||
575 | .dda3_inc = 0, .dda3_size = 0, | ||
576 | .sc_reset = TV_SC_RESET_EVERY_4, | ||
577 | .pal_burst = false, | ||
578 | |||
579 | .composite_levels = &pal_m_levels_composite, | ||
580 | .composite_color = &pal_m_csc_composite, | ||
581 | .svideo_levels = &pal_m_levels_svideo, | ||
582 | .svideo_color = &pal_m_csc_svideo, | ||
583 | |||
584 | .filter_table = filter_table, | ||
585 | }, | ||
586 | { | ||
587 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ | ||
588 | .name = "PAL-N", | ||
589 | .clock = 107520, | ||
590 | .refresh = 25000, | ||
591 | .oversample = TV_OVERSAMPLE_8X, | ||
592 | .component_only = 0, | ||
593 | |||
594 | .hsync_end = 64, .hblank_end = 128, | ||
595 | .hblank_start = 844, .htotal = 863, | ||
596 | |||
597 | .progressive = false, .trilevel_sync = false, | ||
598 | |||
599 | |||
600 | .vsync_start_f1 = 6, .vsync_start_f2 = 7, | ||
601 | .vsync_len = 6, | ||
602 | |||
603 | .veq_ena = true, .veq_start_f1 = 0, | ||
604 | .veq_start_f2 = 1, .veq_len = 18, | ||
605 | |||
606 | .vi_end_f1 = 24, .vi_end_f2 = 25, | ||
607 | .nbr_end = 286, | ||
608 | |||
609 | .burst_ena = true, | ||
610 | .hburst_start = 73, .hburst_len = 34, | ||
611 | .vburst_start_f1 = 8, .vburst_end_f1 = 285, | ||
612 | .vburst_start_f2 = 8, .vburst_end_f2 = 286, | ||
613 | .vburst_start_f3 = 9, .vburst_end_f3 = 286, | ||
614 | .vburst_start_f4 = 9, .vburst_end_f4 = 285, | ||
615 | |||
616 | |||
617 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | ||
618 | .dda1_inc = 168, | ||
619 | .dda2_inc = 18557, .dda2_size = 20625, | ||
620 | .dda3_inc = 0, .dda3_size = 0, | ||
621 | .sc_reset = TV_SC_RESET_EVERY_8, | ||
622 | .pal_burst = true, | ||
623 | |||
624 | .composite_levels = &pal_n_levels_composite, | ||
625 | .composite_color = &pal_n_csc_composite, | ||
626 | .svideo_levels = &pal_n_levels_svideo, | ||
627 | .svideo_color = &pal_n_csc_svideo, | ||
628 | |||
629 | .filter_table = filter_table, | ||
630 | }, | ||
631 | { | ||
632 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ | ||
633 | .name = "PAL", | ||
634 | .clock = 107520, | ||
635 | .refresh = 25000, | ||
636 | .oversample = TV_OVERSAMPLE_8X, | ||
637 | .component_only = 0, | ||
638 | |||
639 | .hsync_end = 64, .hblank_end = 128, | ||
640 | .hblank_start = 844, .htotal = 863, | ||
641 | |||
642 | .progressive = false, .trilevel_sync = false, | ||
643 | |||
644 | .vsync_start_f1 = 5, .vsync_start_f2 = 6, | ||
645 | .vsync_len = 5, | ||
646 | |||
647 | .veq_ena = true, .veq_start_f1 = 0, | ||
648 | .veq_start_f2 = 1, .veq_len = 15, | ||
649 | |||
650 | .vi_end_f1 = 24, .vi_end_f2 = 25, | ||
651 | .nbr_end = 286, | ||
652 | |||
653 | .burst_ena = true, | ||
654 | .hburst_start = 73, .hburst_len = 32, | ||
655 | .vburst_start_f1 = 8, .vburst_end_f1 = 285, | ||
656 | .vburst_start_f2 = 8, .vburst_end_f2 = 286, | ||
657 | .vburst_start_f3 = 9, .vburst_end_f3 = 286, | ||
658 | .vburst_start_f4 = 9, .vburst_end_f4 = 285, | ||
659 | |||
660 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | ||
661 | .dda1_inc = 168, | ||
662 | .dda2_inc = 18557, .dda2_size = 20625, | ||
663 | .dda3_inc = 0, .dda3_size = 0, | ||
664 | .sc_reset = TV_SC_RESET_EVERY_8, | ||
665 | .pal_burst = true, | ||
666 | |||
667 | .composite_levels = &pal_levels_composite, | ||
668 | .composite_color = &pal_csc_composite, | ||
669 | .svideo_levels = &pal_levels_svideo, | ||
670 | .svideo_color = &pal_csc_svideo, | ||
671 | |||
672 | .filter_table = filter_table, | ||
673 | }, | ||
674 | { | ||
675 | .name = "480p@59.94Hz", | ||
676 | .clock = 107520, | ||
677 | .refresh = 59940, | ||
678 | .oversample = TV_OVERSAMPLE_4X, | ||
679 | .component_only = 1, | ||
680 | |||
681 | .hsync_end = 64, .hblank_end = 122, | ||
682 | .hblank_start = 842, .htotal = 857, | ||
683 | |||
684 | .progressive = true, .trilevel_sync = false, | ||
685 | |||
686 | .vsync_start_f1 = 12, .vsync_start_f2 = 12, | ||
687 | .vsync_len = 12, | ||
688 | |||
689 | .veq_ena = false, | ||
690 | |||
691 | .vi_end_f1 = 44, .vi_end_f2 = 44, | ||
692 | .nbr_end = 496, | ||
693 | |||
694 | .burst_ena = false, | ||
695 | |||
696 | .filter_table = filter_table, | ||
697 | }, | ||
698 | { | ||
699 | .name = "480p@60Hz", | ||
700 | .clock = 107520, | ||
701 | .refresh = 60000, | ||
702 | .oversample = TV_OVERSAMPLE_4X, | ||
703 | .component_only = 1, | ||
704 | |||
705 | .hsync_end = 64, .hblank_end = 122, | ||
706 | .hblank_start = 842, .htotal = 856, | ||
707 | |||
708 | .progressive = true, .trilevel_sync = false, | ||
709 | |||
710 | .vsync_start_f1 = 12, .vsync_start_f2 = 12, | ||
711 | .vsync_len = 12, | ||
712 | |||
713 | .veq_ena = false, | ||
714 | |||
715 | .vi_end_f1 = 44, .vi_end_f2 = 44, | ||
716 | .nbr_end = 496, | ||
717 | |||
718 | .burst_ena = false, | ||
719 | |||
720 | .filter_table = filter_table, | ||
721 | }, | ||
722 | { | ||
723 | .name = "576p", | ||
724 | .clock = 107520, | ||
725 | .refresh = 50000, | ||
726 | .oversample = TV_OVERSAMPLE_4X, | ||
727 | .component_only = 1, | ||
728 | |||
729 | .hsync_end = 64, .hblank_end = 139, | ||
730 | .hblank_start = 859, .htotal = 863, | ||
731 | |||
732 | .progressive = true, .trilevel_sync = false, | ||
733 | |||
734 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | ||
735 | .vsync_len = 10, | ||
736 | |||
737 | .veq_ena = false, | ||
738 | |||
739 | .vi_end_f1 = 48, .vi_end_f2 = 48, | ||
740 | .nbr_end = 575, | ||
741 | |||
742 | .burst_ena = false, | ||
743 | |||
744 | .filter_table = filter_table, | ||
745 | }, | ||
746 | { | ||
747 | .name = "720p@60Hz", | ||
748 | .clock = 148800, | ||
749 | .refresh = 60000, | ||
750 | .oversample = TV_OVERSAMPLE_2X, | ||
751 | .component_only = 1, | ||
752 | |||
753 | .hsync_end = 80, .hblank_end = 300, | ||
754 | .hblank_start = 1580, .htotal = 1649, | ||
755 | |||
756 | .progressive = true, .trilevel_sync = true, | ||
757 | |||
758 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | ||
759 | .vsync_len = 10, | ||
760 | |||
761 | .veq_ena = false, | ||
762 | |||
763 | .vi_end_f1 = 29, .vi_end_f2 = 29, | ||
764 | .nbr_end = 719, | ||
765 | |||
766 | .burst_ena = false, | ||
767 | |||
768 | .filter_table = filter_table, | ||
769 | }, | ||
770 | { | ||
771 | .name = "720p@59.94Hz", | ||
772 | .clock = 148800, | ||
773 | .refresh = 59940, | ||
774 | .oversample = TV_OVERSAMPLE_2X, | ||
775 | .component_only = 1, | ||
776 | |||
777 | .hsync_end = 80, .hblank_end = 300, | ||
778 | .hblank_start = 1580, .htotal = 1651, | ||
779 | |||
780 | .progressive = true, .trilevel_sync = true, | ||
781 | |||
782 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | ||
783 | .vsync_len = 10, | ||
784 | |||
785 | .veq_ena = false, | ||
786 | |||
787 | .vi_end_f1 = 29, .vi_end_f2 = 29, | ||
788 | .nbr_end = 719, | ||
789 | |||
790 | .burst_ena = false, | ||
791 | |||
792 | .filter_table = filter_table, | ||
793 | }, | ||
794 | { | ||
795 | .name = "720p@50Hz", | ||
796 | .clock = 148800, | ||
797 | .refresh = 50000, | ||
798 | .oversample = TV_OVERSAMPLE_2X, | ||
799 | .component_only = 1, | ||
800 | |||
801 | .hsync_end = 80, .hblank_end = 300, | ||
802 | .hblank_start = 1580, .htotal = 1979, | ||
803 | |||
804 | .progressive = true, .trilevel_sync = true, | ||
805 | |||
806 | .vsync_start_f1 = 10, .vsync_start_f2 = 10, | ||
807 | .vsync_len = 10, | ||
808 | |||
809 | .veq_ena = false, | ||
810 | |||
811 | .vi_end_f1 = 29, .vi_end_f2 = 29, | ||
812 | .nbr_end = 719, | ||
813 | |||
814 | .burst_ena = false, | ||
815 | |||
816 | .filter_table = filter_table, | ||
817 | .max_srcw = 800 | ||
818 | }, | ||
819 | { | ||
820 | .name = "1080i@50Hz", | ||
821 | .clock = 148800, | ||
822 | .refresh = 25000, | ||
823 | .oversample = TV_OVERSAMPLE_2X, | ||
824 | .component_only = 1, | ||
825 | |||
826 | .hsync_end = 88, .hblank_end = 235, | ||
827 | .hblank_start = 2155, .htotal = 2639, | ||
828 | |||
829 | .progressive = false, .trilevel_sync = true, | ||
830 | |||
831 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, | ||
832 | .vsync_len = 10, | ||
833 | |||
834 | .veq_ena = true, .veq_start_f1 = 4, | ||
835 | .veq_start_f2 = 4, .veq_len = 10, | ||
836 | |||
837 | |||
838 | .vi_end_f1 = 21, .vi_end_f2 = 22, | ||
839 | .nbr_end = 539, | ||
840 | |||
841 | .burst_ena = false, | ||
842 | |||
843 | .filter_table = filter_table, | ||
844 | }, | ||
845 | { | ||
846 | .name = "1080i@60Hz", | ||
847 | .clock = 148800, | ||
848 | .refresh = 30000, | ||
849 | .oversample = TV_OVERSAMPLE_2X, | ||
850 | .component_only = 1, | ||
851 | |||
852 | .hsync_end = 88, .hblank_end = 235, | ||
853 | .hblank_start = 2155, .htotal = 2199, | ||
854 | |||
855 | .progressive = false, .trilevel_sync = true, | ||
856 | |||
857 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, | ||
858 | .vsync_len = 10, | ||
859 | |||
860 | .veq_ena = true, .veq_start_f1 = 4, | ||
861 | .veq_start_f2 = 4, .veq_len = 10, | ||
862 | |||
863 | |||
864 | .vi_end_f1 = 21, .vi_end_f2 = 22, | ||
865 | .nbr_end = 539, | ||
866 | |||
867 | .burst_ena = false, | ||
868 | |||
869 | .filter_table = filter_table, | ||
870 | }, | ||
871 | { | ||
872 | .name = "1080i@59.94Hz", | ||
873 | .clock = 148800, | ||
874 | .refresh = 29970, | ||
875 | .oversample = TV_OVERSAMPLE_2X, | ||
876 | .component_only = 1, | ||
877 | |||
878 | .hsync_end = 88, .hblank_end = 235, | ||
879 | .hblank_start = 2155, .htotal = 2200, | ||
880 | |||
881 | .progressive = false, .trilevel_sync = true, | ||
882 | |||
883 | .vsync_start_f1 = 4, .vsync_start_f2 = 5, | ||
884 | .vsync_len = 10, | ||
885 | |||
886 | .veq_ena = true, .veq_start_f1 = 4, | ||
887 | .veq_start_f2 = 4, .veq_len = 10, | ||
888 | |||
889 | |||
890 | .vi_end_f1 = 21, .vi_end_f2 = 22, | ||
891 | .nbr_end = 539, | ||
892 | |||
893 | .burst_ena = false, | ||
894 | |||
895 | .filter_table = filter_table, | ||
896 | }, | ||
897 | }; | ||
898 | |||
899 | #define NUM_TV_MODES (sizeof(tv_modes) / sizeof(tv_modes[0])) | ||
900 | |||
901 | static void | ||
902 | intel_tv_dpms(struct drm_encoder *encoder, int mode) | ||
903 | { | ||
904 | struct drm_device *dev = encoder->dev; | ||
905 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
906 | |||
907 | switch (mode) { | ||
908 | case DRM_MODE_DPMS_ON: | ||
909 | I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); | ||
910 | break; | ||
911 | case DRM_MODE_DPMS_STANDBY: | ||
912 | case DRM_MODE_DPMS_SUSPEND: | ||
913 | case DRM_MODE_DPMS_OFF: | ||
914 | I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); | ||
915 | break; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | static void | ||
920 | intel_tv_save(struct drm_connector *connector) | ||
921 | { | ||
922 | struct drm_device *dev = connector->dev; | ||
923 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
924 | struct intel_output *intel_output = to_intel_output(connector); | ||
925 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | ||
926 | int i; | ||
927 | |||
928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); | ||
929 | tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); | ||
930 | tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); | ||
931 | tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); | ||
932 | tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); | ||
933 | tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); | ||
934 | tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); | ||
935 | tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); | ||
936 | tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); | ||
937 | tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); | ||
938 | tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); | ||
939 | tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); | ||
940 | tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); | ||
941 | |||
942 | tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); | ||
943 | tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); | ||
944 | tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); | ||
945 | tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); | ||
946 | tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); | ||
947 | tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); | ||
948 | tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); | ||
949 | tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL); | ||
950 | tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); | ||
951 | tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); | ||
952 | tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); | ||
953 | tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); | ||
954 | tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); | ||
955 | |||
956 | for (i = 0; i < 60; i++) | ||
957 | tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); | ||
958 | for (i = 0; i < 60; i++) | ||
959 | tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); | ||
960 | for (i = 0; i < 43; i++) | ||
961 | tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); | ||
962 | for (i = 0; i < 43; i++) | ||
963 | tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); | ||
964 | |||
965 | tv_priv->save_TV_DAC = I915_READ(TV_DAC); | ||
966 | tv_priv->save_TV_CTL = I915_READ(TV_CTL); | ||
967 | } | ||
968 | |||
969 | static void | ||
970 | intel_tv_restore(struct drm_connector *connector) | ||
971 | { | ||
972 | struct drm_device *dev = connector->dev; | ||
973 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
974 | struct intel_output *intel_output = to_intel_output(connector); | ||
975 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | ||
976 | struct drm_crtc *crtc = connector->encoder->crtc; | ||
977 | struct intel_crtc *intel_crtc; | ||
978 | int i; | ||
979 | |||
980 | /* FIXME: No CRTC? */ | ||
981 | if (!crtc) | ||
982 | return; | ||
983 | |||
984 | intel_crtc = to_intel_crtc(crtc); | ||
985 | I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); | ||
986 | I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); | ||
987 | I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); | ||
988 | I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); | ||
989 | I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); | ||
990 | I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); | ||
991 | I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); | ||
992 | I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); | ||
993 | I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); | ||
994 | I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); | ||
995 | I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); | ||
996 | I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); | ||
997 | I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); | ||
998 | |||
999 | I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); | ||
1000 | I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); | ||
1001 | I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); | ||
1002 | I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); | ||
1003 | I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); | ||
1004 | I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); | ||
1005 | I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); | ||
1006 | I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); | ||
1007 | |||
1008 | { | ||
1009 | int pipeconf_reg = (intel_crtc->pipe == 0) ? | ||
1010 | PIPEACONF : PIPEBCONF; | ||
1011 | int dspcntr_reg = (intel_crtc->plane == 0) ? | ||
1012 | DSPACNTR : DSPBCNTR; | ||
1013 | int pipeconf = I915_READ(pipeconf_reg); | ||
1014 | int dspcntr = I915_READ(dspcntr_reg); | ||
1015 | int dspbase_reg = (intel_crtc->plane == 0) ? | ||
1016 | DSPAADDR : DSPBADDR; | ||
1017 | /* Pipe must be off here */ | ||
1018 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); | ||
1019 | /* Flush the plane changes */ | ||
1020 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1021 | |||
1022 | if (!IS_I9XX(dev)) { | ||
1023 | /* Wait for vblank for the disable to take effect */ | ||
1024 | intel_wait_for_vblank(dev); | ||
1025 | } | ||
1026 | |||
1027 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); | ||
1028 | /* Wait for vblank for the disable to take effect. */ | ||
1029 | intel_wait_for_vblank(dev); | ||
1030 | |||
1031 | /* Filter ctl must be set before TV_WIN_SIZE */ | ||
1032 | I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); | ||
1033 | I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); | ||
1034 | I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); | ||
1035 | I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); | ||
1036 | I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); | ||
1037 | I915_WRITE(pipeconf_reg, pipeconf); | ||
1038 | I915_WRITE(dspcntr_reg, dspcntr); | ||
1039 | /* Flush the plane changes */ | ||
1040 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1041 | } | ||
1042 | |||
1043 | for (i = 0; i < 60; i++) | ||
1044 | I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); | ||
1045 | for (i = 0; i < 60; i++) | ||
1046 | I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); | ||
1047 | for (i = 0; i < 43; i++) | ||
1048 | I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); | ||
1049 | for (i = 0; i < 43; i++) | ||
1050 | I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); | ||
1051 | |||
1052 | I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); | ||
1053 | I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); | ||
1054 | } | ||
1055 | |||
1056 | static const struct tv_mode * | ||
1057 | intel_tv_mode_lookup (char *tv_format) | ||
1058 | { | ||
1059 | int i; | ||
1060 | |||
1061 | for (i = 0; i < NUM_TV_MODES; i++) { | ||
1062 | const struct tv_mode *tv_mode = &tv_modes[i]; | ||
1063 | |||
1064 | if (!strcmp(tv_format, tv_mode->name)) | ||
1065 | return tv_mode; | ||
1066 | } | ||
1067 | return NULL; | ||
1068 | } | ||
1069 | |||
1070 | static const struct tv_mode * | ||
1071 | intel_tv_mode_find (struct intel_output *intel_output) | ||
1072 | { | ||
1073 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | ||
1074 | |||
1075 | return intel_tv_mode_lookup(tv_priv->tv_format); | ||
1076 | } | ||
1077 | |||
1078 | static enum drm_mode_status | ||
1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | ||
1080 | { | ||
1081 | struct intel_output *intel_output = to_intel_output(connector); | ||
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | ||
1083 | |||
1084 | /* Ensure TV refresh is close to desired refresh */ | ||
1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) | ||
1086 | return MODE_OK; | ||
1087 | return MODE_CLOCK_RANGE; | ||
1088 | } | ||
1089 | |||
1090 | |||
1091 | static bool | ||
1092 | intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
1093 | struct drm_display_mode *adjusted_mode) | ||
1094 | { | ||
1095 | struct drm_device *dev = encoder->dev; | ||
1096 | struct drm_mode_config *drm_config = &dev->mode_config; | ||
1097 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
1098 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); | ||
1099 | struct drm_encoder *other_encoder; | ||
1100 | |||
1101 | if (!tv_mode) | ||
1102 | return false; | ||
1103 | |||
1104 | /* FIXME: lock encoder list */ | ||
1105 | list_for_each_entry(other_encoder, &drm_config->encoder_list, head) { | ||
1106 | if (other_encoder != encoder && | ||
1107 | other_encoder->crtc == encoder->crtc) | ||
1108 | return false; | ||
1109 | } | ||
1110 | |||
1111 | adjusted_mode->clock = tv_mode->clock; | ||
1112 | return true; | ||
1113 | } | ||
1114 | |||
1115 | static void | ||
1116 | intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
1117 | struct drm_display_mode *adjusted_mode) | ||
1118 | { | ||
1119 | struct drm_device *dev = encoder->dev; | ||
1120 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1121 | struct drm_crtc *crtc = encoder->crtc; | ||
1122 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1123 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
1124 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | ||
1125 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | ||
1126 | u32 tv_ctl; | ||
1127 | u32 hctl1, hctl2, hctl3; | ||
1128 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; | ||
1129 | u32 scctl1, scctl2, scctl3; | ||
1130 | int i, j; | ||
1131 | const struct video_levels *video_levels; | ||
1132 | const struct color_conversion *color_conversion; | ||
1133 | bool burst_ena; | ||
1134 | |||
1135 | if (!tv_mode) | ||
1136 | return; /* can't happen (mode_prepare prevents this) */ | ||
1137 | |||
1138 | tv_ctl = 0; | ||
1139 | |||
1140 | switch (tv_priv->type) { | ||
1141 | default: | ||
1142 | case DRM_MODE_CONNECTOR_Unknown: | ||
1143 | case DRM_MODE_CONNECTOR_Composite: | ||
1144 | tv_ctl |= TV_ENC_OUTPUT_COMPOSITE; | ||
1145 | video_levels = tv_mode->composite_levels; | ||
1146 | color_conversion = tv_mode->composite_color; | ||
1147 | burst_ena = tv_mode->burst_ena; | ||
1148 | break; | ||
1149 | case DRM_MODE_CONNECTOR_Component: | ||
1150 | tv_ctl |= TV_ENC_OUTPUT_COMPONENT; | ||
1151 | video_levels = &component_levels; | ||
1152 | if (tv_mode->burst_ena) | ||
1153 | color_conversion = &sdtv_csc_yprpb; | ||
1154 | else | ||
1155 | color_conversion = &hdtv_csc_yprpb; | ||
1156 | burst_ena = false; | ||
1157 | break; | ||
1158 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
1159 | tv_ctl |= TV_ENC_OUTPUT_SVIDEO; | ||
1160 | video_levels = tv_mode->svideo_levels; | ||
1161 | color_conversion = tv_mode->svideo_color; | ||
1162 | burst_ena = tv_mode->burst_ena; | ||
1163 | break; | ||
1164 | } | ||
1165 | hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) | | ||
1166 | (tv_mode->htotal << TV_HTOTAL_SHIFT); | ||
1167 | |||
1168 | hctl2 = (tv_mode->hburst_start << 16) | | ||
1169 | (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT); | ||
1170 | |||
1171 | if (burst_ena) | ||
1172 | hctl2 |= TV_BURST_ENA; | ||
1173 | |||
1174 | hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) | | ||
1175 | (tv_mode->hblank_end << TV_HBLANK_END_SHIFT); | ||
1176 | |||
1177 | vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) | | ||
1178 | (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) | | ||
1179 | (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT); | ||
1180 | |||
1181 | vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) | | ||
1182 | (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) | | ||
1183 | (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT); | ||
1184 | |||
1185 | vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) | | ||
1186 | (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) | | ||
1187 | (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT); | ||
1188 | |||
1189 | if (tv_mode->veq_ena) | ||
1190 | vctl3 |= TV_EQUAL_ENA; | ||
1191 | |||
1192 | vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) | | ||
1193 | (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT); | ||
1194 | |||
1195 | vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) | | ||
1196 | (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT); | ||
1197 | |||
1198 | vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) | | ||
1199 | (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT); | ||
1200 | |||
1201 | vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | | ||
1202 | (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); | ||
1203 | |||
1204 | if (intel_crtc->pipe == 1) | ||
1205 | tv_ctl |= TV_ENC_PIPEB_SELECT; | ||
1206 | tv_ctl |= tv_mode->oversample; | ||
1207 | |||
1208 | if (tv_mode->progressive) | ||
1209 | tv_ctl |= TV_PROGRESSIVE; | ||
1210 | if (tv_mode->trilevel_sync) | ||
1211 | tv_ctl |= TV_TRILEVEL_SYNC; | ||
1212 | if (tv_mode->pal_burst) | ||
1213 | tv_ctl |= TV_PAL_BURST; | ||
1214 | scctl1 = 0; | ||
1215 | /* dda1 implies valid video levels */ | ||
1216 | if (tv_mode->dda1_inc) { | ||
1217 | scctl1 |= TV_SC_DDA1_EN; | ||
1218 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
1219 | } | ||
1220 | |||
1221 | if (tv_mode->dda2_inc) | ||
1222 | scctl1 |= TV_SC_DDA2_EN; | ||
1223 | |||
1224 | if (tv_mode->dda3_inc) | ||
1225 | scctl1 |= TV_SC_DDA3_EN; | ||
1226 | |||
1227 | scctl1 |= tv_mode->sc_reset; | ||
1228 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; | ||
1229 | |||
1230 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | | ||
1231 | tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT; | ||
1232 | |||
1233 | scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT | | ||
1234 | tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; | ||
1235 | |||
1236 | /* Enable two fixes for the chips that need them. */ | ||
1237 | if (dev->pci_device < 0x2772) | ||
1238 | tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; | ||
1239 | |||
1240 | I915_WRITE(TV_H_CTL_1, hctl1); | ||
1241 | I915_WRITE(TV_H_CTL_2, hctl2); | ||
1242 | I915_WRITE(TV_H_CTL_3, hctl3); | ||
1243 | I915_WRITE(TV_V_CTL_1, vctl1); | ||
1244 | I915_WRITE(TV_V_CTL_2, vctl2); | ||
1245 | I915_WRITE(TV_V_CTL_3, vctl3); | ||
1246 | I915_WRITE(TV_V_CTL_4, vctl4); | ||
1247 | I915_WRITE(TV_V_CTL_5, vctl5); | ||
1248 | I915_WRITE(TV_V_CTL_6, vctl6); | ||
1249 | I915_WRITE(TV_V_CTL_7, vctl7); | ||
1250 | I915_WRITE(TV_SC_CTL_1, scctl1); | ||
1251 | I915_WRITE(TV_SC_CTL_2, scctl2); | ||
1252 | I915_WRITE(TV_SC_CTL_3, scctl3); | ||
1253 | |||
1254 | if (color_conversion) { | ||
1255 | I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | | ||
1256 | color_conversion->gy); | ||
1257 | I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) | | ||
1258 | color_conversion->ay); | ||
1259 | I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | | ||
1260 | color_conversion->gu); | ||
1261 | I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) | | ||
1262 | color_conversion->au); | ||
1263 | I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) | | ||
1264 | color_conversion->gv); | ||
1265 | I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) | | ||
1266 | color_conversion->av); | ||
1267 | } | ||
1268 | |||
1269 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); | ||
1270 | if (video_levels) | ||
1271 | I915_WRITE(TV_CLR_LEVEL, | ||
1272 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | | ||
1273 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); | ||
1274 | { | ||
1275 | int pipeconf_reg = (intel_crtc->pipe == 0) ? | ||
1276 | PIPEACONF : PIPEBCONF; | ||
1277 | int dspcntr_reg = (intel_crtc->plane == 0) ? | ||
1278 | DSPACNTR : DSPBCNTR; | ||
1279 | int pipeconf = I915_READ(pipeconf_reg); | ||
1280 | int dspcntr = I915_READ(dspcntr_reg); | ||
1281 | int dspbase_reg = (intel_crtc->plane == 0) ? | ||
1282 | DSPAADDR : DSPBADDR; | ||
1283 | int xpos = 0x0, ypos = 0x0; | ||
1284 | unsigned int xsize, ysize; | ||
1285 | /* Pipe must be off here */ | ||
1286 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); | ||
1287 | /* Flush the plane changes */ | ||
1288 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1289 | |||
1290 | /* Wait for vblank for the disable to take effect */ | ||
1291 | if (!IS_I9XX(dev)) | ||
1292 | intel_wait_for_vblank(dev); | ||
1293 | |||
1294 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); | ||
1295 | /* Wait for vblank for the disable to take effect. */ | ||
1296 | intel_wait_for_vblank(dev); | ||
1297 | |||
1298 | /* Filter ctl must be set before TV_WIN_SIZE */ | ||
1299 | I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); | ||
1300 | xsize = tv_mode->hblank_start - tv_mode->hblank_end; | ||
1301 | if (tv_mode->progressive) | ||
1302 | ysize = tv_mode->nbr_end + 1; | ||
1303 | else | ||
1304 | ysize = 2*tv_mode->nbr_end + 1; | ||
1305 | |||
1306 | xpos += tv_priv->margin[TV_MARGIN_LEFT]; | ||
1307 | ypos += tv_priv->margin[TV_MARGIN_TOP]; | ||
1308 | xsize -= (tv_priv->margin[TV_MARGIN_LEFT] + | ||
1309 | tv_priv->margin[TV_MARGIN_RIGHT]); | ||
1310 | ysize -= (tv_priv->margin[TV_MARGIN_TOP] + | ||
1311 | tv_priv->margin[TV_MARGIN_BOTTOM]); | ||
1312 | I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); | ||
1313 | I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); | ||
1314 | |||
1315 | I915_WRITE(pipeconf_reg, pipeconf); | ||
1316 | I915_WRITE(dspcntr_reg, dspcntr); | ||
1317 | /* Flush the plane changes */ | ||
1318 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1319 | } | ||
1320 | |||
1321 | j = 0; | ||
1322 | for (i = 0; i < 60; i++) | ||
1323 | I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); | ||
1324 | for (i = 0; i < 60; i++) | ||
1325 | I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); | ||
1326 | for (i = 0; i < 43; i++) | ||
1327 | I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); | ||
1328 | for (i = 0; i < 43; i++) | ||
1329 | I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); | ||
1330 | I915_WRITE(TV_DAC, 0); | ||
1331 | I915_WRITE(TV_CTL, tv_ctl); | ||
1332 | } | ||
1333 | |||
1334 | static const struct drm_display_mode reported_modes[] = { | ||
1335 | { | ||
1336 | .name = "NTSC 480i", | ||
1337 | .clock = 107520, | ||
1338 | .hdisplay = 1280, | ||
1339 | .hsync_start = 1368, | ||
1340 | .hsync_end = 1496, | ||
1341 | .htotal = 1712, | ||
1342 | |||
1343 | .vdisplay = 1024, | ||
1344 | .vsync_start = 1027, | ||
1345 | .vsync_end = 1034, | ||
1346 | .vtotal = 1104, | ||
1347 | .type = DRM_MODE_TYPE_DRIVER, | ||
1348 | }, | ||
1349 | }; | ||
1350 | |||
1351 | /** | ||
1352 | * Detects TV presence by checking for load. | ||
1353 | * | ||
1354 | * Requires that the current pipe's DPLL is active. | ||
1355 | |||
1356 | * \return true if TV is connected. | ||
1357 | * \return false if TV is disconnected. | ||
1358 | */ | ||
1359 | static int | ||
1360 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | ||
1361 | { | ||
1362 | struct drm_encoder *encoder = &intel_output->enc; | ||
1363 | struct drm_device *dev = encoder->dev; | ||
1364 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1365 | unsigned long irqflags; | ||
1366 | u32 tv_ctl, save_tv_ctl; | ||
1367 | u32 tv_dac, save_tv_dac; | ||
1368 | int type = DRM_MODE_CONNECTOR_Unknown; | ||
1369 | |||
1370 | tv_dac = I915_READ(TV_DAC); | ||
1371 | |||
1372 | /* Disable TV interrupts around load detect or we'll recurse */ | ||
1373 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
1374 | i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | | ||
1375 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | ||
1376 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
1377 | |||
1378 | /* | ||
1379 | * Detect TV by polling | ||
1380 | */ | ||
1381 | if (intel_output->load_detect_temp) { | ||
1382 | /* TV not currently running, prod it with destructive detect */ | ||
1383 | save_tv_dac = tv_dac; | ||
1384 | tv_ctl = I915_READ(TV_CTL); | ||
1385 | save_tv_ctl = tv_ctl; | ||
1386 | tv_ctl &= ~TV_ENC_ENABLE; | ||
1387 | tv_ctl &= ~TV_TEST_MODE_MASK; | ||
1388 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; | ||
1389 | tv_dac &= ~TVDAC_SENSE_MASK; | ||
1390 | tv_dac |= (TVDAC_STATE_CHG_EN | | ||
1391 | TVDAC_A_SENSE_CTL | | ||
1392 | TVDAC_B_SENSE_CTL | | ||
1393 | TVDAC_C_SENSE_CTL | | ||
1394 | DAC_CTL_OVERRIDE | | ||
1395 | DAC_A_0_7_V | | ||
1396 | DAC_B_0_7_V | | ||
1397 | DAC_C_0_7_V); | ||
1398 | I915_WRITE(TV_CTL, tv_ctl); | ||
1399 | I915_WRITE(TV_DAC, tv_dac); | ||
1400 | intel_wait_for_vblank(dev); | ||
1401 | tv_dac = I915_READ(TV_DAC); | ||
1402 | I915_WRITE(TV_DAC, save_tv_dac); | ||
1403 | I915_WRITE(TV_CTL, save_tv_ctl); | ||
1404 | } | ||
1405 | /* | ||
1406 | * A B C | ||
1407 | * 0 1 1 Composite | ||
1408 | * 1 0 X svideo | ||
1409 | * 0 0 0 Component | ||
1410 | */ | ||
1411 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { | ||
1412 | DRM_DEBUG("Detected Composite TV connection\n"); | ||
1413 | type = DRM_MODE_CONNECTOR_Composite; | ||
1414 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { | ||
1415 | DRM_DEBUG("Detected S-Video TV connection\n"); | ||
1416 | type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1417 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { | ||
1418 | DRM_DEBUG("Detected Component TV connection\n"); | ||
1419 | type = DRM_MODE_CONNECTOR_Component; | ||
1420 | } else { | ||
1421 | DRM_DEBUG("No TV connection detected\n"); | ||
1422 | type = -1; | ||
1423 | } | ||
1424 | |||
1425 | /* Restore interrupt config */ | ||
1426 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
1427 | i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | | ||
1428 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | ||
1429 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | ||
1430 | |||
1431 | return type; | ||
1432 | } | ||
1433 | |||
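Note: the A/B/C truth table in the comment above is exactly what intel_tv_detect_type() decodes from the DAC sense bits after the forced monitor-detect cycle. As a rough illustration of that decoding, here is a small stand-alone sketch; SENSE_A/B/C and the returned strings are stand-ins for the driver's TVDAC_*_SENSE bits and DRM_MODE_CONNECTOR_* values, not the real definitions.

/* Sketch only: decode three DAC comparator bits into a connector type,
 * following the A/B/C truth table documented in intel_tv_detect_type(). */
#include <stdio.h>

#define SENSE_A (1u << 2)  /* stand-in for TVDAC_A_SENSE */
#define SENSE_B (1u << 1)  /* stand-in for TVDAC_B_SENSE */
#define SENSE_C (1u << 0)  /* stand-in for TVDAC_C_SENSE */
#define SENSE_MASK (SENSE_A | SENSE_B | SENSE_C)

static const char *decode_tv_sense(unsigned int sense)
{
	if ((sense & SENSE_MASK) == (SENSE_B | SENSE_C))
		return "Composite";    /* A=0 B=1 C=1 */
	if ((sense & (SENSE_A | SENSE_B)) == SENSE_A)
		return "S-Video";      /* A=1 B=0 C=don't care */
	if ((sense & SENSE_MASK) == 0)
		return "Component";    /* A=0 B=0 C=0 */
	return "disconnected";         /* any other pattern */
}

int main(void)
{
	printf("%s\n", decode_tv_sense(SENSE_B | SENSE_C)); /* Composite */
	printf("%s\n", decode_tv_sense(SENSE_A));            /* S-Video */
	return 0;
}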
1434 | /** | ||
1435 | * Detect the TV connection. | ||
1436 | * | ||
1437 | * Uses load detection on the encoder's current pipe, or temporarily grabs a | ||
1438 | * free pipe via intel_get_load_detect_pipe(), since probing needs a running pipe. | ||
1439 | */ | ||
1440 | static enum drm_connector_status | ||
1441 | intel_tv_detect(struct drm_connector *connector) | ||
1442 | { | ||
1443 | struct drm_crtc *crtc; | ||
1444 | struct drm_display_mode mode; | ||
1445 | struct intel_output *intel_output = to_intel_output(connector); | ||
1446 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | ||
1447 | struct drm_encoder *encoder = &intel_output->enc; | ||
1448 | int dpms_mode; | ||
1449 | int type = tv_priv->type; | ||
1450 | |||
1451 | mode = reported_modes[0]; | ||
1452 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | ||
1453 | |||
1454 | if (encoder->crtc) { | ||
1455 | type = intel_tv_detect_type(encoder->crtc, intel_output); | ||
1456 | } else { | ||
1457 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); | ||
1458 | if (crtc) { | ||
1459 | type = intel_tv_detect_type(crtc, intel_output); | ||
1460 | intel_release_load_detect_pipe(intel_output, dpms_mode); | ||
1461 | } else | ||
1462 | type = -1; | ||
1463 | } | ||
1464 | |||
1465 | if (type < 0) | ||
1466 | return connector_status_disconnected; | ||
1467 | |||
1468 | return connector_status_connected; | ||
1469 | } | ||
1470 | |||
1471 | static struct input_res { | ||
1472 | char *name; | ||
1473 | int w, h; | ||
1474 | } input_res_table[] = | ||
1475 | { | ||
1476 | {"640x480", 640, 480}, | ||
1477 | {"800x600", 800, 600}, | ||
1478 | {"1024x768", 1024, 768}, | ||
1479 | {"1280x1024", 1280, 1024}, | ||
1480 | {"848x480", 848, 480}, | ||
1481 | {"1280x720", 1280, 720}, | ||
1482 | {"1920x1080", 1920, 1080}, | ||
1483 | }; | ||
1484 | |||
1485 | /** | ||
1486 | * Build the list of input modes offered on the TV connector. | ||
1487 | * | ||
1488 | * Modes cannot be probed over a TV connection, so a fixed table of common | ||
1489 | * input resolutions is filtered against the limits of the selected TV standard. | ||
1490 | */ | ||
1491 | |||
1492 | static int | ||
1493 | intel_tv_get_modes(struct drm_connector *connector) | ||
1494 | { | ||
1495 | struct drm_display_mode *mode_ptr; | ||
1496 | struct intel_output *intel_output = to_intel_output(connector); | ||
1497 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | ||
1498 | int j; | ||
1499 | |||
1500 | for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); | ||
1501 | j++) { | ||
1502 | struct input_res *input = &input_res_table[j]; | ||
1503 | unsigned int hactive_s = input->w; | ||
1504 | unsigned int vactive_s = input->h; | ||
1505 | |||
1506 | if (tv_mode->max_srcw && input->w > tv_mode->max_srcw) | ||
1507 | continue; | ||
1508 | |||
1509 | if (input->w > 1024 && (!tv_mode->progressive | ||
1510 | && !tv_mode->component_only)) | ||
1511 | continue; | ||
1512 | |||
1513 | mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), | ||
1514 | DRM_MEM_DRIVER); | ||
1515 | strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); | ||
1516 | |||
1517 | mode_ptr->hdisplay = hactive_s; | ||
1518 | mode_ptr->hsync_start = hactive_s + 1; | ||
1519 | mode_ptr->hsync_end = hactive_s + 64; | ||
1520 | if (mode_ptr->hsync_end <= mode_ptr->hsync_start) | ||
1521 | mode_ptr->hsync_end = mode_ptr->hsync_start + 1; | ||
1522 | mode_ptr->htotal = hactive_s + 96; | ||
1523 | |||
1524 | mode_ptr->vdisplay = vactive_s; | ||
1525 | mode_ptr->vsync_start = vactive_s + 1; | ||
1526 | mode_ptr->vsync_end = vactive_s + 32; | ||
1527 | if (mode_ptr->vsync_end <= mode_ptr->vsync_start) | ||
1528 | mode_ptr->vsync_end = mode_ptr->vsync_start + 1; | ||
1529 | mode_ptr->vtotal = vactive_s + 33; | ||
1530 | |||
1531 | mode_ptr->clock = (int) (tv_mode->refresh * | ||
1532 | mode_ptr->vtotal * | ||
1533 | mode_ptr->htotal / 1000) / 1000; | ||
1534 | |||
1535 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; | ||
1536 | drm_mode_probed_add(connector, mode_ptr); | ||
1537 | } | ||
1538 | |||
1539 | return 0; | ||
1540 | } | ||
1541 | |||
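Note: each synthesized mode above gets a pixel clock computed from the TV standard's refresh rate and the mode's total timings. A minimal sketch of that arithmetic follows, assuming (as the driver's structures suggest) that the refresh rate is kept in millihertz and the mode clock in kHz; the timings in main() are only example numbers.

/* Sketch only: pixel-clock estimate for a synthesized TV input mode.
 * A 64-bit intermediate is used so refresh * htotal * vtotal cannot
 * overflow a 32-bit integer. */
#include <stdint.h>
#include <stdio.h>

static int tv_mode_clock_khz(int refresh_millihz, int htotal, int vtotal)
{
	uint64_t hz_x1000 = (uint64_t)refresh_millihz * htotal * vtotal;

	return (int)(hz_x1000 / 1000 / 1000);  /* millihertz * pixels -> kHz */
}

int main(void)
{
	/* 848x480 active area plus the 96/33 pixels of blanking added above,
	 * at a 59.94 Hz rate (59940 millihertz). */
	printf("%d kHz\n", tv_mode_clock_khz(59940, 848 + 96, 480 + 33));
	return 0;
}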
1542 | static void | ||
1543 | intel_tv_destroy(struct drm_connector *connector) | ||
1544 | { | ||
1545 | struct intel_output *intel_output = to_intel_output(connector); | ||
1546 | |||
1547 | drm_sysfs_connector_remove(connector); | ||
1548 | drm_connector_cleanup(connector); | ||
1549 | drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv), | ||
1550 | DRM_MEM_DRIVER); | ||
1551 | } | ||
1552 | |||
1553 | |||
1554 | static int | ||
1555 | intel_tv_set_property(struct drm_connector *connector, struct drm_property *property, | ||
1556 | uint64_t val) | ||
1557 | { | ||
1558 | struct drm_device *dev = connector->dev; | ||
1559 | struct intel_output *intel_output = to_intel_output(connector); | ||
1560 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | ||
1561 | int ret = 0; | ||
1562 | |||
1563 | ret = drm_connector_property_set_value(connector, property, val); | ||
1564 | if (ret < 0) | ||
1565 | goto out; | ||
1566 | |||
1567 | if (property == dev->mode_config.tv_left_margin_property) | ||
1568 | tv_priv->margin[TV_MARGIN_LEFT] = val; | ||
1569 | else if (property == dev->mode_config.tv_right_margin_property) | ||
1570 | tv_priv->margin[TV_MARGIN_RIGHT] = val; | ||
1571 | else if (property == dev->mode_config.tv_top_margin_property) | ||
1572 | tv_priv->margin[TV_MARGIN_TOP] = val; | ||
1573 | else if (property == dev->mode_config.tv_bottom_margin_property) | ||
1574 | tv_priv->margin[TV_MARGIN_BOTTOM] = val; | ||
1575 | else if (property == dev->mode_config.tv_mode_property) { | ||
1576 | if (val >= NUM_TV_MODES) { | ||
1577 | ret = -EINVAL; | ||
1578 | goto out; | ||
1579 | } | ||
1580 | tv_priv->tv_format = tv_modes[val].name; | ||
1581 | intel_tv_mode_set(&intel_output->enc, NULL, NULL); | ||
1582 | } else { | ||
1583 | ret = -EINVAL; | ||
1584 | goto out; | ||
1585 | } | ||
1586 | |||
1587 | intel_tv_mode_set(&intel_output->enc, NULL, NULL); | ||
1588 | out: | ||
1589 | return ret; | ||
1590 | } | ||
1591 | |||
1592 | static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { | ||
1593 | .dpms = intel_tv_dpms, | ||
1594 | .mode_fixup = intel_tv_mode_fixup, | ||
1595 | .prepare = intel_encoder_prepare, | ||
1596 | .mode_set = intel_tv_mode_set, | ||
1597 | .commit = intel_encoder_commit, | ||
1598 | }; | ||
1599 | |||
1600 | static const struct drm_connector_funcs intel_tv_connector_funcs = { | ||
1601 | .save = intel_tv_save, | ||
1602 | .restore = intel_tv_restore, | ||
1603 | .detect = intel_tv_detect, | ||
1604 | .destroy = intel_tv_destroy, | ||
1605 | .set_property = intel_tv_set_property, | ||
1606 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1607 | }; | ||
1608 | |||
1609 | static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { | ||
1610 | .mode_valid = intel_tv_mode_valid, | ||
1611 | .get_modes = intel_tv_get_modes, | ||
1612 | .best_encoder = intel_best_encoder, | ||
1613 | }; | ||
1614 | |||
1615 | static void intel_tv_enc_destroy(struct drm_encoder *encoder) | ||
1616 | { | ||
1617 | drm_encoder_cleanup(encoder); | ||
1618 | } | ||
1619 | |||
1620 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { | ||
1621 | .destroy = intel_tv_enc_destroy, | ||
1622 | }; | ||
1623 | |||
1624 | |||
1625 | void | ||
1626 | intel_tv_init(struct drm_device *dev) | ||
1627 | { | ||
1628 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1629 | struct drm_connector *connector; | ||
1630 | struct intel_output *intel_output; | ||
1631 | struct intel_tv_priv *tv_priv; | ||
1632 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | ||
1633 | char **tv_format_names; | ||
1634 | int i, initial_mode = 0; | ||
1635 | |||
1636 | if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) | ||
1637 | return; | ||
1638 | |||
1639 | /* Even if we have an encoder we may not have a connector */ | ||
1640 | if (!dev_priv->int_tv_support) | ||
1641 | return; | ||
1642 | |||
1643 | /* | ||
1644 | * Sanity check the TV output by checking to see if the | ||
1645 | * DAC register holds a value | ||
1646 | */ | ||
1647 | save_tv_dac = I915_READ(TV_DAC); | ||
1648 | |||
1649 | I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); | ||
1650 | tv_dac_on = I915_READ(TV_DAC); | ||
1651 | |||
1652 | I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); | ||
1653 | tv_dac_off = I915_READ(TV_DAC); | ||
1654 | |||
1655 | I915_WRITE(TV_DAC, save_tv_dac); | ||
1656 | |||
1657 | /* | ||
1658 | * If the register does not hold the state change enable | ||
1659 | * bit, (either as a 0 or a 1), assume it doesn't really | ||
1660 | * exist | ||
1661 | */ | ||
1662 | if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 || | ||
1663 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) | ||
1664 | return; | ||
1665 | |||
1666 | intel_output = drm_calloc(1, sizeof(struct intel_output) + | ||
1667 | sizeof(struct intel_tv_priv), DRM_MEM_DRIVER); | ||
1668 | if (!intel_output) { | ||
1669 | return; | ||
1670 | } | ||
1671 | connector = &intel_output->base; | ||
1672 | |||
1673 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | ||
1674 | DRM_MODE_CONNECTOR_SVIDEO); | ||
1675 | |||
1676 | drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, | ||
1677 | DRM_MODE_ENCODER_TVDAC); | ||
1678 | |||
1679 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | ||
1680 | tv_priv = (struct intel_tv_priv *)(intel_output + 1); | ||
1681 | intel_output->type = INTEL_OUTPUT_TVOUT; | ||
1682 | intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | ||
1683 | intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | ||
1684 | intel_output->dev_priv = tv_priv; | ||
1685 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; | ||
1686 | |||
1687 | /* BIOS margin values */ | ||
1688 | tv_priv->margin[TV_MARGIN_LEFT] = 54; | ||
1689 | tv_priv->margin[TV_MARGIN_TOP] = 36; | ||
1690 | tv_priv->margin[TV_MARGIN_RIGHT] = 46; | ||
1691 | tv_priv->margin[TV_MARGIN_BOTTOM] = 37; | ||
1692 | |||
1693 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); | ||
1694 | |||
1695 | drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); | ||
1696 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); | ||
1697 | connector->interlace_allowed = false; | ||
1698 | connector->doublescan_allowed = false; | ||
1699 | |||
1700 | /* Create TV properties then attach current values */ | ||
1701 | tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, | ||
1702 | DRM_MEM_DRIVER); | ||
1703 | if (!tv_format_names) | ||
1704 | goto out; | ||
1705 | for (i = 0; i < NUM_TV_MODES; i++) | ||
1706 | tv_format_names[i] = tv_modes[i].name; | ||
1707 | drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); | ||
1708 | |||
1709 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, | ||
1710 | initial_mode); | ||
1711 | drm_connector_attach_property(connector, | ||
1712 | dev->mode_config.tv_left_margin_property, | ||
1713 | tv_priv->margin[TV_MARGIN_LEFT]); | ||
1714 | drm_connector_attach_property(connector, | ||
1715 | dev->mode_config.tv_top_margin_property, | ||
1716 | tv_priv->margin[TV_MARGIN_TOP]); | ||
1717 | drm_connector_attach_property(connector, | ||
1718 | dev->mode_config.tv_right_margin_property, | ||
1719 | tv_priv->margin[TV_MARGIN_RIGHT]); | ||
1720 | drm_connector_attach_property(connector, | ||
1721 | dev->mode_config.tv_bottom_margin_property, | ||
1722 | tv_priv->margin[TV_MARGIN_BOTTOM]); | ||
1723 | out: | ||
1724 | drm_sysfs_connector_add(connector); | ||
1725 | } | ||
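Note: intel_tv_init() above decides whether the TV DAC actually exists by toggling TVDAC_STATE_CHG_EN and checking that the bit both sticks when set and drops when cleared. The same probe pattern is sketched below in isolation against a mocked register; read_reg()/write_reg() and STATE_CHG_EN are stand-ins for the driver's I915_READ/I915_WRITE accessors and register bit, not the real API.

/* Sketch only: probe whether a register really exists by toggling a bit
 * and checking that the hardware both latches and clears it. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATE_CHG_EN (1u << 31)    /* stand-in for TVDAC_STATE_CHG_EN */

static uint32_t fake_reg;           /* mock register backing store */
static uint32_t read_reg(void) { return fake_reg; }
static void write_reg(uint32_t v) { fake_reg = v; }

static bool dac_register_exists(void)
{
	uint32_t saved = read_reg();
	uint32_t on, off;

	write_reg(saved | STATE_CHG_EN);
	on = read_reg();
	write_reg(saved & ~STATE_CHG_EN);
	off = read_reg();
	write_reg(saved);               /* restore the original value */

	/* a real register holds the bit when set and drops it when cleared */
	return (on & STATE_CHG_EN) && !(off & STATE_CHG_EN);
}

int main(void)
{
	printf("dac present: %d\n", dac_register_exists());
	return 0;
}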
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index c1d12dbfa8d8..b49c5ff29585 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c | |||
@@ -396,6 +396,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) | |||
396 | int mga_driver_load(struct drm_device * dev, unsigned long flags) | 396 | int mga_driver_load(struct drm_device * dev, unsigned long flags) |
397 | { | 397 | { |
398 | drm_mga_private_t *dev_priv; | 398 | drm_mga_private_t *dev_priv; |
399 | int ret; | ||
399 | 400 | ||
400 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | 401 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); |
401 | if (!dev_priv) | 402 | if (!dev_priv) |
@@ -415,6 +416,13 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags) | |||
415 | dev->types[7] = _DRM_STAT_PRIMARY; | 416 | dev->types[7] = _DRM_STAT_PRIMARY; |
416 | dev->types[8] = _DRM_STAT_SECONDARY; | 417 | dev->types[8] = _DRM_STAT_SECONDARY; |
417 | 418 | ||
419 | ret = drm_vblank_init(dev, 1); | ||
420 | |||
421 | if (ret) { | ||
422 | (void) mga_driver_unload(dev); | ||
423 | return ret; | ||
424 | } | ||
425 | |||
418 | return 0; | 426 | return 0; |
419 | } | 427 | } |
420 | 428 | ||
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c index bab42f41188b..daa6041a483a 100644 --- a/drivers/gpu/drm/mga/mga_irq.c +++ b/drivers/gpu/drm/mga/mga_irq.c | |||
@@ -152,11 +152,6 @@ void mga_driver_irq_preinstall(struct drm_device * dev) | |||
152 | int mga_driver_irq_postinstall(struct drm_device *dev) | 152 | int mga_driver_irq_postinstall(struct drm_device *dev) |
153 | { | 153 | { |
154 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | 154 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
155 | int ret; | ||
156 | |||
157 | ret = drm_vblank_init(dev, 1); | ||
158 | if (ret) | ||
159 | return ret; | ||
160 | 155 | ||
161 | DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); | 156 | DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); |
162 | 157 | ||
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 3265d53ba91f..601f4c0e5da5 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c | |||
@@ -45,6 +45,7 @@ static struct drm_driver driver = { | |||
45 | DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | | 45 | DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | |
46 | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, | 46 | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, |
47 | .dev_priv_size = sizeof(drm_r128_buf_priv_t), | 47 | .dev_priv_size = sizeof(drm_r128_buf_priv_t), |
48 | .load = r128_driver_load, | ||
48 | .preclose = r128_driver_preclose, | 49 | .preclose = r128_driver_preclose, |
49 | .lastclose = r128_driver_lastclose, | 50 | .lastclose = r128_driver_lastclose, |
50 | .get_vblank_counter = r128_get_vblank_counter, | 51 | .get_vblank_counter = r128_get_vblank_counter, |
@@ -84,6 +85,11 @@ static struct drm_driver driver = { | |||
84 | .patchlevel = DRIVER_PATCHLEVEL, | 85 | .patchlevel = DRIVER_PATCHLEVEL, |
85 | }; | 86 | }; |
86 | 87 | ||
88 | int r128_driver_load(struct drm_device * dev, unsigned long flags) | ||
89 | { | ||
90 | return drm_vblank_init(dev, 1); | ||
91 | } | ||
92 | |||
87 | static int __init r128_init(void) | 93 | static int __init r128_init(void) |
88 | { | 94 | { |
89 | driver.num_ioctls = r128_max_ioctl; | 95 | driver.num_ioctls = r128_max_ioctl; |
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h index 5898b274279d..797a26c42dab 100644 --- a/drivers/gpu/drm/r128/r128_drv.h +++ b/drivers/gpu/drm/r128/r128_drv.h | |||
@@ -159,6 +159,7 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev); | |||
159 | extern int r128_driver_irq_postinstall(struct drm_device *dev); | 159 | extern int r128_driver_irq_postinstall(struct drm_device *dev); |
160 | extern void r128_driver_irq_uninstall(struct drm_device * dev); | 160 | extern void r128_driver_irq_uninstall(struct drm_device * dev); |
161 | extern void r128_driver_lastclose(struct drm_device * dev); | 161 | extern void r128_driver_lastclose(struct drm_device * dev); |
162 | extern int r128_driver_load(struct drm_device * dev, unsigned long flags); | ||
162 | extern void r128_driver_preclose(struct drm_device * dev, | 163 | extern void r128_driver_preclose(struct drm_device * dev, |
163 | struct drm_file *file_priv); | 164 | struct drm_file *file_priv); |
164 | 165 | ||
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c index d7349012a680..69810fb8ac49 100644 --- a/drivers/gpu/drm/r128/r128_irq.c +++ b/drivers/gpu/drm/r128/r128_irq.c | |||
@@ -102,7 +102,7 @@ void r128_driver_irq_preinstall(struct drm_device * dev) | |||
102 | 102 | ||
103 | int r128_driver_irq_postinstall(struct drm_device *dev) | 103 | int r128_driver_irq_postinstall(struct drm_device *dev) |
104 | { | 104 | { |
105 | return drm_vblank_init(dev, 1); | 105 | return 0; |
106 | } | 106 | } |
107 | 107 | ||
108 | void r128_driver_irq_uninstall(struct drm_device * dev) | 108 | void r128_driver_irq_uninstall(struct drm_device * dev) |
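Note: the mga and r128 hunks above both move drm_vblank_init() out of the irq_postinstall hook and into a driver load path (mga_driver_load() and the new r128_driver_load()), so vblank bookkeeping is set up once at load time and a failure can be unwound before the device is exposed. Below is a minimal sketch of that load/unwind shape, using hypothetical names rather than the real DRM entry points.

/* Sketch only: initialize a subsystem at load time and unwind on failure,
 * mirroring the drm_vblank_init() placement in the hunks above. */
#include <errno.h>
#include <stdio.h>

struct fake_dev { int vblank_ready; };

static int fake_vblank_init(struct fake_dev *dev, int num_crtcs)
{
	if (num_crtcs < 1)
		return -EINVAL;         /* report errors instead of half-initializing */
	dev->vblank_ready = num_crtcs;
	return 0;
}

static void fake_driver_unload(struct fake_dev *dev)
{
	dev->vblank_ready = 0;          /* undo anything set up so far */
}

static int fake_driver_load(struct fake_dev *dev)
{
	int ret = fake_vblank_init(dev, 1);

	if (ret) {
		fake_driver_unload(dev); /* unwind before reporting failure */
		return ret;
	}
	return 0;                        /* device is now safe to expose */
}

int main(void)
{
	struct fake_dev dev = { 0 };

	printf("load: %d\n", fake_driver_load(&dev));
	return 0;
}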
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 4b27d9abb7bc..cace3964feeb 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -860,12 +860,12 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) | |||
860 | * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must | 860 | * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must |
861 | * be careful about how this function is called. | 861 | * be careful about how this function is called. |
862 | */ | 862 | */ |
863 | static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) | 863 | static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) |
864 | { | 864 | { |
865 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
866 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | 865 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; |
866 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
867 | 867 | ||
868 | buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; | 868 | buf_priv->age = ++master_priv->sarea_priv->last_dispatch; |
869 | buf->pending = 1; | 869 | buf->pending = 1; |
870 | buf->used = 0; | 870 | buf->used = 0; |
871 | } | 871 | } |
@@ -1027,6 +1027,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1027 | drm_radeon_kcmd_buffer_t *cmdbuf) | 1027 | drm_radeon_kcmd_buffer_t *cmdbuf) |
1028 | { | 1028 | { |
1029 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1029 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1030 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
1030 | struct drm_device_dma *dma = dev->dma; | 1031 | struct drm_device_dma *dma = dev->dma; |
1031 | struct drm_buf *buf = NULL; | 1032 | struct drm_buf *buf = NULL; |
1032 | int emit_dispatch_age = 0; | 1033 | int emit_dispatch_age = 0; |
@@ -1134,7 +1135,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1134 | } | 1135 | } |
1135 | 1136 | ||
1136 | emit_dispatch_age = 1; | 1137 | emit_dispatch_age = 1; |
1137 | r300_discard_buffer(dev, buf); | 1138 | r300_discard_buffer(dev, file_priv->master, buf); |
1138 | break; | 1139 | break; |
1139 | 1140 | ||
1140 | case R300_CMD_WAIT: | 1141 | case R300_CMD_WAIT: |
@@ -1189,7 +1190,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1189 | 1190 | ||
1190 | /* Emit the vertex buffer age */ | 1191 | /* Emit the vertex buffer age */ |
1191 | BEGIN_RING(2); | 1192 | BEGIN_RING(2); |
1192 | RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch); | 1193 | RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch); |
1193 | ADVANCE_RING(); | 1194 | ADVANCE_RING(); |
1194 | } | 1195 | } |
1195 | 1196 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index abdc1ae38467..df4cf97e5d97 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | #include "drm.h" | 33 | #include "drm.h" |
34 | #include "drm_sarea.h" | ||
34 | #include "radeon_drm.h" | 35 | #include "radeon_drm.h" |
35 | #include "radeon_drv.h" | 36 | #include "radeon_drv.h" |
36 | #include "r300_reg.h" | 37 | #include "r300_reg.h" |
@@ -667,15 +668,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, | |||
667 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); | 668 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); |
668 | } /* PCIE cards appear not to need this */ | 669 | } /* PCIE cards appear not to need this */ |
669 | 670 | ||
670 | dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; | 671 | dev_priv->scratch[0] = 0; |
671 | RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); | 672 | RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); |
672 | 673 | ||
673 | dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; | 674 | dev_priv->scratch[1] = 0; |
674 | RADEON_WRITE(RADEON_LAST_DISPATCH_REG, | 675 | RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0); |
675 | dev_priv->sarea_priv->last_dispatch); | ||
676 | 676 | ||
677 | dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; | 677 | dev_priv->scratch[2] = 0; |
678 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); | 678 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); |
679 | 679 | ||
680 | radeon_do_wait_for_idle(dev_priv); | 680 | radeon_do_wait_for_idle(dev_priv); |
681 | 681 | ||
@@ -871,9 +871,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) | |||
871 | } | 871 | } |
872 | } | 872 | } |
873 | 873 | ||
874 | static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | 874 | static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, |
875 | struct drm_file *file_priv) | ||
875 | { | 876 | { |
876 | drm_radeon_private_t *dev_priv = dev->dev_private; | 877 | drm_radeon_private_t *dev_priv = dev->dev_private; |
878 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
877 | 879 | ||
878 | DRM_DEBUG("\n"); | 880 | DRM_DEBUG("\n"); |
879 | 881 | ||
@@ -998,8 +1000,8 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
998 | dev_priv->buffers_offset = init->buffers_offset; | 1000 | dev_priv->buffers_offset = init->buffers_offset; |
999 | dev_priv->gart_textures_offset = init->gart_textures_offset; | 1001 | dev_priv->gart_textures_offset = init->gart_textures_offset; |
1000 | 1002 | ||
1001 | dev_priv->sarea = drm_getsarea(dev); | 1003 | master_priv->sarea = drm_getsarea(dev); |
1002 | if (!dev_priv->sarea) { | 1004 | if (!master_priv->sarea) { |
1003 | DRM_ERROR("could not find sarea!\n"); | 1005 | DRM_ERROR("could not find sarea!\n"); |
1004 | radeon_do_cleanup_cp(dev); | 1006 | radeon_do_cleanup_cp(dev); |
1005 | return -EINVAL; | 1007 | return -EINVAL; |
@@ -1035,15 +1037,11 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1035 | } | 1037 | } |
1036 | } | 1038 | } |
1037 | 1039 | ||
1038 | dev_priv->sarea_priv = | ||
1039 | (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle + | ||
1040 | init->sarea_priv_offset); | ||
1041 | |||
1042 | #if __OS_HAS_AGP | 1040 | #if __OS_HAS_AGP |
1043 | if (dev_priv->flags & RADEON_IS_AGP) { | 1041 | if (dev_priv->flags & RADEON_IS_AGP) { |
1044 | drm_core_ioremap(dev_priv->cp_ring, dev); | 1042 | drm_core_ioremap_wc(dev_priv->cp_ring, dev); |
1045 | drm_core_ioremap(dev_priv->ring_rptr, dev); | 1043 | drm_core_ioremap_wc(dev_priv->ring_rptr, dev); |
1046 | drm_core_ioremap(dev->agp_buffer_map, dev); | 1044 | drm_core_ioremap_wc(dev->agp_buffer_map, dev); |
1047 | if (!dev_priv->cp_ring->handle || | 1045 | if (!dev_priv->cp_ring->handle || |
1048 | !dev_priv->ring_rptr->handle || | 1046 | !dev_priv->ring_rptr->handle || |
1049 | !dev->agp_buffer_map->handle) { | 1047 | !dev->agp_buffer_map->handle) { |
@@ -1329,7 +1327,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri | |||
1329 | case RADEON_INIT_CP: | 1327 | case RADEON_INIT_CP: |
1330 | case RADEON_INIT_R200_CP: | 1328 | case RADEON_INIT_R200_CP: |
1331 | case RADEON_INIT_R300_CP: | 1329 | case RADEON_INIT_R300_CP: |
1332 | return radeon_do_init_cp(dev, init); | 1330 | return radeon_do_init_cp(dev, init, file_priv); |
1333 | case RADEON_CLEANUP_CP: | 1331 | case RADEON_CLEANUP_CP: |
1334 | return radeon_do_cleanup_cp(dev); | 1332 | return radeon_do_cleanup_cp(dev); |
1335 | } | 1333 | } |
@@ -1757,11 +1755,62 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
1757 | if (ret != 0) | 1755 | if (ret != 0) |
1758 | return ret; | 1756 | return ret; |
1759 | 1757 | ||
1758 | ret = drm_vblank_init(dev, 2); | ||
1759 | if (ret) { | ||
1760 | radeon_driver_unload(dev); | ||
1761 | return ret; | ||
1762 | } | ||
1763 | |||
1760 | DRM_DEBUG("%s card detected\n", | 1764 | DRM_DEBUG("%s card detected\n", |
1761 | ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); | 1765 | ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); |
1762 | return ret; | 1766 | return ret; |
1763 | } | 1767 | } |
1764 | 1768 | ||
1769 | int radeon_master_create(struct drm_device *dev, struct drm_master *master) | ||
1770 | { | ||
1771 | struct drm_radeon_master_private *master_priv; | ||
1772 | unsigned long sareapage; | ||
1773 | int ret; | ||
1774 | |||
1775 | master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
1776 | if (!master_priv) | ||
1777 | return -ENOMEM; | ||
1778 | |||
1779 | /* prebuild the SAREA */ | ||
1780 | sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); | ||
1781 | ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, | ||
1782 | &master_priv->sarea); | ||
1783 | if (ret) { | ||
1784 | DRM_ERROR("SAREA setup failed\n"); | ||
1785 | return ret; | ||
1786 | } | ||
1787 | master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); | ||
1788 | master_priv->sarea_priv->pfCurrentPage = 0; | ||
1789 | |||
1790 | master->driver_priv = master_priv; | ||
1791 | return 0; | ||
1792 | } | ||
1793 | |||
1794 | void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) | ||
1795 | { | ||
1796 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
1797 | |||
1798 | if (!master_priv) | ||
1799 | return; | ||
1800 | |||
1801 | if (master_priv->sarea_priv && | ||
1802 | master_priv->sarea_priv->pfCurrentPage != 0) | ||
1803 | radeon_cp_dispatch_flip(dev, master); | ||
1804 | |||
1805 | master_priv->sarea_priv = NULL; | ||
1806 | if (master_priv->sarea) | ||
1807 | drm_rmmap_locked(dev, master_priv->sarea); | ||
1808 | |||
1809 | drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
1810 | |||
1811 | master->driver_priv = NULL; | ||
1812 | } | ||
1813 | |||
1765 | /* Create mappings for registers and framebuffer so userland doesn't necessarily | 1814 | /* Create mappings for registers and framebuffer so userland doesn't necessarily |
1766 | * have to find them. | 1815 | * have to find them. |
1767 | */ | 1816 | */ |
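Note: the radeon changes above retire the single device-wide sarea_priv pointer; the SAREA is now allocated per DRM master in radeon_master_create() and freed in radeon_master_destroy(), so every master (for example, each X server) owns its own shared area and page-flip state. A rough user-space sketch of that ownership pattern follows; the types and helpers are illustrative stand-ins, not the real DRM structures.

/* Sketch only: per-master private state with create/destroy hooks,
 * mirroring the radeon_master_create()/radeon_master_destroy() pairing. */
#include <errno.h>
#include <stdlib.h>

struct fake_sarea { int pf_current_page; };

struct fake_master_priv {
	struct fake_sarea *sarea;       /* shared area owned by this master only */
};

static int fake_master_create(struct fake_master_priv **out)
{
	struct fake_master_priv *mp = calloc(1, sizeof(*mp));

	if (!mp)
		return -ENOMEM;
	mp->sarea = calloc(1, sizeof(*mp->sarea));
	if (!mp->sarea) {
		free(mp);
		return -ENOMEM;         /* unwind the partial allocation */
	}
	*out = mp;
	return 0;
}

static void fake_master_destroy(struct fake_master_priv *mp)
{
	if (!mp)
		return;
	free(mp->sarea);                /* each master tears down only its own state */
	free(mp);
}

int main(void)
{
	struct fake_master_priv *mp = NULL;

	if (fake_master_create(&mp) == 0)
		fake_master_destroy(mp);
	return 0;
}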
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 71af746a4e47..fef207881f45 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -96,6 +96,8 @@ static struct drm_driver driver = { | |||
96 | .enable_vblank = radeon_enable_vblank, | 96 | .enable_vblank = radeon_enable_vblank, |
97 | .disable_vblank = radeon_disable_vblank, | 97 | .disable_vblank = radeon_disable_vblank, |
98 | .dri_library_name = dri_library_name, | 98 | .dri_library_name = dri_library_name, |
99 | .master_create = radeon_master_create, | ||
100 | .master_destroy = radeon_master_destroy, | ||
99 | .irq_preinstall = radeon_driver_irq_preinstall, | 101 | .irq_preinstall = radeon_driver_irq_preinstall, |
100 | .irq_postinstall = radeon_driver_irq_postinstall, | 102 | .irq_postinstall = radeon_driver_irq_postinstall, |
101 | .irq_uninstall = radeon_driver_irq_uninstall, | 103 | .irq_uninstall = radeon_driver_irq_uninstall, |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 7a183789be97..490bc7ceef60 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -226,9 +226,13 @@ struct radeon_virt_surface { | |||
226 | #define RADEON_FLUSH_EMITED (1 < 0) | 226 | #define RADEON_FLUSH_EMITED (1 < 0) |
227 | #define RADEON_PURGE_EMITED (1 < 1) | 227 | #define RADEON_PURGE_EMITED (1 < 1) |
228 | 228 | ||
229 | struct drm_radeon_master_private { | ||
230 | drm_local_map_t *sarea; | ||
231 | drm_radeon_sarea_t *sarea_priv; | ||
232 | }; | ||
233 | |||
229 | typedef struct drm_radeon_private { | 234 | typedef struct drm_radeon_private { |
230 | drm_radeon_ring_buffer_t ring; | 235 | drm_radeon_ring_buffer_t ring; |
231 | drm_radeon_sarea_t *sarea_priv; | ||
232 | 236 | ||
233 | u32 fb_location; | 237 | u32 fb_location; |
234 | u32 fb_size; | 238 | u32 fb_size; |
@@ -299,7 +303,6 @@ typedef struct drm_radeon_private { | |||
299 | atomic_t swi_emitted; | 303 | atomic_t swi_emitted; |
300 | int vblank_crtc; | 304 | int vblank_crtc; |
301 | uint32_t irq_enable_reg; | 305 | uint32_t irq_enable_reg; |
302 | int irq_enabled; | ||
303 | uint32_t r500_disp_irq_reg; | 306 | uint32_t r500_disp_irq_reg; |
304 | 307 | ||
305 | struct radeon_surface surfaces[RADEON_MAX_SURFACES]; | 308 | struct radeon_surface surfaces[RADEON_MAX_SURFACES]; |
@@ -410,6 +413,9 @@ extern int radeon_driver_open(struct drm_device *dev, | |||
410 | extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, | 413 | extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, |
411 | unsigned long arg); | 414 | unsigned long arg); |
412 | 415 | ||
416 | extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); | ||
417 | extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); | ||
418 | extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master); | ||
413 | /* r300_cmdbuf.c */ | 419 | /* r300_cmdbuf.c */ |
414 | extern void r300_init_reg_flags(struct drm_device *dev); | 420 | extern void r300_init_reg_flags(struct drm_device *dev); |
415 | 421 | ||
@@ -1336,8 +1342,9 @@ do { \ | |||
1336 | } while (0) | 1342 | } while (0) |
1337 | 1343 | ||
1338 | #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ | 1344 | #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ |
1339 | do { \ | 1345 | do { \ |
1340 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ | 1346 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \ |
1347 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \ | ||
1341 | if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ | 1348 | if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ |
1342 | int __ret = radeon_do_cp_idle( dev_priv ); \ | 1349 | int __ret = radeon_do_cp_idle( dev_priv ); \ |
1343 | if ( __ret ) return __ret; \ | 1350 | if ( __ret ) return __ret; \ |
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 5079f7054a2f..8289e16419a8 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) | |||
44 | else | 44 | else |
45 | dev_priv->irq_enable_reg &= ~mask; | 45 | dev_priv->irq_enable_reg &= ~mask; |
46 | 46 | ||
47 | RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); | 47 | if (dev->irq_enabled) |
48 | RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) | 51 | static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) |
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) | |||
56 | else | 57 | else |
57 | dev_priv->r500_disp_irq_reg &= ~mask; | 58 | dev_priv->r500_disp_irq_reg &= ~mask; |
58 | 59 | ||
59 | RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); | 60 | if (dev->irq_enabled) |
61 | RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); | ||
60 | } | 62 | } |
61 | 63 | ||
62 | int radeon_enable_vblank(struct drm_device *dev, int crtc) | 64 | int radeon_enable_vblank(struct drm_device *dev, int crtc) |
@@ -337,15 +339,10 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) | |||
337 | { | 339 | { |
338 | drm_radeon_private_t *dev_priv = | 340 | drm_radeon_private_t *dev_priv = |
339 | (drm_radeon_private_t *) dev->dev_private; | 341 | (drm_radeon_private_t *) dev->dev_private; |
340 | int ret; | ||
341 | 342 | ||
342 | atomic_set(&dev_priv->swi_emitted, 0); | 343 | atomic_set(&dev_priv->swi_emitted, 0); |
343 | DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); | 344 | DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); |
344 | 345 | ||
345 | ret = drm_vblank_init(dev, 2); | ||
346 | if (ret) | ||
347 | return ret; | ||
348 | |||
349 | dev->max_vblank_count = 0x001fffff; | 346 | dev->max_vblank_count = 0x001fffff; |
350 | 347 | ||
351 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); | 348 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); |
@@ -360,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev) | |||
360 | if (!dev_priv) | 357 | if (!dev_priv) |
361 | return; | 358 | return; |
362 | 359 | ||
363 | dev_priv->irq_enabled = 0; | ||
364 | |||
365 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) | 360 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) |
366 | RADEON_WRITE(R500_DxMODE_INT_MASK, 0); | 361 | RADEON_WRITE(R500_DxMODE_INT_MASK, 0); |
367 | /* Disable *all* interrupts */ | 362 | /* Disable *all* interrupts */ |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 5d7153fcc7b0..ef940a079dcb 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -742,13 +742,14 @@ static struct { | |||
742 | */ | 742 | */ |
743 | 743 | ||
744 | static void radeon_clear_box(drm_radeon_private_t * dev_priv, | 744 | static void radeon_clear_box(drm_radeon_private_t * dev_priv, |
745 | struct drm_radeon_master_private *master_priv, | ||
745 | int x, int y, int w, int h, int r, int g, int b) | 746 | int x, int y, int w, int h, int r, int g, int b) |
746 | { | 747 | { |
747 | u32 color; | 748 | u32 color; |
748 | RING_LOCALS; | 749 | RING_LOCALS; |
749 | 750 | ||
750 | x += dev_priv->sarea_priv->boxes[0].x1; | 751 | x += master_priv->sarea_priv->boxes[0].x1; |
751 | y += dev_priv->sarea_priv->boxes[0].y1; | 752 | y += master_priv->sarea_priv->boxes[0].y1; |
752 | 753 | ||
753 | switch (dev_priv->color_fmt) { | 754 | switch (dev_priv->color_fmt) { |
754 | case RADEON_COLOR_FORMAT_RGB565: | 755 | case RADEON_COLOR_FORMAT_RGB565: |
@@ -776,7 +777,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv, | |||
776 | RADEON_GMC_SRC_DATATYPE_COLOR | | 777 | RADEON_GMC_SRC_DATATYPE_COLOR | |
777 | RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); | 778 | RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); |
778 | 779 | ||
779 | if (dev_priv->sarea_priv->pfCurrentPage == 1) { | 780 | if (master_priv->sarea_priv->pfCurrentPage == 1) { |
780 | OUT_RING(dev_priv->front_pitch_offset); | 781 | OUT_RING(dev_priv->front_pitch_offset); |
781 | } else { | 782 | } else { |
782 | OUT_RING(dev_priv->back_pitch_offset); | 783 | OUT_RING(dev_priv->back_pitch_offset); |
@@ -790,7 +791,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv, | |||
790 | ADVANCE_RING(); | 791 | ADVANCE_RING(); |
791 | } | 792 | } |
792 | 793 | ||
793 | static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | 794 | static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv) |
794 | { | 795 | { |
795 | /* Collapse various things into a wait flag -- trying to | 796 | /* Collapse various things into a wait flag -- trying to |
796 | * guess if userspace slept -- better just to have them tell us. | 797 | * guess if userspace slept -- better just to have them tell us. |
@@ -807,12 +808,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
807 | /* Purple box for page flipping | 808 | /* Purple box for page flipping |
808 | */ | 809 | */ |
809 | if (dev_priv->stats.boxes & RADEON_BOX_FLIP) | 810 | if (dev_priv->stats.boxes & RADEON_BOX_FLIP) |
810 | radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255); | 811 | radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255); |
811 | 812 | ||
812 | /* Red box if we have to wait for idle at any point | 813 | /* Red box if we have to wait for idle at any point |
813 | */ | 814 | */ |
814 | if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) | 815 | if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) |
815 | radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0); | 816 | radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0); |
816 | 817 | ||
817 | /* Blue box: lost context? | 818 | /* Blue box: lost context? |
818 | */ | 819 | */ |
@@ -820,12 +821,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
820 | /* Yellow box for texture swaps | 821 | /* Yellow box for texture swaps |
821 | */ | 822 | */ |
822 | if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) | 823 | if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) |
823 | radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0); | 824 | radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0); |
824 | 825 | ||
825 | /* Green box if hardware never idles (as far as we can tell) | 826 | /* Green box if hardware never idles (as far as we can tell) |
826 | */ | 827 | */ |
827 | if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) | 828 | if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) |
828 | radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); | 829 | radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0); |
829 | 830 | ||
830 | /* Draw bars indicating number of buffers allocated | 831 | /* Draw bars indicating number of buffers allocated |
831 | * (not a great measure, easily confused) | 832 | * (not a great measure, easily confused) |
@@ -834,7 +835,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
834 | if (dev_priv->stats.requested_bufs > 100) | 835 | if (dev_priv->stats.requested_bufs > 100) |
835 | dev_priv->stats.requested_bufs = 100; | 836 | dev_priv->stats.requested_bufs = 100; |
836 | 837 | ||
837 | radeon_clear_box(dev_priv, 4, 16, | 838 | radeon_clear_box(dev_priv, master_priv, 4, 16, |
838 | dev_priv->stats.requested_bufs, 4, | 839 | dev_priv->stats.requested_bufs, 4, |
839 | 196, 128, 128); | 840 | 196, 128, 128); |
840 | } | 841 | } |
@@ -848,11 +849,13 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
848 | */ | 849 | */ |
849 | 850 | ||
850 | static void radeon_cp_dispatch_clear(struct drm_device * dev, | 851 | static void radeon_cp_dispatch_clear(struct drm_device * dev, |
852 | struct drm_master *master, | ||
851 | drm_radeon_clear_t * clear, | 853 | drm_radeon_clear_t * clear, |
852 | drm_radeon_clear_rect_t * depth_boxes) | 854 | drm_radeon_clear_rect_t * depth_boxes) |
853 | { | 855 | { |
854 | drm_radeon_private_t *dev_priv = dev->dev_private; | 856 | drm_radeon_private_t *dev_priv = dev->dev_private; |
855 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 857 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
858 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
856 | drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; | 859 | drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; |
857 | int nbox = sarea_priv->nbox; | 860 | int nbox = sarea_priv->nbox; |
858 | struct drm_clip_rect *pbox = sarea_priv->boxes; | 861 | struct drm_clip_rect *pbox = sarea_priv->boxes; |
@@ -864,7 +867,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
864 | 867 | ||
865 | dev_priv->stats.clears++; | 868 | dev_priv->stats.clears++; |
866 | 869 | ||
867 | if (dev_priv->sarea_priv->pfCurrentPage == 1) { | 870 | if (sarea_priv->pfCurrentPage == 1) { |
868 | unsigned int tmp = flags; | 871 | unsigned int tmp = flags; |
869 | 872 | ||
870 | flags &= ~(RADEON_FRONT | RADEON_BACK); | 873 | flags &= ~(RADEON_FRONT | RADEON_BACK); |
@@ -890,7 +893,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
890 | 893 | ||
891 | /* Make sure we restore the 3D state next time. | 894 | /* Make sure we restore the 3D state next time. |
892 | */ | 895 | */ |
893 | dev_priv->sarea_priv->ctx_owner = 0; | 896 | sarea_priv->ctx_owner = 0; |
894 | 897 | ||
895 | for (i = 0; i < nbox; i++) { | 898 | for (i = 0; i < nbox; i++) { |
896 | int x = pbox[i].x1; | 899 | int x = pbox[i].x1; |
@@ -967,7 +970,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
967 | /* Make sure we restore the 3D state next time. | 970 | /* Make sure we restore the 3D state next time. |
968 | * we haven't touched any "normal" state - still need this? | 971 | * we haven't touched any "normal" state - still need this? |
969 | */ | 972 | */ |
970 | dev_priv->sarea_priv->ctx_owner = 0; | 973 | sarea_priv->ctx_owner = 0; |
971 | 974 | ||
972 | if ((dev_priv->flags & RADEON_HAS_HIERZ) | 975 | if ((dev_priv->flags & RADEON_HAS_HIERZ) |
973 | && (flags & RADEON_USE_HIERZ)) { | 976 | && (flags & RADEON_USE_HIERZ)) { |
@@ -1214,7 +1217,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
1214 | 1217 | ||
1215 | /* Make sure we restore the 3D state next time. | 1218 | /* Make sure we restore the 3D state next time. |
1216 | */ | 1219 | */ |
1217 | dev_priv->sarea_priv->ctx_owner = 0; | 1220 | sarea_priv->ctx_owner = 0; |
1218 | 1221 | ||
1219 | for (i = 0; i < nbox; i++) { | 1222 | for (i = 0; i < nbox; i++) { |
1220 | 1223 | ||
@@ -1285,7 +1288,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
1285 | 1288 | ||
1286 | /* Make sure we restore the 3D state next time. | 1289 | /* Make sure we restore the 3D state next time. |
1287 | */ | 1290 | */ |
1288 | dev_priv->sarea_priv->ctx_owner = 0; | 1291 | sarea_priv->ctx_owner = 0; |
1289 | 1292 | ||
1290 | for (i = 0; i < nbox; i++) { | 1293 | for (i = 0; i < nbox; i++) { |
1291 | 1294 | ||
@@ -1328,20 +1331,21 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
1328 | * wait on this value before performing the clear ioctl. We | 1331 | * wait on this value before performing the clear ioctl. We |
1329 | * need this because the card's so damned fast... | 1332 | * need this because the card's so damned fast... |
1330 | */ | 1333 | */ |
1331 | dev_priv->sarea_priv->last_clear++; | 1334 | sarea_priv->last_clear++; |
1332 | 1335 | ||
1333 | BEGIN_RING(4); | 1336 | BEGIN_RING(4); |
1334 | 1337 | ||
1335 | RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear); | 1338 | RADEON_CLEAR_AGE(sarea_priv->last_clear); |
1336 | RADEON_WAIT_UNTIL_IDLE(); | 1339 | RADEON_WAIT_UNTIL_IDLE(); |
1337 | 1340 | ||
1338 | ADVANCE_RING(); | 1341 | ADVANCE_RING(); |
1339 | } | 1342 | } |
1340 | 1343 | ||
1341 | static void radeon_cp_dispatch_swap(struct drm_device * dev) | 1344 | static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master) |
1342 | { | 1345 | { |
1343 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1346 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1344 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 1347 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
1348 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
1345 | int nbox = sarea_priv->nbox; | 1349 | int nbox = sarea_priv->nbox; |
1346 | struct drm_clip_rect *pbox = sarea_priv->boxes; | 1350 | struct drm_clip_rect *pbox = sarea_priv->boxes; |
1347 | int i; | 1351 | int i; |
@@ -1351,7 +1355,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) | |||
1351 | /* Do some trivial performance monitoring... | 1355 | /* Do some trivial performance monitoring... |
1352 | */ | 1356 | */ |
1353 | if (dev_priv->do_boxes) | 1357 | if (dev_priv->do_boxes) |
1354 | radeon_cp_performance_boxes(dev_priv); | 1358 | radeon_cp_performance_boxes(dev_priv, master_priv); |
1355 | 1359 | ||
1356 | /* Wait for the 3D stream to idle before dispatching the bitblt. | 1360 | /* Wait for the 3D stream to idle before dispatching the bitblt. |
1357 | * This will prevent data corruption between the two streams. | 1361 | * This will prevent data corruption between the two streams. |
@@ -1385,7 +1389,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) | |||
1385 | /* Make this work even if front & back are flipped: | 1389 | /* Make this work even if front & back are flipped: |
1386 | */ | 1390 | */ |
1387 | OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); | 1391 | OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); |
1388 | if (dev_priv->sarea_priv->pfCurrentPage == 0) { | 1392 | if (sarea_priv->pfCurrentPage == 0) { |
1389 | OUT_RING(dev_priv->back_pitch_offset); | 1393 | OUT_RING(dev_priv->back_pitch_offset); |
1390 | OUT_RING(dev_priv->front_pitch_offset); | 1394 | OUT_RING(dev_priv->front_pitch_offset); |
1391 | } else { | 1395 | } else { |
@@ -1405,31 +1409,32 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) | |||
1405 | * throttle the framerate by waiting for this value before | 1409 | * throttle the framerate by waiting for this value before |
1406 | * performing the swapbuffer ioctl. | 1410 | * performing the swapbuffer ioctl. |
1407 | */ | 1411 | */ |
1408 | dev_priv->sarea_priv->last_frame++; | 1412 | sarea_priv->last_frame++; |
1409 | 1413 | ||
1410 | BEGIN_RING(4); | 1414 | BEGIN_RING(4); |
1411 | 1415 | ||
1412 | RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); | 1416 | RADEON_FRAME_AGE(sarea_priv->last_frame); |
1413 | RADEON_WAIT_UNTIL_2D_IDLE(); | 1417 | RADEON_WAIT_UNTIL_2D_IDLE(); |
1414 | 1418 | ||
1415 | ADVANCE_RING(); | 1419 | ADVANCE_RING(); |
1416 | } | 1420 | } |
1417 | 1421 | ||
1418 | static void radeon_cp_dispatch_flip(struct drm_device * dev) | 1422 | void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master) |
1419 | { | 1423 | { |
1420 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1424 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1421 | struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; | 1425 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
1422 | int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) | 1426 | struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle; |
1427 | int offset = (master_priv->sarea_priv->pfCurrentPage == 1) | ||
1423 | ? dev_priv->front_offset : dev_priv->back_offset; | 1428 | ? dev_priv->front_offset : dev_priv->back_offset; |
1424 | RING_LOCALS; | 1429 | RING_LOCALS; |
1425 | DRM_DEBUG("pfCurrentPage=%d\n", | 1430 | DRM_DEBUG("pfCurrentPage=%d\n", |
1426 | dev_priv->sarea_priv->pfCurrentPage); | 1431 | master_priv->sarea_priv->pfCurrentPage); |
1427 | 1432 | ||
1428 | /* Do some trivial performance monitoring... | 1433 | /* Do some trivial performance monitoring... |
1429 | */ | 1434 | */ |
1430 | if (dev_priv->do_boxes) { | 1435 | if (dev_priv->do_boxes) { |
1431 | dev_priv->stats.boxes |= RADEON_BOX_FLIP; | 1436 | dev_priv->stats.boxes |= RADEON_BOX_FLIP; |
1432 | radeon_cp_performance_boxes(dev_priv); | 1437 | radeon_cp_performance_boxes(dev_priv, master_priv); |
1433 | } | 1438 | } |
1434 | 1439 | ||
1435 | /* Update the frame offsets for both CRTCs | 1440 | /* Update the frame offsets for both CRTCs |
@@ -1441,7 +1446,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev) | |||
1441 | ((sarea->frame.y * dev_priv->front_pitch + | 1446 | ((sarea->frame.y * dev_priv->front_pitch + |
1442 | sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) | 1447 | sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) |
1443 | + offset); | 1448 | + offset); |
1444 | OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base | 1449 | OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base |
1445 | + offset); | 1450 | + offset); |
1446 | 1451 | ||
1447 | ADVANCE_RING(); | 1452 | ADVANCE_RING(); |
@@ -1450,13 +1455,13 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev) | |||
1450 | * throttle the framerate by waiting for this value before | 1455 | * throttle the framerate by waiting for this value before |
1451 | * performing the swapbuffer ioctl. | 1456 | * performing the swapbuffer ioctl. |
1452 | */ | 1457 | */ |
1453 | dev_priv->sarea_priv->last_frame++; | 1458 | master_priv->sarea_priv->last_frame++; |
1454 | dev_priv->sarea_priv->pfCurrentPage = | 1459 | master_priv->sarea_priv->pfCurrentPage = |
1455 | 1 - dev_priv->sarea_priv->pfCurrentPage; | 1460 | 1 - master_priv->sarea_priv->pfCurrentPage; |
1456 | 1461 | ||
1457 | BEGIN_RING(2); | 1462 | BEGIN_RING(2); |
1458 | 1463 | ||
1459 | RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); | 1464 | RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame); |
1460 | 1465 | ||
1461 | ADVANCE_RING(); | 1466 | ADVANCE_RING(); |
1462 | } | 1467 | } |
@@ -1494,11 +1499,13 @@ typedef struct { | |||
1494 | } drm_radeon_tcl_prim_t; | 1499 | } drm_radeon_tcl_prim_t; |
1495 | 1500 | ||
1496 | static void radeon_cp_dispatch_vertex(struct drm_device * dev, | 1501 | static void radeon_cp_dispatch_vertex(struct drm_device * dev, |
1502 | struct drm_file *file_priv, | ||
1497 | struct drm_buf * buf, | 1503 | struct drm_buf * buf, |
1498 | drm_radeon_tcl_prim_t * prim) | 1504 | drm_radeon_tcl_prim_t * prim) |
1499 | { | 1505 | { |
1500 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1506 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1501 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 1507 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
1508 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
1502 | int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; | 1509 | int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; |
1503 | int numverts = (int)prim->numverts; | 1510 | int numverts = (int)prim->numverts; |
1504 | int nbox = sarea_priv->nbox; | 1511 | int nbox = sarea_priv->nbox; |
@@ -1539,13 +1546,14 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev, | |||
1539 | } while (i < nbox); | 1546 | } while (i < nbox); |
1540 | } | 1547 | } |
1541 | 1548 | ||
1542 | static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) | 1549 | static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) |
1543 | { | 1550 | { |
1544 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1551 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1552 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
1545 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | 1553 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; |
1546 | RING_LOCALS; | 1554 | RING_LOCALS; |
1547 | 1555 | ||
1548 | buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; | 1556 | buf_priv->age = ++master_priv->sarea_priv->last_dispatch; |
1549 | 1557 | ||
1550 | /* Emit the vertex buffer age */ | 1558 | /* Emit the vertex buffer age */ |
1551 | BEGIN_RING(2); | 1559 | BEGIN_RING(2); |
@@ -1590,12 +1598,14 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev, | |||
1590 | } | 1598 | } |
1591 | } | 1599 | } |
1592 | 1600 | ||
1593 | static void radeon_cp_dispatch_indices(struct drm_device * dev, | 1601 | static void radeon_cp_dispatch_indices(struct drm_device *dev, |
1602 | struct drm_master *master, | ||
1594 | struct drm_buf * elt_buf, | 1603 | struct drm_buf * elt_buf, |
1595 | drm_radeon_tcl_prim_t * prim) | 1604 | drm_radeon_tcl_prim_t * prim) |
1596 | { | 1605 | { |
1597 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1606 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1598 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 1607 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
1608 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
1599 | int offset = dev_priv->gart_buffers_offset + prim->offset; | 1609 | int offset = dev_priv->gart_buffers_offset + prim->offset; |
1600 | u32 *data; | 1610 | u32 *data; |
1601 | int dwords; | 1611 | int dwords; |
@@ -1870,7 +1880,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, | |||
1870 | ADVANCE_RING(); | 1880 | ADVANCE_RING(); |
1871 | COMMIT_RING(); | 1881 | COMMIT_RING(); |
1872 | 1882 | ||
1873 | radeon_cp_discard_buffer(dev, buf); | 1883 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
1874 | 1884 | ||
1875 | /* Update the input parameters for next time */ | 1885 | /* Update the input parameters for next time */ |
1876 | image->y += height; | 1886 | image->y += height; |
@@ -2110,7 +2120,8 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi | |||
2110 | static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2120 | static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2111 | { | 2121 | { |
2112 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2122 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2113 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2123 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2124 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
2114 | drm_radeon_clear_t *clear = data; | 2125 | drm_radeon_clear_t *clear = data; |
2115 | drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; | 2126 | drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; |
2116 | DRM_DEBUG("\n"); | 2127 | DRM_DEBUG("\n"); |
@@ -2126,7 +2137,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * | |||
2126 | sarea_priv->nbox * sizeof(depth_boxes[0]))) | 2137 | sarea_priv->nbox * sizeof(depth_boxes[0]))) |
2127 | return -EFAULT; | 2138 | return -EFAULT; |
2128 | 2139 | ||
2129 | radeon_cp_dispatch_clear(dev, clear, depth_boxes); | 2140 | radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes); |
2130 | 2141 | ||
2131 | COMMIT_RING(); | 2142 | COMMIT_RING(); |
2132 | return 0; | 2143 | return 0; |
@@ -2134,9 +2145,10 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * | |||
2134 | 2145 | ||
2135 | /* Not sure why this isn't set all the time: | 2146 | /* Not sure why this isn't set all the time: |
2136 | */ | 2147 | */ |
2137 | static int radeon_do_init_pageflip(struct drm_device * dev) | 2148 | static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master) |
2138 | { | 2149 | { |
2139 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2150 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2151 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
2140 | RING_LOCALS; | 2152 | RING_LOCALS; |
2141 | 2153 | ||
2142 | DRM_DEBUG("\n"); | 2154 | DRM_DEBUG("\n"); |
@@ -2153,8 +2165,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev) | |||
2153 | 2165 | ||
2154 | dev_priv->page_flipping = 1; | 2166 | dev_priv->page_flipping = 1; |
2155 | 2167 | ||
2156 | if (dev_priv->sarea_priv->pfCurrentPage != 1) | 2168 | if (master_priv->sarea_priv->pfCurrentPage != 1) |
2157 | dev_priv->sarea_priv->pfCurrentPage = 0; | 2169 | master_priv->sarea_priv->pfCurrentPage = 0; |
2158 | 2170 | ||
2159 | return 0; | 2171 | return 0; |
2160 | } | 2172 | } |
@@ -2172,9 +2184,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f | |||
2172 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2184 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
2173 | 2185 | ||
2174 | if (!dev_priv->page_flipping) | 2186 | if (!dev_priv->page_flipping) |
2175 | radeon_do_init_pageflip(dev); | 2187 | radeon_do_init_pageflip(dev, file_priv->master); |
2176 | 2188 | ||
2177 | radeon_cp_dispatch_flip(dev); | 2189 | radeon_cp_dispatch_flip(dev, file_priv->master); |
2178 | 2190 | ||
2179 | COMMIT_RING(); | 2191 | COMMIT_RING(); |
2180 | return 0; | 2192 | return 0; |
@@ -2183,7 +2195,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f | |||
2183 | static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2195 | static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2184 | { | 2196 | { |
2185 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2197 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2186 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2198 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2199 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
2200 | |||
2187 | DRM_DEBUG("\n"); | 2201 | DRM_DEBUG("\n"); |
2188 | 2202 | ||
2189 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2203 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
@@ -2193,8 +2207,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f | |||
2193 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) | 2207 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) |
2194 | sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; | 2208 | sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; |
2195 | 2209 | ||
2196 | radeon_cp_dispatch_swap(dev); | 2210 | radeon_cp_dispatch_swap(dev, file_priv->master); |
2197 | dev_priv->sarea_priv->ctx_owner = 0; | 2211 | sarea_priv->ctx_owner = 0; |
2198 | 2212 | ||
2199 | COMMIT_RING(); | 2213 | COMMIT_RING(); |
2200 | return 0; | 2214 | return 0; |
@@ -2203,7 +2217,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f | |||
2203 | static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2217 | static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2204 | { | 2218 | { |
2205 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2219 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2206 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2220 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2221 | drm_radeon_sarea_t *sarea_priv; | ||
2207 | struct drm_device_dma *dma = dev->dma; | 2222 | struct drm_device_dma *dma = dev->dma; |
2208 | struct drm_buf *buf; | 2223 | struct drm_buf *buf; |
2209 | drm_radeon_vertex_t *vertex = data; | 2224 | drm_radeon_vertex_t *vertex = data; |
@@ -2211,6 +2226,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file | |||
2211 | 2226 | ||
2212 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2227 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
2213 | 2228 | ||
2229 | sarea_priv = master_priv->sarea_priv; | ||
2230 | |||
2214 | DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", | 2231 | DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", |
2215 | DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); | 2232 | DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); |
2216 | 2233 | ||
@@ -2263,13 +2280,13 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file | |||
2263 | prim.finish = vertex->count; /* unused */ | 2280 | prim.finish = vertex->count; /* unused */ |
2264 | prim.prim = vertex->prim; | 2281 | prim.prim = vertex->prim; |
2265 | prim.numverts = vertex->count; | 2282 | prim.numverts = vertex->count; |
2266 | prim.vc_format = dev_priv->sarea_priv->vc_format; | 2283 | prim.vc_format = sarea_priv->vc_format; |
2267 | 2284 | ||
2268 | radeon_cp_dispatch_vertex(dev, buf, &prim); | 2285 | radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim); |
2269 | } | 2286 | } |
2270 | 2287 | ||
2271 | if (vertex->discard) { | 2288 | if (vertex->discard) { |
2272 | radeon_cp_discard_buffer(dev, buf); | 2289 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2273 | } | 2290 | } |
2274 | 2291 | ||
2275 | COMMIT_RING(); | 2292 | COMMIT_RING(); |
@@ -2279,7 +2296,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file | |||
2279 | static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2296 | static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2280 | { | 2297 | { |
2281 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2298 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2282 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2299 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2300 | drm_radeon_sarea_t *sarea_priv; | ||
2283 | struct drm_device_dma *dma = dev->dma; | 2301 | struct drm_device_dma *dma = dev->dma; |
2284 | struct drm_buf *buf; | 2302 | struct drm_buf *buf; |
2285 | drm_radeon_indices_t *elts = data; | 2303 | drm_radeon_indices_t *elts = data; |
@@ -2288,6 +2306,8 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file | |||
2288 | 2306 | ||
2289 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2307 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
2290 | 2308 | ||
2309 | sarea_priv = master_priv->sarea_priv; | ||
2310 | |||
2291 | DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", | 2311 | DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", |
2292 | DRM_CURRENTPID, elts->idx, elts->start, elts->end, | 2312 | DRM_CURRENTPID, elts->idx, elts->start, elts->end, |
2293 | elts->discard); | 2313 | elts->discard); |
@@ -2353,11 +2373,11 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file | |||
2353 | prim.prim = elts->prim; | 2373 | prim.prim = elts->prim; |
2354 | prim.offset = 0; /* offset from start of dma buffers */ | 2374 | prim.offset = 0; /* offset from start of dma buffers */ |
2355 | prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ | 2375 | prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ |
2356 | prim.vc_format = dev_priv->sarea_priv->vc_format; | 2376 | prim.vc_format = sarea_priv->vc_format; |
2357 | 2377 | ||
2358 | radeon_cp_dispatch_indices(dev, buf, &prim); | 2378 | radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim); |
2359 | if (elts->discard) { | 2379 | if (elts->discard) { |
2360 | radeon_cp_discard_buffer(dev, buf); | 2380 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2361 | } | 2381 | } |
2362 | 2382 | ||
2363 | COMMIT_RING(); | 2383 | COMMIT_RING(); |
@@ -2468,7 +2488,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil | |||
2468 | */ | 2488 | */ |
2469 | radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); | 2489 | radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); |
2470 | if (indirect->discard) { | 2490 | if (indirect->discard) { |
2471 | radeon_cp_discard_buffer(dev, buf); | 2491 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2472 | } | 2492 | } |
2473 | 2493 | ||
2474 | COMMIT_RING(); | 2494 | COMMIT_RING(); |
@@ -2478,7 +2498,8 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil | |||
2478 | static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2498 | static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2479 | { | 2499 | { |
2480 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2500 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2481 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2501 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2502 | drm_radeon_sarea_t *sarea_priv; | ||
2482 | struct drm_device_dma *dma = dev->dma; | 2503 | struct drm_device_dma *dma = dev->dma; |
2483 | struct drm_buf *buf; | 2504 | struct drm_buf *buf; |
2484 | drm_radeon_vertex2_t *vertex = data; | 2505 | drm_radeon_vertex2_t *vertex = data; |
@@ -2487,6 +2508,8 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
2487 | 2508 | ||
2488 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2509 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
2489 | 2510 | ||
2511 | sarea_priv = master_priv->sarea_priv; | ||
2512 | |||
2490 | DRM_DEBUG("pid=%d index=%d discard=%d\n", | 2513 | DRM_DEBUG("pid=%d index=%d discard=%d\n", |
2491 | DRM_CURRENTPID, vertex->idx, vertex->discard); | 2514 | DRM_CURRENTPID, vertex->idx, vertex->discard); |
2492 | 2515 | ||
@@ -2547,12 +2570,12 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
2547 | tclprim.offset = prim.numverts * 64; | 2570 | tclprim.offset = prim.numverts * 64; |
2548 | tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ | 2571 | tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ |
2549 | 2572 | ||
2550 | radeon_cp_dispatch_indices(dev, buf, &tclprim); | 2573 | radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim); |
2551 | } else { | 2574 | } else { |
2552 | tclprim.numverts = prim.numverts; | 2575 | tclprim.numverts = prim.numverts; |
2553 | tclprim.offset = 0; /* not used */ | 2576 | tclprim.offset = 0; /* not used */ |
2554 | 2577 | ||
2555 | radeon_cp_dispatch_vertex(dev, buf, &tclprim); | 2578 | radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim); |
2556 | } | 2579 | } |
2557 | 2580 | ||
2558 | if (sarea_priv->nbox == 1) | 2581 | if (sarea_priv->nbox == 1) |
@@ -2560,7 +2583,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
2560 | } | 2583 | } |
2561 | 2584 | ||
2562 | if (vertex->discard) { | 2585 | if (vertex->discard) { |
2563 | radeon_cp_discard_buffer(dev, buf); | 2586 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2564 | } | 2587 | } |
2565 | 2588 | ||
2566 | COMMIT_RING(); | 2589 | COMMIT_RING(); |
@@ -2909,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file | |||
2909 | goto err; | 2932 | goto err; |
2910 | } | 2933 | } |
2911 | 2934 | ||
2912 | radeon_cp_discard_buffer(dev, buf); | 2935 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2913 | break; | 2936 | break; |
2914 | 2937 | ||
2915 | case RADEON_CMD_PACKET3: | 2938 | case RADEON_CMD_PACKET3: |
@@ -3020,7 +3043,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil | |||
3020 | */ | 3043 | */ |
3021 | case RADEON_PARAM_SAREA_HANDLE: | 3044 | case RADEON_PARAM_SAREA_HANDLE: |
3022 | /* The lock is the first dword in the sarea. */ | 3045 | /* The lock is the first dword in the sarea. */ |
3023 | value = (long)dev->lock.hw_lock; | 3046 | /* no users of this parameter */ |
3024 | break; | 3047 | break; |
3025 | #endif | 3048 | #endif |
3026 | case RADEON_PARAM_GART_TEX_HANDLE: | 3049 | case RADEON_PARAM_GART_TEX_HANDLE: |
@@ -3064,6 +3087,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil | |||
3064 | static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) | 3087 | static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) |
3065 | { | 3088 | { |
3066 | drm_radeon_private_t *dev_priv = dev->dev_private; | 3089 | drm_radeon_private_t *dev_priv = dev->dev_private; |
3090 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
3067 | drm_radeon_setparam_t *sp = data; | 3091 | drm_radeon_setparam_t *sp = data; |
3068 | struct drm_radeon_driver_file_fields *radeon_priv; | 3092 | struct drm_radeon_driver_file_fields *radeon_priv; |
3069 | 3093 | ||
@@ -3078,12 +3102,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil | |||
3078 | DRM_DEBUG("color tiling disabled\n"); | 3102 | DRM_DEBUG("color tiling disabled\n"); |
3079 | dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; | 3103 | dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; |
3080 | dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; | 3104 | dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; |
3081 | dev_priv->sarea_priv->tiling_enabled = 0; | 3105 | if (master_priv->sarea_priv) |
3106 | master_priv->sarea_priv->tiling_enabled = 0; | ||
3082 | } else if (sp->value == 1) { | 3107 | } else if (sp->value == 1) { |
3083 | DRM_DEBUG("color tiling enabled\n"); | 3108 | DRM_DEBUG("color tiling enabled\n"); |
3084 | dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; | 3109 | dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; |
3085 | dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; | 3110 | dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; |
3086 | dev_priv->sarea_priv->tiling_enabled = 1; | 3111 | if (master_priv->sarea_priv) |
3112 | master_priv->sarea_priv->tiling_enabled = 1; | ||
3087 | } | 3113 | } |
3088 | break; | 3114 | break; |
3089 | case RADEON_SETPARAM_PCIGART_LOCATION: | 3115 | case RADEON_SETPARAM_PCIGART_LOCATION: |
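[Editor's note on the setparam hunk above: both tiling branches now test master_priv->sarea_priv before writing tiling_enabled, presumably because a master is not guaranteed to have a SAREA set up (for instance under kernel modesetting). A minimal sketch of that guarded update follows; the radeon_set_tiling() wrapper is hypothetical, while the pitch-offset flags and the sarea field come straight from the hunk.]

	/* Sketch only: guarded SAREA update as done in the hunk above.
	 * master_priv->sarea_priv may legitimately be NULL, so it is
	 * checked before every dereference. */
	static void radeon_set_tiling(drm_radeon_private_t *dev_priv,
				      struct drm_radeon_master_private *master_priv,
				      int enable)
	{
		if (enable) {
			dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
			dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
		} else {
			dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
			dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
		}

		if (master_priv->sarea_priv)
			master_priv->sarea_priv->tiling_enabled = enable ? 1 : 0;
	}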
@@ -3129,14 +3155,6 @@ void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) | |||
3129 | 3155 | ||
3130 | void radeon_driver_lastclose(struct drm_device *dev) | 3156 | void radeon_driver_lastclose(struct drm_device *dev) |
3131 | { | 3157 | { |
3132 | if (dev->dev_private) { | ||
3133 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
3134 | |||
3135 | if (dev_priv->sarea_priv && | ||
3136 | dev_priv->sarea_priv->pfCurrentPage != 0) | ||
3137 | radeon_cp_dispatch_flip(dev); | ||
3138 | } | ||
3139 | |||
3140 | radeon_do_release(dev); | 3158 | radeon_do_release(dev); |
3141 | } | 3159 | } |
3142 | 3160 | ||
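[Editor's note on the radeon_state.c hunks above: they all follow one pattern. The SAREA private area is no longer a device-wide field (dev_priv->sarea_priv); it hangs off the DRM master and is reached through file_priv->master->driver_priv, and the dispatch helpers gain a drm_master argument so they can reach the same per-master state. That is also why the lastclose hunk drops the page-flip restore: a device-wide close path no longer has a single dev_priv->sarea_priv to consult. Below is a minimal, hypothetical handler sketching the access pattern, essentially the shape of the patched radeon_cp_swap(); the type names and the master->driver_priv chain are exactly those used in the hunks.]

	/* Sketch only: per-master SAREA lookup as introduced above.
	 * radeon_example_ioctl() is illustrative, not part of the patch. */
	static int radeon_example_ioctl(struct drm_device *dev, void *data,
					struct drm_file *file_priv)
	{
		drm_radeon_private_t *dev_priv = dev->dev_private;
		struct drm_radeon_master_private *master_priv =
			file_priv->master->driver_priv;
		drm_radeon_sarea_t *sarea_priv;

		LOCK_TEST_WITH_RETURN(dev, file_priv);
		RING_SPACE_TEST_WITH_RETURN(dev_priv);

		/* Resolve the SAREA only after the lock/ring tests, matching
		 * the ordering used by radeon_cp_vertex() and
		 * radeon_cp_indices() in the patch. */
		sarea_priv = master_priv->sarea_priv;

		if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
			sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

		/* Dispatch helpers now take the master so they can reach the
		 * same per-master SAREA. */
		radeon_cp_dispatch_swap(dev, file_priv->master);
		sarea_priv->ctx_owner = 0;

		COMMIT_RING();
		return 0;
	}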
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index 665d319b927b..c248c1d37268 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c | |||
@@ -314,7 +314,6 @@ int via_driver_irq_postinstall(struct drm_device *dev) | |||
314 | if (!dev_priv) | 314 | if (!dev_priv) |
315 | return -EINVAL; | 315 | return -EINVAL; |
316 | 316 | ||
317 | drm_vblank_init(dev, 1); | ||
318 | status = VIA_READ(VIA_REG_INTERRUPT); | 317 | status = VIA_READ(VIA_REG_INTERRUPT); |
319 | VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL | 318 | VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL |
320 | | dev_priv->irq_enable_mask); | 319 | | dev_priv->irq_enable_mask); |
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c index a967556be014..2c4f0b485792 100644 --- a/drivers/gpu/drm/via/via_map.c +++ b/drivers/gpu/drm/via/via_map.c | |||
@@ -107,8 +107,17 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) | |||
107 | ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); | 107 | ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); |
108 | if (ret) { | 108 | if (ret) { |
109 | drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); | 109 | drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); |
110 | return ret; | ||
110 | } | 111 | } |
111 | return ret; | 112 | |
113 | ret = drm_vblank_init(dev, 1); | ||
114 | if (ret) { | ||
115 | drm_sman_takedown(&dev_priv->sman); | ||
116 | drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); | ||
117 | return ret; | ||
118 | } | ||
119 | |||
120 | return 0; | ||
112 | } | 121 | } |
113 | 122 | ||
114 | int via_driver_unload(struct drm_device *dev) | 123 | int via_driver_unload(struct drm_device *dev) |
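[Editor's note on the via hunks above: drm_vblank_init() moves out of via_driver_irq_postinstall(), where its return value was ignored, into via_driver_load(), where a failure can fail the load and the already-initialised SMAN is torn down before the private structure is freed. A hedged sketch of that unwind ordering follows; via_load_sketch() and the drm_calloc() allocation are illustrative assumptions, while the drm_sman_init/drm_vblank_init/drm_sman_takedown/drm_free calls mirror the hunk.]

	/* Sketch only: unwind each successfully initialised resource in
	 * reverse order when a later step fails. */
	static int via_load_sketch(struct drm_device *dev)
	{
		drm_via_private_t *dev_priv;
		int ret;

		dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
		if (!dev_priv)
			return -ENOMEM;
		dev->dev_private = dev_priv;

		ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
		if (ret)
			goto free_priv;

		ret = drm_vblank_init(dev, 1);	/* one CRTC's worth of vblank state */
		if (ret)
			goto takedown_sman;

		return 0;

	takedown_sman:
		drm_sman_takedown(&dev_priv->sman);
	free_priv:
		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
		return ret;
	}

[The patch itself keeps the nested-return style visible in the hunk; the goto unwind above is simply the more common kernel idiom for the same cleanup ordering.]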