author     Ingo Molnar <mingo@elte.hu>    2009-07-04 05:00:38 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-07-04 05:00:42 -0400
commit     d7e57676e3ed7ab9b2c7c4bcb7873e51eacbdb84
tree       f7433f38cd407a0c35a8cbf2b7e3fd756087bce7  /drivers/gpu/drm
parent     feaa0457ec8351cae855edc9a3052ac49322538e
parent     746a99a5af60ee676afa2ba469ccd1373493c7e7
Merge branch 'linus' into x86/cleanups
Merge reason: We were on an older pre-rc1 base, move to almost-rc2.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 14
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 4
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 140
-rw-r--r--  drivers/gpu/drm/drm_context.c | 4
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 9
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 31
-rw-r--r--  drivers/gpu/drm/drm_drawable.c | 25
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 18
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 100
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 14
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 44
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 33
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 48
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 53
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 8
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 33
-rw-r--r--  drivers/gpu/drm/drm_sman.c | 29
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 19
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 12
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 6
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 6
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 4
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 25
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 25
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 105
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 219
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1153
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.h | 144
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_i2c.c | 272
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 344
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 64
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 14
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 12
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 84
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 85
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 478
-rw-r--r--  drivers/gpu/drm/radeon/r300.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mem.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 58
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 21
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 17
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 12
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 8
91 files changed, 3529 insertions, 1051 deletions
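
Most of the churn in the drm core and driver files below is a mechanical conversion away from the private DRM memory wrappers (drm_alloc/drm_calloc/drm_realloc/drm_free and their DRM_MEM_* accounting areas) to the standard kernel allocators. A minimal sketch of the before/after pattern, using a hypothetical foo_entry structure rather than any specific hunk:

    #include <linux/slab.h>

    struct foo_entry {
            int id;
    };

    static struct foo_entry *foo_create(void)
    {
            struct foo_entry *entry;

            /* old: entry = drm_alloc(sizeof(*entry), DRM_MEM_DRIVER); followed by memset() */
            entry = kzalloc(sizeof(*entry), GFP_KERNEL);    /* kmalloc() where zeroing is not needed */
            if (!entry)
                    return NULL;
            return entry;
    }

    static void foo_destroy(struct foo_entry *entry)
    {
            /* old: drm_free(entry, sizeof(*entry), DRM_MEM_DRIVER); */
            kfree(entry);
    }

The size and DRM_MEM_* arguments existed only for DRM's own allocation accounting, which is why the kfree() side of the conversion simply drops them.
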
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe415aef..39b393d38bb3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@ config DRM_I830
81 81
82config DRM_I915 82config DRM_I915
83 tristate "i915 driver" 83 tristate "i915 driver"
84 depends on AGP_INTEL
84 select FB_CFB_FILLRECT 85 select FB_CFB_FILLRECT
85 select FB_CFB_COPYAREA 86 select FB_CFB_COPYAREA
86 select FB_CFB_IMAGEBLIT 87 select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab08b7b8..fe23f29f7cba 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
16drm-$(CONFIG_COMPAT) += drm_ioc32.o 16drm-$(CONFIG_COMPAT) += drm_ioc32.o
17 17
18obj-$(CONFIG_DRM) += drm.o 18obj-$(CONFIG_DRM) += drm.o
19obj-$(CONFIG_DRM_TTM) += ttm/
19obj-$(CONFIG_DRM_TDFX) += tdfx/ 20obj-$(CONFIG_DRM_TDFX) += tdfx/
20obj-$(CONFIG_DRM_R128) += r128/ 21obj-$(CONFIG_DRM_R128) += r128/
21obj-$(CONFIG_DRM_RADEON)+= radeon/ 22obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/
26obj-$(CONFIG_DRM_SIS) += sis/ 27obj-$(CONFIG_DRM_SIS) += sis/
27obj-$(CONFIG_DRM_SAVAGE)+= savage/ 28obj-$(CONFIG_DRM_SAVAGE)+= savage/
28obj-$(CONFIG_DRM_VIA) +=via/ 29obj-$(CONFIG_DRM_VIA) +=via/
29obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 14796594e5d9..d68888fe3df9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -203,7 +203,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
203 203
204 if (!dev->agp || !dev->agp->acquired) 204 if (!dev->agp || !dev->agp->acquired)
205 return -EINVAL; 205 return -EINVAL;
206 if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) 206 if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
207 return -ENOMEM; 207 return -ENOMEM;
208 208
209 memset(entry, 0, sizeof(*entry)); 209 memset(entry, 0, sizeof(*entry));
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
211 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; 211 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
212 type = (u32) request->type; 212 type = (u32) request->type;
213 if (!(memory = drm_alloc_agp(dev, pages, type))) { 213 if (!(memory = drm_alloc_agp(dev, pages, type))) {
214 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 214 kfree(entry);
215 return -ENOMEM; 215 return -ENOMEM;
216 } 216 }
217 217
@@ -369,7 +369,7 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
369 list_del(&entry->head); 369 list_del(&entry->head);
370 370
371 drm_free_agp(entry->memory, entry->pages); 371 drm_free_agp(entry->memory, entry->pages);
372 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 372 kfree(entry);
373 return 0; 373 return 0;
374} 374}
375EXPORT_SYMBOL(drm_agp_free); 375EXPORT_SYMBOL(drm_agp_free);
@@ -397,13 +397,13 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
397{ 397{
398 struct drm_agp_head *head = NULL; 398 struct drm_agp_head *head = NULL;
399 399
400 if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) 400 if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
401 return NULL; 401 return NULL;
402 memset((void *)head, 0, sizeof(*head)); 402 memset((void *)head, 0, sizeof(*head));
403 head->bridge = agp_find_bridge(dev->pdev); 403 head->bridge = agp_find_bridge(dev->pdev);
404 if (!head->bridge) { 404 if (!head->bridge) {
405 if (!(head->bridge = agp_backend_acquire(dev->pdev))) { 405 if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
406 drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); 406 kfree(head);
407 return NULL; 407 return NULL;
408 } 408 }
409 agp_copy_info(head->bridge, &head->agp_info); 409 agp_copy_info(head->bridge, &head->agp_info);
@@ -412,7 +412,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
412 agp_copy_info(head->bridge, &head->agp_info); 412 agp_copy_info(head->bridge, &head->agp_info);
413 } 413 }
414 if (head->agp_info.chipset == NOT_SUPPORTED) { 414 if (head->agp_info.chipset == NOT_SUPPORTED) {
415 drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); 415 kfree(head);
416 return NULL; 416 return NULL;
417 } 417 }
418 INIT_LIST_HEAD(&head->memory); 418 INIT_LIST_HEAD(&head->memory);
@@ -482,7 +482,7 @@ drm_agp_bind_pages(struct drm_device *dev,
482 } 482 }
483 483
484 for (i = 0; i < num_pages; i++) 484 for (i = 0; i < num_pages; i++)
485 mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); 485 mem->pages[i] = pages[i];
486 mem->page_count = num_pages; 486 mem->page_count = num_pages;
487 487
488 mem->is_flushed = true; 488 mem->is_flushed = true;
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index ca7a9ef5007b..932b5aa96a67 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,7 +79,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
79 struct drm_device *dev = master->minor->dev; 79 struct drm_device *dev = master->minor->dev;
80 DRM_DEBUG("%d\n", magic); 80 DRM_DEBUG("%d\n", magic);
81 81
82 entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); 82 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
83 if (!entry) 83 if (!entry)
84 return -ENOMEM; 84 return -ENOMEM;
85 memset(entry, 0, sizeof(*entry)); 85 memset(entry, 0, sizeof(*entry));
@@ -120,7 +120,7 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
120 list_del(&pt->head); 120 list_del(&pt->head);
121 mutex_unlock(&dev->struct_mutex); 121 mutex_unlock(&dev->struct_mutex);
122 122
123 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); 123 kfree(pt);
124 124
125 return 0; 125 return 0;
126} 126}
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 80a257554b30..6246e3f3dad7 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -151,7 +151,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
151 unsigned long user_token; 151 unsigned long user_token;
152 int ret; 152 int ret;
153 153
154 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); 154 map = kmalloc(sizeof(*map), GFP_KERNEL);
155 if (!map) 155 if (!map)
156 return -ENOMEM; 156 return -ENOMEM;
157 157
@@ -165,7 +165,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
165 * when processes fork. 165 * when processes fork.
166 */ 166 */
167 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { 167 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
168 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 168 kfree(map);
169 return -EINVAL; 169 return -EINVAL;
170 } 170 }
171 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", 171 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
@@ -179,7 +179,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
179 map->size = PAGE_ALIGN(map->size); 179 map->size = PAGE_ALIGN(map->size);
180 180
181 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { 181 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
182 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 182 kfree(map);
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
185 map->mtrr = -1; 185 map->mtrr = -1;
@@ -191,7 +191,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
191#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) 191#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
192 if (map->offset + (map->size-1) < map->offset || 192 if (map->offset + (map->size-1) < map->offset ||
193 map->offset < virt_to_phys(high_memory)) { 193 map->offset < virt_to_phys(high_memory)) {
194 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 194 kfree(map);
195 return -EINVAL; 195 return -EINVAL;
196 } 196 }
197#endif 197#endif
@@ -212,7 +212,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
212 list->map->size = map->size; 212 list->map->size = map->size;
213 } 213 }
214 214
215 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 215 kfree(map);
216 *maplist = list; 216 *maplist = list;
217 return 0; 217 return 0;
218 } 218 }
@@ -227,7 +227,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
227 if (map->type == _DRM_REGISTERS) { 227 if (map->type == _DRM_REGISTERS) {
228 map->handle = ioremap(map->offset, map->size); 228 map->handle = ioremap(map->offset, map->size);
229 if (!map->handle) { 229 if (!map->handle) {
230 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 230 kfree(map);
231 return -ENOMEM; 231 return -ENOMEM;
232 } 232 }
233 } 233 }
@@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
243 list->map->size = map->size; 243 list->map->size = map->size;
244 } 244 }
245 245
246 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 246 kfree(map);
247 *maplist = list; 247 *maplist = list;
248 return 0; 248 return 0;
249 } 249 }
@@ -251,7 +251,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
251 DRM_DEBUG("%lu %d %p\n", 251 DRM_DEBUG("%lu %d %p\n",
252 map->size, drm_order(map->size), map->handle); 252 map->size, drm_order(map->size), map->handle);
253 if (!map->handle) { 253 if (!map->handle) {
254 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 254 kfree(map);
255 return -ENOMEM; 255 return -ENOMEM;
256 } 256 }
257 map->offset = (unsigned long)map->handle; 257 map->offset = (unsigned long)map->handle;
@@ -259,7 +259,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
259 /* Prevent a 2nd X Server from creating a 2nd lock */ 259 /* Prevent a 2nd X Server from creating a 2nd lock */
260 if (dev->primary->master->lock.hw_lock != NULL) { 260 if (dev->primary->master->lock.hw_lock != NULL) {
261 vfree(map->handle); 261 vfree(map->handle);
262 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 262 kfree(map);
263 return -EBUSY; 263 return -EBUSY;
264 } 264 }
265 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ 265 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
@@ -270,7 +270,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
270 int valid = 0; 270 int valid = 0;
271 271
272 if (!drm_core_has_AGP(dev)) { 272 if (!drm_core_has_AGP(dev)) {
273 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 273 kfree(map);
274 return -EINVAL; 274 return -EINVAL;
275 } 275 }
276#ifdef __alpha__ 276#ifdef __alpha__
@@ -303,7 +303,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
303 } 303 }
304 } 304 }
305 if (!list_empty(&dev->agp->memory) && !valid) { 305 if (!list_empty(&dev->agp->memory) && !valid) {
306 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 306 kfree(map);
307 return -EPERM; 307 return -EPERM;
308 } 308 }
309 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", 309 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
@@ -316,7 +316,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
316 } 316 }
317 case _DRM_SCATTER_GATHER: 317 case _DRM_SCATTER_GATHER:
318 if (!dev->sg) { 318 if (!dev->sg) {
319 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 319 kfree(map);
320 return -EINVAL; 320 return -EINVAL;
321 } 321 }
322 map->offset += (unsigned long)dev->sg->virtual; 322 map->offset += (unsigned long)dev->sg->virtual;
@@ -328,7 +328,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
328 * need to point to a 64bit variable first. */ 328 * need to point to a 64bit variable first. */
329 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); 329 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
330 if (!dmah) { 330 if (!dmah) {
331 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 331 kfree(map);
332 return -ENOMEM; 332 return -ENOMEM;
333 } 333 }
334 map->handle = dmah->vaddr; 334 map->handle = dmah->vaddr;
@@ -336,15 +336,15 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
336 kfree(dmah); 336 kfree(dmah);
337 break; 337 break;
338 default: 338 default:
339 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 339 kfree(map);
340 return -EINVAL; 340 return -EINVAL;
341 } 341 }
342 342
343 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); 343 list = kmalloc(sizeof(*list), GFP_KERNEL);
344 if (!list) { 344 if (!list) {
345 if (map->type == _DRM_REGISTERS) 345 if (map->type == _DRM_REGISTERS)
346 iounmap(map->handle); 346 iounmap(map->handle);
347 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 347 kfree(map);
348 return -EINVAL; 348 return -EINVAL;
349 } 349 }
350 memset(list, 0, sizeof(*list)); 350 memset(list, 0, sizeof(*list));
@@ -362,8 +362,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
362 if (ret) { 362 if (ret) {
363 if (map->type == _DRM_REGISTERS) 363 if (map->type == _DRM_REGISTERS)
364 iounmap(map->handle); 364 iounmap(map->handle);
365 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 365 kfree(map);
366 drm_free(list, sizeof(*list), DRM_MEM_MAPS); 366 kfree(list);
367 mutex_unlock(&dev->struct_mutex); 367 mutex_unlock(&dev->struct_mutex);
368 return ret; 368 return ret;
369 } 369 }
@@ -448,7 +448,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
448 list_del(&r_list->head); 448 list_del(&r_list->head);
449 drm_ht_remove_key(&dev->map_hash, 449 drm_ht_remove_key(&dev->map_hash,
450 r_list->user_token >> PAGE_SHIFT); 450 r_list->user_token >> PAGE_SHIFT);
451 drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS); 451 kfree(r_list);
452 found = 1; 452 found = 1;
453 break; 453 break;
454 } 454 }
@@ -491,7 +491,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
491 DRM_ERROR("tried to rmmap GEM object\n"); 491 DRM_ERROR("tried to rmmap GEM object\n");
492 break; 492 break;
493 } 493 }
494 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 494 kfree(map);
495 495
496 return 0; 496 return 0;
497} 497}
@@ -582,24 +582,16 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
582 drm_pci_free(dev, entry->seglist[i]); 582 drm_pci_free(dev, entry->seglist[i]);
583 } 583 }
584 } 584 }
585 drm_free(entry->seglist, 585 kfree(entry->seglist);
586 entry->seg_count *
587 sizeof(*entry->seglist), DRM_MEM_SEGS);
588 586
589 entry->seg_count = 0; 587 entry->seg_count = 0;
590 } 588 }
591 589
592 if (entry->buf_count) { 590 if (entry->buf_count) {
593 for (i = 0; i < entry->buf_count; i++) { 591 for (i = 0; i < entry->buf_count; i++) {
594 if (entry->buflist[i].dev_private) { 592 kfree(entry->buflist[i].dev_private);
595 drm_free(entry->buflist[i].dev_private,
596 entry->buflist[i].dev_priv_size,
597 DRM_MEM_BUFS);
598 }
599 } 593 }
600 drm_free(entry->buflist, 594 kfree(entry->buflist);
601 entry->buf_count *
602 sizeof(*entry->buflist), DRM_MEM_BUFS);
603 595
604 entry->buf_count = 0; 596 entry->buf_count = 0;
605 } 597 }
@@ -698,8 +690,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
698 return -EINVAL; 690 return -EINVAL;
699 } 691 }
700 692
701 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 693 entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
702 DRM_MEM_BUFS);
703 if (!entry->buflist) { 694 if (!entry->buflist) {
704 mutex_unlock(&dev->struct_mutex); 695 mutex_unlock(&dev->struct_mutex);
705 atomic_dec(&dev->buf_alloc); 696 atomic_dec(&dev->buf_alloc);
@@ -729,7 +720,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
729 buf->file_priv = NULL; 720 buf->file_priv = NULL;
730 721
731 buf->dev_priv_size = dev->driver->dev_priv_size; 722 buf->dev_priv_size = dev->driver->dev_priv_size;
732 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 723 buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
733 if (!buf->dev_private) { 724 if (!buf->dev_private) {
734 /* Set count correctly so we free the proper amount. */ 725 /* Set count correctly so we free the proper amount. */
735 entry->buf_count = count; 726 entry->buf_count = count;
@@ -749,10 +740,9 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
749 740
750 DRM_DEBUG("byte_count: %d\n", byte_count); 741 DRM_DEBUG("byte_count: %d\n", byte_count);
751 742
752 temp_buflist = drm_realloc(dma->buflist, 743 temp_buflist = krealloc(dma->buflist,
753 dma->buf_count * sizeof(*dma->buflist), 744 (dma->buf_count + entry->buf_count) *
754 (dma->buf_count + entry->buf_count) 745 sizeof(*dma->buflist), GFP_KERNEL);
755 * sizeof(*dma->buflist), DRM_MEM_BUFS);
756 if (!temp_buflist) { 746 if (!temp_buflist) {
757 /* Free the entry because it isn't valid */ 747 /* Free the entry because it isn't valid */
758 drm_cleanup_buf_error(dev, entry); 748 drm_cleanup_buf_error(dev, entry);
@@ -854,8 +844,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
854 return -EINVAL; 844 return -EINVAL;
855 } 845 }
856 846
857 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 847 entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
858 DRM_MEM_BUFS);
859 if (!entry->buflist) { 848 if (!entry->buflist) {
860 mutex_unlock(&dev->struct_mutex); 849 mutex_unlock(&dev->struct_mutex);
861 atomic_dec(&dev->buf_alloc); 850 atomic_dec(&dev->buf_alloc);
@@ -863,11 +852,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
863 } 852 }
864 memset(entry->buflist, 0, count * sizeof(*entry->buflist)); 853 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
865 854
866 entry->seglist = drm_alloc(count * sizeof(*entry->seglist), 855 entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
867 DRM_MEM_SEGS);
868 if (!entry->seglist) { 856 if (!entry->seglist) {
869 drm_free(entry->buflist, 857 kfree(entry->buflist);
870 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
871 mutex_unlock(&dev->struct_mutex); 858 mutex_unlock(&dev->struct_mutex);
872 atomic_dec(&dev->buf_alloc); 859 atomic_dec(&dev->buf_alloc);
873 return -ENOMEM; 860 return -ENOMEM;
@@ -877,13 +864,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
877 /* Keep the original pagelist until we know all the allocations 864 /* Keep the original pagelist until we know all the allocations
878 * have succeeded 865 * have succeeded
879 */ 866 */
880 temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) 867 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
881 * sizeof(*dma->pagelist), DRM_MEM_PAGES); 868 sizeof(*dma->pagelist), GFP_KERNEL);
882 if (!temp_pagelist) { 869 if (!temp_pagelist) {
883 drm_free(entry->buflist, 870 kfree(entry->buflist);
884 count * sizeof(*entry->buflist), DRM_MEM_BUFS); 871 kfree(entry->seglist);
885 drm_free(entry->seglist,
886 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
887 mutex_unlock(&dev->struct_mutex); 872 mutex_unlock(&dev->struct_mutex);
888 atomic_dec(&dev->buf_alloc); 873 atomic_dec(&dev->buf_alloc);
889 return -ENOMEM; 874 return -ENOMEM;
@@ -907,9 +892,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
907 entry->buf_count = count; 892 entry->buf_count = count;
908 entry->seg_count = count; 893 entry->seg_count = count;
909 drm_cleanup_buf_error(dev, entry); 894 drm_cleanup_buf_error(dev, entry);
910 drm_free(temp_pagelist, 895 kfree(temp_pagelist);
911 (dma->page_count + (count << page_order))
912 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
913 mutex_unlock(&dev->struct_mutex); 896 mutex_unlock(&dev->struct_mutex);
914 atomic_dec(&dev->buf_alloc); 897 atomic_dec(&dev->buf_alloc);
915 return -ENOMEM; 898 return -ENOMEM;
@@ -940,18 +923,14 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
940 buf->file_priv = NULL; 923 buf->file_priv = NULL;
941 924
942 buf->dev_priv_size = dev->driver->dev_priv_size; 925 buf->dev_priv_size = dev->driver->dev_priv_size;
943 buf->dev_private = drm_alloc(buf->dev_priv_size, 926 buf->dev_private = kmalloc(buf->dev_priv_size,
944 DRM_MEM_BUFS); 927 GFP_KERNEL);
945 if (!buf->dev_private) { 928 if (!buf->dev_private) {
946 /* Set count correctly so we free the proper amount. */ 929 /* Set count correctly so we free the proper amount. */
947 entry->buf_count = count; 930 entry->buf_count = count;
948 entry->seg_count = count; 931 entry->seg_count = count;
949 drm_cleanup_buf_error(dev, entry); 932 drm_cleanup_buf_error(dev, entry);
950 drm_free(temp_pagelist, 933 kfree(temp_pagelist);
951 (dma->page_count +
952 (count << page_order))
953 * sizeof(*dma->pagelist),
954 DRM_MEM_PAGES);
955 mutex_unlock(&dev->struct_mutex); 934 mutex_unlock(&dev->struct_mutex);
956 atomic_dec(&dev->buf_alloc); 935 atomic_dec(&dev->buf_alloc);
957 return -ENOMEM; 936 return -ENOMEM;
@@ -964,16 +943,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
964 byte_count += PAGE_SIZE << page_order; 943 byte_count += PAGE_SIZE << page_order;
965 } 944 }
966 945
967 temp_buflist = drm_realloc(dma->buflist, 946 temp_buflist = krealloc(dma->buflist,
968 dma->buf_count * sizeof(*dma->buflist), 947 (dma->buf_count + entry->buf_count) *
969 (dma->buf_count + entry->buf_count) 948 sizeof(*dma->buflist), GFP_KERNEL);
970 * sizeof(*dma->buflist), DRM_MEM_BUFS);
971 if (!temp_buflist) { 949 if (!temp_buflist) {
972 /* Free the entry because it isn't valid */ 950 /* Free the entry because it isn't valid */
973 drm_cleanup_buf_error(dev, entry); 951 drm_cleanup_buf_error(dev, entry);
974 drm_free(temp_pagelist, 952 kfree(temp_pagelist);
975 (dma->page_count + (count << page_order))
976 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
977 mutex_unlock(&dev->struct_mutex); 953 mutex_unlock(&dev->struct_mutex);
978 atomic_dec(&dev->buf_alloc); 954 atomic_dec(&dev->buf_alloc);
979 return -ENOMEM; 955 return -ENOMEM;
@@ -988,9 +964,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
988 * with the new one. 964 * with the new one.
989 */ 965 */
990 if (dma->page_count) { 966 if (dma->page_count) {
991 drm_free(dma->pagelist, 967 kfree(dma->pagelist);
992 dma->page_count * sizeof(*dma->pagelist),
993 DRM_MEM_PAGES);
994 } 968 }
995 dma->pagelist = temp_pagelist; 969 dma->pagelist = temp_pagelist;
996 970
@@ -1086,8 +1060,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1086 return -EINVAL; 1060 return -EINVAL;
1087 } 1061 }
1088 1062
1089 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 1063 entry->buflist = kmalloc(count * sizeof(*entry->buflist),
1090 DRM_MEM_BUFS); 1064 GFP_KERNEL);
1091 if (!entry->buflist) { 1065 if (!entry->buflist) {
1092 mutex_unlock(&dev->struct_mutex); 1066 mutex_unlock(&dev->struct_mutex);
1093 atomic_dec(&dev->buf_alloc); 1067 atomic_dec(&dev->buf_alloc);
@@ -1118,7 +1092,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1118 buf->file_priv = NULL; 1092 buf->file_priv = NULL;
1119 1093
1120 buf->dev_priv_size = dev->driver->dev_priv_size; 1094 buf->dev_priv_size = dev->driver->dev_priv_size;
1121 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 1095 buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
1122 if (!buf->dev_private) { 1096 if (!buf->dev_private) {
1123 /* Set count correctly so we free the proper amount. */ 1097 /* Set count correctly so we free the proper amount. */
1124 entry->buf_count = count; 1098 entry->buf_count = count;
@@ -1139,10 +1113,9 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1139 1113
1140 DRM_DEBUG("byte_count: %d\n", byte_count); 1114 DRM_DEBUG("byte_count: %d\n", byte_count);
1141 1115
1142 temp_buflist = drm_realloc(dma->buflist, 1116 temp_buflist = krealloc(dma->buflist,
1143 dma->buf_count * sizeof(*dma->buflist), 1117 (dma->buf_count + entry->buf_count) *
1144 (dma->buf_count + entry->buf_count) 1118 sizeof(*dma->buflist), GFP_KERNEL);
1145 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1146 if (!temp_buflist) { 1119 if (!temp_buflist) {
1147 /* Free the entry because it isn't valid */ 1120 /* Free the entry because it isn't valid */
1148 drm_cleanup_buf_error(dev, entry); 1121 drm_cleanup_buf_error(dev, entry);
@@ -1248,8 +1221,8 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1248 return -EINVAL; 1221 return -EINVAL;
1249 } 1222 }
1250 1223
1251 entry->buflist = drm_alloc(count * sizeof(*entry->buflist), 1224 entry->buflist = kmalloc(count * sizeof(*entry->buflist),
1252 DRM_MEM_BUFS); 1225 GFP_KERNEL);
1253 if (!entry->buflist) { 1226 if (!entry->buflist) {
1254 mutex_unlock(&dev->struct_mutex); 1227 mutex_unlock(&dev->struct_mutex);
1255 atomic_dec(&dev->buf_alloc); 1228 atomic_dec(&dev->buf_alloc);
@@ -1279,7 +1252,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1279 buf->file_priv = NULL; 1252 buf->file_priv = NULL;
1280 1253
1281 buf->dev_priv_size = dev->driver->dev_priv_size; 1254 buf->dev_priv_size = dev->driver->dev_priv_size;
1282 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 1255 buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
1283 if (!buf->dev_private) { 1256 if (!buf->dev_private) {
1284 /* Set count correctly so we free the proper amount. */ 1257 /* Set count correctly so we free the proper amount. */
1285 entry->buf_count = count; 1258 entry->buf_count = count;
@@ -1299,10 +1272,9 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1299 1272
1300 DRM_DEBUG("byte_count: %d\n", byte_count); 1273 DRM_DEBUG("byte_count: %d\n", byte_count);
1301 1274
1302 temp_buflist = drm_realloc(dma->buflist, 1275 temp_buflist = krealloc(dma->buflist,
1303 dma->buf_count * sizeof(*dma->buflist), 1276 (dma->buf_count + entry->buf_count) *
1304 (dma->buf_count + entry->buf_count) 1277 sizeof(*dma->buflist), GFP_KERNEL);
1305 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1306 if (!temp_buflist) { 1278 if (!temp_buflist) {
1307 /* Free the entry because it isn't valid */ 1279 /* Free the entry because it isn't valid */
1308 drm_cleanup_buf_error(dev, entry); 1280 drm_cleanup_buf_error(dev, entry);
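
The drm_realloc() conversions in the drm_addbufs_* paths above lean on krealloc() semantics: it takes only the pointer, the new size, and GFP flags, and on failure it returns NULL while leaving the old allocation untouched, so the error paths only have to tear down the new entry and can leave dma->buflist as it was. A minimal sketch of that grow-an-array step, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct drm_buf;

    static int grow_buflist(struct drm_buf ***listp, int new_count)
    {
            struct drm_buf **tmp;

            /* old: drm_realloc(*listp, old_size, new_size, DRM_MEM_BUFS) */
            tmp = krealloc(*listp, new_count * sizeof(*tmp), GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;  /* *listp is untouched and still owned by the caller */

            *listp = tmp;
            return 0;
    }
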
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 7d1e53c10d4b..2607753a320b 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -341,7 +341,7 @@ int drm_addctx(struct drm_device *dev, void *data,
341 } 341 }
342 } 342 }
343 343
344 ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); 344 ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
345 if (!ctx_entry) { 345 if (!ctx_entry) {
346 DRM_DEBUG("out of memory\n"); 346 DRM_DEBUG("out of memory\n");
347 return -ENOMEM; 347 return -ENOMEM;
@@ -456,7 +456,7 @@ int drm_rmctx(struct drm_device *dev, void *data,
456 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 456 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
457 if (pos->handle == ctx->handle) { 457 if (pos->handle == ctx->handle) {
458 list_del(&pos->head); 458 list_del(&pos->head);
459 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 459 kfree(pos);
460 --dev->ctx_count; 460 --dev->ctx_count;
461 } 461 }
462 } 462 }
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6ce0e2667a85..2960b6d73456 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -100,15 +100,13 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
100 (dev->driver->driver_features & features) != features) 100 (dev->driver->driver_features & features) != features)
101 continue; 101 continue;
102 102
103 tmp = drm_alloc(sizeof(struct drm_info_node), 103 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
104 _DRM_DRIVER);
105 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, 104 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
106 root, tmp, &drm_debugfs_fops); 105 root, tmp, &drm_debugfs_fops);
107 if (!ent) { 106 if (!ent) {
108 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", 107 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
109 name, files[i].name); 108 name, files[i].name);
110 drm_free(tmp, sizeof(struct drm_info_node), 109 kfree(tmp);
111 _DRM_DRIVER);
112 ret = -1; 110 ret = -1;
113 goto fail; 111 goto fail;
114 } 112 }
@@ -196,8 +194,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
196 if (tmp->info_ent == &files[i]) { 194 if (tmp->info_ent == &files[i]) {
197 debugfs_remove(tmp->dent); 195 debugfs_remove(tmp->dent);
198 list_del(pos); 196 list_del(pos);
199 drm_free(tmp, sizeof(struct drm_info_node), 197 kfree(tmp);
200 _DRM_DRIVER);
201 } 198 }
202 } 199 }
203 } 200 }
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 7a8e2fba4678..13f1537413fb 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,7 +47,7 @@ int drm_dma_setup(struct drm_device *dev)
47{ 47{
48 int i; 48 int i;
49 49
50 dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER); 50 dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
51 if (!dev->dma) 51 if (!dev->dma)
52 return -ENOMEM; 52 return -ENOMEM;
53 53
@@ -88,36 +88,19 @@ void drm_dma_takedown(struct drm_device *dev)
88 drm_pci_free(dev, dma->bufs[i].seglist[j]); 88 drm_pci_free(dev, dma->bufs[i].seglist[j]);
89 } 89 }
90 } 90 }
91 drm_free(dma->bufs[i].seglist, 91 kfree(dma->bufs[i].seglist);
92 dma->bufs[i].seg_count
93 * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
94 } 92 }
95 if (dma->bufs[i].buf_count) { 93 if (dma->bufs[i].buf_count) {
96 for (j = 0; j < dma->bufs[i].buf_count; j++) { 94 for (j = 0; j < dma->bufs[i].buf_count; j++) {
97 if (dma->bufs[i].buflist[j].dev_private) { 95 kfree(dma->bufs[i].buflist[j].dev_private);
98 drm_free(dma->bufs[i].buflist[j].
99 dev_private,
100 dma->bufs[i].buflist[j].
101 dev_priv_size, DRM_MEM_BUFS);
102 }
103 } 96 }
104 drm_free(dma->bufs[i].buflist, 97 kfree(dma->bufs[i].buflist);
105 dma->bufs[i].buf_count *
106 sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
107 } 98 }
108 } 99 }
109 100
110 if (dma->buflist) { 101 kfree(dma->buflist);
111 drm_free(dma->buflist, 102 kfree(dma->pagelist);
112 dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS); 103 kfree(dev->dma);
113 }
114
115 if (dma->pagelist) {
116 drm_free(dma->pagelist,
117 dma->page_count * sizeof(*dma->pagelist),
118 DRM_MEM_PAGES);
119 }
120 drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
121 dev->dma = NULL; 104 dev->dma = NULL;
122} 105}
123 106
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 80be1cab62af..c53c9768cc11 100644
--- a/drivers/gpu/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -85,9 +85,8 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
85 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 85 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
86 return -EINVAL; 86 return -EINVAL;
87 } 87 }
88 drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect), 88 kfree(info->rects);
89 DRM_MEM_BUFS); 89 kfree(info);
90 drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
91 90
92 idr_remove(&dev->drw_idr, draw->handle); 91 idr_remove(&dev->drw_idr, draw->handle);
93 92
@@ -106,12 +105,12 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
106 105
107 info = idr_find(&dev->drw_idr, update->handle); 106 info = idr_find(&dev->drw_idr, update->handle);
108 if (!info) { 107 if (!info) {
109 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); 108 info = kzalloc(sizeof(*info), GFP_KERNEL);
110 if (!info) 109 if (!info)
111 return -ENOMEM; 110 return -ENOMEM;
112 if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { 111 if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
113 DRM_ERROR("No such drawable %d\n", update->handle); 112 DRM_ERROR("No such drawable %d\n", update->handle);
114 drm_free(info, sizeof(*info), DRM_MEM_BUFS); 113 kfree(info);
115 return -EINVAL; 114 return -EINVAL;
116 } 115 }
117 } 116 }
@@ -121,8 +120,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
121 if (update->num == 0) 120 if (update->num == 0)
122 rects = NULL; 121 rects = NULL;
123 else if (update->num != info->num_rects) { 122 else if (update->num != info->num_rects) {
124 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), 123 rects = kmalloc(update->num *
125 DRM_MEM_BUFS); 124 sizeof(struct drm_clip_rect),
125 GFP_KERNEL);
126 } else 126 } else
127 rects = info->rects; 127 rects = info->rects;
128 128
@@ -145,8 +145,7 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
145 spin_lock_irqsave(&dev->drw_lock, irqflags); 145 spin_lock_irqsave(&dev->drw_lock, irqflags);
146 146
147 if (rects != info->rects) { 147 if (rects != info->rects) {
148 drm_free(info->rects, info->num_rects * 148 kfree(info->rects);
149 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
150 } 149 }
151 150
152 info->rects = rects; 151 info->rects = rects;
@@ -166,8 +165,7 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
166 165
167error: 166error:
168 if (rects != info->rects) 167 if (rects != info->rects)
169 drm_free(rects, update->num * sizeof(struct drm_clip_rect), 168 kfree(rects);
170 DRM_MEM_BUFS);
171 169
172 return err; 170 return err;
173} 171}
@@ -186,9 +184,8 @@ static int drm_drawable_free(int idr, void *p, void *data)
186 struct drm_drawable_info *info = p; 184 struct drm_drawable_info *info = p;
187 185
188 if (info) { 186 if (info) {
189 drm_free(info->rects, info->num_rects * 187 kfree(info->rects);
190 sizeof(struct drm_clip_rect), DRM_MEM_BUFS); 188 kfree(info);
191 drm_free(info, sizeof(*info), DRM_MEM_BUFS);
192 } 189 }
193 190
194 return 0; 191 return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 1bf7efd8d334..b39d7bfc0c9c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -189,7 +189,7 @@ int drm_lastclose(struct drm_device * dev)
189 if (entry->bound) 189 if (entry->bound)
190 drm_unbind_agp(entry->memory); 190 drm_unbind_agp(entry->memory);
191 drm_free_agp(entry->memory, entry->pages); 191 drm_free_agp(entry->memory, entry->pages);
192 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 192 kfree(entry);
193 } 193 }
194 INIT_LIST_HEAD(&dev->agp->memory); 194 INIT_LIST_HEAD(&dev->agp->memory);
195 195
@@ -208,21 +208,15 @@ int drm_lastclose(struct drm_device * dev)
208 /* Clear vma list (only built for debugging) */ 208 /* Clear vma list (only built for debugging) */
209 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { 209 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
210 list_del(&vma->head); 210 list_del(&vma->head);
211 drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); 211 kfree(vma);
212 } 212 }
213 213
214 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { 214 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
215 for (i = 0; i < dev->queue_count; i++) { 215 for (i = 0; i < dev->queue_count; i++) {
216 if (dev->queuelist[i]) { 216 kfree(dev->queuelist[i]);
217 drm_free(dev->queuelist[i], 217 dev->queuelist[i] = NULL;
218 sizeof(*dev->queuelist[0]),
219 DRM_MEM_QUEUES);
220 dev->queuelist[i] = NULL;
221 }
222 } 218 }
223 drm_free(dev->queuelist, 219 kfree(dev->queuelist);
224 dev->queue_slots * sizeof(*dev->queuelist),
225 DRM_MEM_QUEUES);
226 dev->queuelist = NULL; 220 dev->queuelist = NULL;
227 } 221 }
228 dev->queue_count = 0; 222 dev->queue_count = 0;
@@ -344,8 +338,6 @@ static int __init drm_core_init(void)
344 goto err_p3; 338 goto err_p3;
345 } 339 }
346 340
347 drm_mem_init();
348
349 DRM_INFO("Initialized %s %d.%d.%d %s\n", 341 DRM_INFO("Initialized %s %d.%d.%d %s\n",
350 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); 342 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
351 return 0; 343 return 0;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 801a0d0e0810..80cc6d06d61b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -252,16 +252,18 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
252{ 252{
253 struct drm_display_mode *mode; 253 struct drm_display_mode *mode;
254 int hsize = t->hsize * 8 + 248, vsize; 254 int hsize = t->hsize * 8 + 248, vsize;
255 unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
256 >> EDID_TIMING_ASPECT_SHIFT;
255 257
256 mode = drm_mode_create(dev); 258 mode = drm_mode_create(dev);
257 if (!mode) 259 if (!mode)
258 return NULL; 260 return NULL;
259 261
260 if (t->aspect_ratio == 0) 262 if (aspect_ratio == 0)
261 vsize = (hsize * 10) / 16; 263 vsize = (hsize * 10) / 16;
262 else if (t->aspect_ratio == 1) 264 else if (aspect_ratio == 1)
263 vsize = (hsize * 3) / 4; 265 vsize = (hsize * 3) / 4;
264 else if (t->aspect_ratio == 2) 266 else if (aspect_ratio == 2)
265 vsize = (hsize * 4) / 5; 267 vsize = (hsize * 4) / 5;
266 else 268 else
267 vsize = (hsize * 9) / 16; 269 vsize = (hsize * 9) / 16;
@@ -288,17 +290,24 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
288{ 290{
289 struct drm_display_mode *mode; 291 struct drm_display_mode *mode;
290 struct detailed_pixel_timing *pt = &timing->data.pixel_data; 292 struct detailed_pixel_timing *pt = &timing->data.pixel_data;
293 unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
294 unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
295 unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
296 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
297 unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
298 unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
299 unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
300 unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
291 301
292 /* ignore tiny modes */ 302 /* ignore tiny modes */
293 if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 || 303 if (hactive < 64 || vactive < 64)
294 ((pt->vactive_hi << 8) | pt->hactive_lo) < 64)
295 return NULL; 304 return NULL;
296 305
297 if (pt->stereo) { 306 if (pt->misc & DRM_EDID_PT_STEREO) {
298 printk(KERN_WARNING "stereo mode not supported\n"); 307 printk(KERN_WARNING "stereo mode not supported\n");
299 return NULL; 308 return NULL;
300 } 309 }
301 if (!pt->separate_sync) { 310 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
302 printk(KERN_WARNING "integrated sync not supported\n"); 311 printk(KERN_WARNING "integrated sync not supported\n");
303 return NULL; 312 return NULL;
304 } 313 }
@@ -310,41 +319,36 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
310 mode->type = DRM_MODE_TYPE_DRIVER; 319 mode->type = DRM_MODE_TYPE_DRIVER;
311 320
312 if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) 321 if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
313 timing->pixel_clock = 1088; 322 timing->pixel_clock = cpu_to_le16(1088);
314 323
315 mode->clock = timing->pixel_clock * 10; 324 mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
316 325
317 mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo; 326 mode->hdisplay = hactive;
318 mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) | 327 mode->hsync_start = mode->hdisplay + hsync_offset;
319 pt->hsync_offset_lo); 328 mode->hsync_end = mode->hsync_start + hsync_pulse_width;
320 mode->hsync_end = mode->hsync_start + 329 mode->htotal = mode->hdisplay + hblank;
321 ((pt->hsync_pulse_width_hi << 8) | 330
322 pt->hsync_pulse_width_lo); 331 mode->vdisplay = vactive;
323 mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); 332 mode->vsync_start = mode->vdisplay + vsync_offset;
324 333 mode->vsync_end = mode->vsync_start + vsync_pulse_width;
325 mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; 334 mode->vtotal = mode->vdisplay + vblank;
326 mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 4) |
327 pt->vsync_offset_lo);
328 mode->vsync_end = mode->vsync_start +
329 ((pt->vsync_pulse_width_hi << 4) |
330 pt->vsync_pulse_width_lo);
331 mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
332 335
333 drm_mode_set_name(mode); 336 drm_mode_set_name(mode);
334 337
335 if (pt->interlaced) 338 if (pt->misc & DRM_EDID_PT_INTERLACED)
336 mode->flags |= DRM_MODE_FLAG_INTERLACE; 339 mode->flags |= DRM_MODE_FLAG_INTERLACE;
337 340
338 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { 341 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
339 pt->hsync_positive = 1; 342 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
340 pt->vsync_positive = 1;
341 } 343 }
342 344
343 mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; 345 mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
344 mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; 346 DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
347 mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
348 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
345 349
346 mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8); 350 mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
347 mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8); 351 mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
348 352
349 if (quirks & EDID_QUIRK_DETAILED_IN_CM) { 353 if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
350 mode->width_mm *= 10; 354 mode->width_mm *= 10;
@@ -465,7 +469,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
465 struct drm_display_mode *newmode; 469 struct drm_display_mode *newmode;
466 470
467 /* If std timings bytes are 1, 1 it's empty */ 471 /* If std timings bytes are 1, 1 it's empty */
468 if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1) 472 if (t->hsize == 1 && t->vfreq_aspect == 1)
469 continue; 473 continue;
470 474
471 newmode = drm_mode_std(dev, &edid->standard_timings[i]); 475 newmode = drm_mode_std(dev, &edid->standard_timings[i]);
@@ -509,7 +513,7 @@ static int add_detailed_info(struct drm_connector *connector,
509 continue; 513 continue;
510 514
511 /* First detailed mode is preferred */ 515 /* First detailed mode is preferred */
512 if (i == 0 && edid->preferred_timing) 516 if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
513 newmode->type |= DRM_MODE_TYPE_PREFERRED; 517 newmode->type |= DRM_MODE_TYPE_PREFERRED;
514 drm_mode_probed_add(connector, newmode); 518 drm_mode_probed_add(connector, newmode);
515 519
@@ -767,22 +771,22 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
767 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 771 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
768 edid_fixup_preferred(connector, quirks); 772 edid_fixup_preferred(connector, quirks);
769 773
770 connector->display_info.serration_vsync = edid->serration_vsync; 774 connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
771 connector->display_info.sync_on_green = edid->sync_on_green; 775 connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
772 connector->display_info.composite_sync = edid->composite_sync; 776 connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
773 connector->display_info.separate_syncs = edid->separate_syncs; 777 connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
774 connector->display_info.blank_to_black = edid->blank_to_black; 778 connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
775 connector->display_info.video_level = edid->video_level; 779 connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
776 connector->display_info.digital = edid->digital; 780 connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
777 connector->display_info.width_mm = edid->width_cm * 10; 781 connector->display_info.width_mm = edid->width_cm * 10;
778 connector->display_info.height_mm = edid->height_cm * 10; 782 connector->display_info.height_mm = edid->height_cm * 10;
779 connector->display_info.gamma = edid->gamma; 783 connector->display_info.gamma = edid->gamma;
780 connector->display_info.gtf_supported = edid->default_gtf; 784 connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
781 connector->display_info.standard_color = edid->standard_color; 785 connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
782 connector->display_info.display_type = edid->display_type; 786 connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
783 connector->display_info.active_off_supported = edid->pm_active_off; 787 connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
784 connector->display_info.suspend_supported = edid->pm_suspend; 788 connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
785 connector->display_info.standby_supported = edid->pm_standby; 789 connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
786 connector->display_info.gamma = edid->gamma; 790 connector->display_info.gamma = edid->gamma;
787 791
788 return num_modes; 792 return num_modes;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 09a3571c9908..251bc0e3b5ec 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -240,7 +240,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
240 240
241 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 241 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
242 242
243 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); 243 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
244 if (!priv) 244 if (!priv)
245 return -ENOMEM; 245 return -ENOMEM;
246 246
@@ -328,7 +328,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
328 328
329 return 0; 329 return 0;
330 out_free: 330 out_free:
331 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 331 kfree(priv);
332 filp->private_data = NULL; 332 filp->private_data = NULL;
333 return ret; 333 return ret;
334} 334}
@@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
471 drm_ctxbitmap_free(dev, pos->handle); 471 drm_ctxbitmap_free(dev, pos->handle);
472 472
473 list_del(&pos->head); 473 list_del(&pos->head);
474 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 474 kfree(pos);
475 --dev->ctx_count; 475 --dev->ctx_count;
476 } 476 }
477 } 477 }
@@ -516,7 +516,7 @@ int drm_release(struct inode *inode, struct file *filp)
516 516
517 if (dev->driver->postclose) 517 if (dev->driver->postclose)
518 dev->driver->postclose(dev, file_priv); 518 dev->driver->postclose(dev, file_priv);
519 drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); 519 kfree(file_priv);
520 520
521 /* ======================================================== 521 /* ========================================================
522 * End inline drm_release 522 * End inline drm_release
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ec43005100d9..8104ecaea26f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -89,7 +89,7 @@ drm_gem_init(struct drm_device *dev)
89 atomic_set(&dev->gtt_count, 0); 89 atomic_set(&dev->gtt_count, 0);
90 atomic_set(&dev->gtt_memory, 0); 90 atomic_set(&dev->gtt_memory, 0);
91 91
92 mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM); 92 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
93 if (!mm) { 93 if (!mm) {
94 DRM_ERROR("out of memory\n"); 94 DRM_ERROR("out of memory\n");
95 return -ENOMEM; 95 return -ENOMEM;
@@ -98,14 +98,14 @@ drm_gem_init(struct drm_device *dev)
98 dev->mm_private = mm; 98 dev->mm_private = mm;
99 99
100 if (drm_ht_create(&mm->offset_hash, 19)) { 100 if (drm_ht_create(&mm->offset_hash, 19)) {
101 drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); 101 kfree(mm);
102 return -ENOMEM; 102 return -ENOMEM;
103 } 103 }
104 104
105 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, 105 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
106 DRM_FILE_PAGE_OFFSET_SIZE)) { 106 DRM_FILE_PAGE_OFFSET_SIZE)) {
107 drm_ht_remove(&mm->offset_hash); 107 drm_ht_remove(&mm->offset_hash);
108 drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); 108 kfree(mm);
109 return -ENOMEM; 109 return -ENOMEM;
110 } 110 }
111 111
@@ -119,7 +119,7 @@ drm_gem_destroy(struct drm_device *dev)
119 119
120 drm_mm_takedown(&mm->offset_manager); 120 drm_mm_takedown(&mm->offset_manager);
121 drm_ht_remove(&mm->offset_hash); 121 drm_ht_remove(&mm->offset_hash);
122 drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); 122 kfree(mm);
123 dev->mm_private = NULL; 123 dev->mm_private = NULL;
124} 124}
125 125
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index ac35145c3e20..f36b21c5b2e1 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -46,8 +46,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
46 ht->table = NULL; 46 ht->table = NULL;
47 ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE); 47 ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
48 if (!ht->use_vmalloc) { 48 if (!ht->use_vmalloc) {
49 ht->table = drm_calloc(ht->size, sizeof(*ht->table), 49 ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
50 DRM_MEM_HASHTAB);
51 } 50 }
52 if (!ht->table) { 51 if (!ht->table) {
53 ht->use_vmalloc = 1; 52 ht->use_vmalloc = 1;
@@ -200,8 +199,7 @@ void drm_ht_remove(struct drm_open_hash *ht)
200 if (ht->use_vmalloc) 199 if (ht->use_vmalloc)
201 vfree(ht->table); 200 vfree(ht->table);
202 else 201 else
203 drm_free(ht->table, ht->size * sizeof(*ht->table), 202 kfree(ht->table);
204 DRM_MEM_HASHTAB);
205 ht->table = NULL; 203 ht->table = NULL;
206 } 204 }
207} 205}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 1fad76289e66..9b9ff46c2378 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -93,7 +93,7 @@ int drm_setunique(struct drm_device *dev, void *data,
93 93
94 master->unique_len = u->unique_len; 94 master->unique_len = u->unique_len;
95 master->unique_size = u->unique_len + 1; 95 master->unique_size = u->unique_len + 1;
96 master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); 96 master->unique = kmalloc(master->unique_size, GFP_KERNEL);
97 if (!master->unique) 97 if (!master->unique)
98 return -ENOMEM; 98 return -ENOMEM;
99 if (copy_from_user(master->unique, u->unique, master->unique_len)) 99 if (copy_from_user(master->unique, u->unique, master->unique_len))
@@ -101,9 +101,8 @@ int drm_setunique(struct drm_device *dev, void *data,
101 101
102 master->unique[master->unique_len] = '\0'; 102 master->unique[master->unique_len] = '\0';
103 103
104 dev->devname = 104 dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
105 drm_alloc(strlen(dev->driver->pci_driver.name) + 105 strlen(master->unique) + 2, GFP_KERNEL);
106 strlen(master->unique) + 2, DRM_MEM_DRIVER);
107 if (!dev->devname) 106 if (!dev->devname)
108 return -ENOMEM; 107 return -ENOMEM;
109 108
@@ -138,7 +137,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
138 137
139 master->unique_len = 40; 138 master->unique_len = 40;
140 master->unique_size = master->unique_len; 139 master->unique_size = master->unique_len;
141 master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); 140 master->unique = kmalloc(master->unique_size, GFP_KERNEL);
142 if (master->unique == NULL) 141 if (master->unique == NULL)
143 return -ENOMEM; 142 return -ENOMEM;
144 143
@@ -152,9 +151,8 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
152 else 151 else
153 master->unique_len = len; 152 master->unique_len = len;
154 153
155 dev->devname = 154 dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
156 drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len + 155 master->unique_len + 2, GFP_KERNEL);
157 2, DRM_MEM_DRIVER);
158 if (dev->devname == NULL) 156 if (dev->devname == NULL)
159 return -ENOMEM; 157 return -ENOMEM;
160 158
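In both drm_setunique() and drm_set_busid() the devname buffer is sized as strlen(pci_driver.name) + strlen(unique) + 2: one byte for a separator character and one for the terminating NUL. A hedged sketch of the intended use (the "%s@%s" format is the long-standing DRM busid convention and is assumed here; it is not part of this hunk):

	/* One extra byte for the separator, one for the trailing '\0'. */
	dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
			       strlen(master->unique) + 2, GFP_KERNEL);
	if (!dev->devname)
		return -ENOMEM;

	/* Assumed layout: "<driver>@<busid>", e.g. "i915@pci:0000:00:02.0". */
	sprintf(dev->devname, "%s@%s",
		dev->driver->pci_driver.name, master->unique);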
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index fc8e5acd9d9a..b4a3dbcebe9b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -104,21 +104,13 @@ void drm_vblank_cleanup(struct drm_device *dev)
104 104
105 vblank_disable_fn((unsigned long)dev); 105 vblank_disable_fn((unsigned long)dev);
106 106
107 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, 107 kfree(dev->vbl_queue);
108 DRM_MEM_DRIVER); 108 kfree(dev->_vblank_count);
109 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * 109 kfree(dev->vblank_refcount);
110 dev->num_crtcs, DRM_MEM_DRIVER); 110 kfree(dev->vblank_enabled);
111 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * 111 kfree(dev->last_vblank);
112 dev->num_crtcs, DRM_MEM_DRIVER); 112 kfree(dev->last_vblank_wait);
113 drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * 113 kfree(dev->vblank_inmodeset);
114 dev->num_crtcs, DRM_MEM_DRIVER);
115 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
116 DRM_MEM_DRIVER);
117 drm_free(dev->last_vblank_wait,
118 sizeof(*dev->last_vblank_wait) * dev->num_crtcs,
119 DRM_MEM_DRIVER);
120 drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
121 dev->num_crtcs, DRM_MEM_DRIVER);
122 114
123 dev->num_crtcs = 0; 115 dev->num_crtcs = 0;
124} 116}
@@ -132,37 +124,33 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
132 spin_lock_init(&dev->vbl_lock); 124 spin_lock_init(&dev->vbl_lock);
133 dev->num_crtcs = num_crtcs; 125 dev->num_crtcs = num_crtcs;
134 126
135 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, 127 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
136 DRM_MEM_DRIVER); 128 GFP_KERNEL);
137 if (!dev->vbl_queue) 129 if (!dev->vbl_queue)
138 goto err; 130 goto err;
139 131
140 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, 132 dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
141 DRM_MEM_DRIVER);
142 if (!dev->_vblank_count) 133 if (!dev->_vblank_count)
143 goto err; 134 goto err;
144 135
145 dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, 136 dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
146 DRM_MEM_DRIVER); 137 GFP_KERNEL);
147 if (!dev->vblank_refcount) 138 if (!dev->vblank_refcount)
148 goto err; 139 goto err;
149 140
150 dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), 141 dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
151 DRM_MEM_DRIVER);
152 if (!dev->vblank_enabled) 142 if (!dev->vblank_enabled)
153 goto err; 143 goto err;
154 144
155 dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); 145 dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
156 if (!dev->last_vblank) 146 if (!dev->last_vblank)
157 goto err; 147 goto err;
158 148
159 dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32), 149 dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
160 DRM_MEM_DRIVER);
161 if (!dev->last_vblank_wait) 150 if (!dev->last_vblank_wait)
162 goto err; 151 goto err;
163 152
164 dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), 153 dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
165 DRM_MEM_DRIVER);
166 if (!dev->vblank_inmodeset) 154 if (!dev->vblank_inmodeset)
167 goto err; 155 goto err;
168 156
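drm_vblank_init() sizes each of these arrays by num_crtcs, and drm_vblank_cleanup() (the first hunk of this file) now frees them all unconditionally; since kfree(NULL) is a no-op, the shared err: path (outside this hunk) can presumably just fall through to the cleanup routine without tracking which allocations succeeded. A condensed sketch with only two of the arrays:

	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
				 GFP_KERNEL);
	if (!dev->vbl_queue)
		goto err;

	dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
	if (!dev->last_vblank)
		goto err;

	return 0;

err:
	/* Frees every pointer; kfree() ignores NULL, so a partially
	 * initialised device needs no special casing. */
	drm_vblank_cleanup(dev);
	return -ENOMEM;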
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0c707f533eab..e4865f99989c 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -36,15 +36,6 @@
36#include <linux/highmem.h> 36#include <linux/highmem.h>
37#include "drmP.h" 37#include "drmP.h"
38 38
39#ifdef DEBUG_MEMORY
40#include "drm_memory_debug.h"
41#else
42
43/** No-op. */
44void drm_mem_init(void)
45{
46}
47
48/** 39/**
49 * Called when "/proc/dri/%dev%/mem" is read. 40 * Called when "/proc/dri/%dev%/mem" is read.
50 * 41 *
@@ -64,28 +55,15 @@ int drm_mem_info(char *buf, char **start, off_t offset,
64 return 0; 55 return 0;
65} 56}
66 57
67/** Wrapper around kmalloc() and kfree() */
68void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
69{
70 void *pt;
71
72 if (!(pt = kmalloc(size, GFP_KERNEL)))
73 return NULL;
74 if (oldpt && oldsize) {
75 memcpy(pt, oldpt, oldsize);
76 kfree(oldpt);
77 }
78 return pt;
79}
80
81#if __OS_HAS_AGP 58#if __OS_HAS_AGP
82static void *agp_remap(unsigned long offset, unsigned long size, 59static void *agp_remap(unsigned long offset, unsigned long size,
83 struct drm_device * dev) 60 struct drm_device * dev)
84{ 61{
85 unsigned long *phys_addr_map, i, num_pages = 62 unsigned long i, num_pages =
86 PAGE_ALIGN(size) / PAGE_SIZE; 63 PAGE_ALIGN(size) / PAGE_SIZE;
87 struct drm_agp_mem *agpmem; 64 struct drm_agp_mem *agpmem;
88 struct page **page_map; 65 struct page **page_map;
66 struct page **phys_page_map;
89 void *addr; 67 void *addr;
90 68
91 size = PAGE_ALIGN(size); 69 size = PAGE_ALIGN(size);
@@ -112,10 +90,9 @@ static void *agp_remap(unsigned long offset, unsigned long size,
112 if (!page_map) 90 if (!page_map)
113 return NULL; 91 return NULL;
114 92
115 phys_addr_map = 93 phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
116 agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
117 for (i = 0; i < num_pages; ++i) 94 for (i = 0; i < num_pages; ++i)
118 page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); 95 page_map[i] = phys_page_map[i];
119 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); 96 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
120 vfree(page_map); 97 vfree(page_map);
121 98
@@ -157,8 +134,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
157 134
158#endif /* agp */ 135#endif /* agp */
159 136
160#endif /* debug_memory */
161
162void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) 137void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
163{ 138{
164 if (drm_core_has_AGP(dev) && 139 if (drm_core_has_AGP(dev) &&
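The agp_remap() change follows an AGP core API change: agpmem->memory now carries a struct page **pages array, so the remap loop copies page pointers directly instead of reconstructing them from a physical-address array with pfn_to_page(). The rewritten loop, in isolation:

	/* The AGP memory already holds struct page pointers. */
	phys_page_map = agpmem->memory->pages +
			(offset - agpmem->bound) / PAGE_SIZE;
	for (i = 0; i < num_pages; ++i)
		page_map[i] = phys_page_map[i];

	/* Map the collected pages into one kernel virtual range. */
	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);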
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a912a0ff11cc..3e47869d6dae 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -187,9 +187,10 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
187} 187}
188 188
189 189
190 190struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
191struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node, 191 unsigned long size,
192 unsigned long size, unsigned alignment) 192 unsigned alignment,
193 int atomic)
193{ 194{
194 195
195 struct drm_mm_node *align_splitoff = NULL; 196 struct drm_mm_node *align_splitoff = NULL;
@@ -200,7 +201,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
200 201
201 if (tmp) { 202 if (tmp) {
202 align_splitoff = 203 align_splitoff =
203 drm_mm_split_at_start(node, alignment - tmp, 0); 204 drm_mm_split_at_start(node, alignment - tmp, atomic);
204 if (unlikely(align_splitoff == NULL)) 205 if (unlikely(align_splitoff == NULL))
205 return NULL; 206 return NULL;
206 } 207 }
@@ -209,7 +210,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
209 list_del_init(&node->fl_entry); 210 list_del_init(&node->fl_entry);
210 node->free = 0; 211 node->free = 0;
211 } else { 212 } else {
212 node = drm_mm_split_at_start(node, size, 0); 213 node = drm_mm_split_at_start(node, size, atomic);
213 } 214 }
214 215
215 if (align_splitoff) 216 if (align_splitoff)
@@ -217,42 +218,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
217 218
218 return node; 219 return node;
219} 220}
220 221EXPORT_SYMBOL(drm_mm_get_block_generic);
221EXPORT_SYMBOL(drm_mm_get_block);
222
223struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
224 unsigned long size,
225 unsigned alignment)
226{
227
228 struct drm_mm_node *align_splitoff = NULL;
229 struct drm_mm_node *child;
230 unsigned tmp = 0;
231
232 if (alignment)
233 tmp = parent->start % alignment;
234
235 if (tmp) {
236 align_splitoff =
237 drm_mm_split_at_start(parent, alignment - tmp, 1);
238 if (unlikely(align_splitoff == NULL))
239 return NULL;
240 }
241
242 if (parent->size == size) {
243 list_del_init(&parent->fl_entry);
244 parent->free = 0;
245 return parent;
246 } else {
247 child = drm_mm_split_at_start(parent, size, 1);
248 }
249
250 if (align_splitoff)
251 drm_mm_put_block(align_splitoff);
252
253 return child;
254}
255EXPORT_SYMBOL(drm_mm_get_block_atomic);
256 222
257/* 223/*
258 * Put a block. Merge with the previous and / or next block if they are free. 224 * Put a block. Merge with the previous and / or next block if they are free.
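drm_mm_get_block() and drm_mm_get_block_atomic() were duplicates that differed only in the flag passed to drm_mm_split_at_start(); they collapse into drm_mm_get_block_generic(), which takes that flag as a parameter. The old entry points can then survive as thin wrappers, presumably as static inlines in drm_mm.h (a sketch under that assumption; the header change is not part of this hunk):

static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 0);
}

static inline struct drm_mm_node *
drm_mm_get_block_atomic(struct drm_mm_node *parent, unsigned long size,
			unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 1);
}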
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b55d5bc6ea61..577094fb1995 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -55,17 +55,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
55 unsigned long addr; 55 unsigned long addr;
56 size_t sz; 56 size_t sz;
57#endif 57#endif
58#ifdef DRM_DEBUG_MEMORY
59 int area = DRM_MEM_DMA;
60
61 spin_lock(&drm_mem_lock);
62 if ((drm_ram_used >> PAGE_SHIFT)
63 > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
64 spin_unlock(&drm_mem_lock);
65 return 0;
66 }
67 spin_unlock(&drm_mem_lock);
68#endif
69 58
70 /* pci_alloc_consistent only guarantees alignment to the smallest 59 /* pci_alloc_consistent only guarantees alignment to the smallest
71 * PAGE_SIZE order which is greater than or equal to the requested size. 60 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -86,26 +75,10 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
86 dmah->size = size; 75 dmah->size = size;
87 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); 76 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
88 77
89#ifdef DRM_DEBUG_MEMORY
90 if (dmah->vaddr == NULL) {
91 spin_lock(&drm_mem_lock);
92 ++drm_mem_stats[area].fail_count;
93 spin_unlock(&drm_mem_lock);
94 kfree(dmah);
95 return NULL;
96 }
97
98 spin_lock(&drm_mem_lock);
99 ++drm_mem_stats[area].succeed_count;
100 drm_mem_stats[area].bytes_allocated += size;
101 drm_ram_used += size;
102 spin_unlock(&drm_mem_lock);
103#else
104 if (dmah->vaddr == NULL) { 78 if (dmah->vaddr == NULL) {
105 kfree(dmah); 79 kfree(dmah);
106 return NULL; 80 return NULL;
107 } 81 }
108#endif
109 82
110 memset(dmah->vaddr, 0, size); 83 memset(dmah->vaddr, 0, size);
111 84
@@ -132,17 +105,8 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
132 unsigned long addr; 105 unsigned long addr;
133 size_t sz; 106 size_t sz;
134#endif 107#endif
135#ifdef DRM_DEBUG_MEMORY
136 int area = DRM_MEM_DMA;
137 int alloc_count;
138 int free_count;
139#endif
140 108
141 if (!dmah->vaddr) { 109 if (dmah->vaddr) {
142#ifdef DRM_DEBUG_MEMORY
143 DRM_MEM_ERROR(area, "Attempt to free address 0\n");
144#endif
145 } else {
146 /* XXX - Is virt_to_page() legal for consistent mem? */ 110 /* XXX - Is virt_to_page() legal for consistent mem? */
147 /* Unreserve */ 111 /* Unreserve */
148 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; 112 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
@@ -152,21 +116,6 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
152 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, 116 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
153 dmah->busaddr); 117 dmah->busaddr);
154 } 118 }
155
156#ifdef DRM_DEBUG_MEMORY
157 spin_lock(&drm_mem_lock);
158 free_count = ++drm_mem_stats[area].free_count;
159 alloc_count = drm_mem_stats[area].succeed_count;
160 drm_mem_stats[area].bytes_freed += size;
161 drm_ram_used -= size;
162 spin_unlock(&drm_mem_lock);
163 if (free_count > alloc_count) {
164 DRM_MEM_ERROR(area,
165 "Excess frees: %d frees, %d allocs\n",
166 free_count, alloc_count);
167 }
168#endif
169
170} 119}
171 120
172/** 121/**
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index bae5391165ac..bbd4b3d1074a 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -105,13 +105,12 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
105 (dev->driver->driver_features & features) != features) 105 (dev->driver->driver_features & features) != features)
106 continue; 106 continue;
107 107
108 tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER); 108 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
109 ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root); 109 ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
110 if (!ent) { 110 if (!ent) {
111 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 111 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
112 name, files[i].name); 112 name, files[i].name);
113 drm_free(tmp, sizeof(struct drm_info_node), 113 kfree(tmp);
114 _DRM_DRIVER);
115 ret = -1; 114 ret = -1;
116 goto fail; 115 goto fail;
117 } 116 }
@@ -192,8 +191,7 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
192 remove_proc_entry(files[i].name, 191 remove_proc_entry(files[i].name,
193 minor->proc_root); 192 minor->proc_root);
194 list_del(pos); 193 list_del(pos);
195 drm_free(tmp, sizeof(struct drm_info_node), 194 kfree(tmp);
196 _DRM_DRIVER);
197 } 195 }
198 } 196 }
199 } 197 }
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index b2b0f3d41714..c7823c863d4f 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -58,11 +58,9 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
58 58
59 vfree(entry->virtual); 59 vfree(entry->virtual);
60 60
61 drm_free(entry->busaddr, 61 kfree(entry->busaddr);
62 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); 62 kfree(entry->pagelist);
63 drm_free(entry->pagelist, 63 kfree(entry);
64 entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
65 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
66} 64}
67 65
68#ifdef _LP64 66#ifdef _LP64
@@ -84,7 +82,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
84 if (dev->sg) 82 if (dev->sg)
85 return -EINVAL; 83 return -EINVAL;
86 84
87 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); 85 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
88 if (!entry) 86 if (!entry)
89 return -ENOMEM; 87 return -ENOMEM;
90 88
@@ -93,34 +91,27 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
93 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); 91 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
94 92
95 entry->pages = pages; 93 entry->pages = pages;
96 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), 94 entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
97 DRM_MEM_PAGES);
98 if (!entry->pagelist) { 95 if (!entry->pagelist) {
99 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); 96 kfree(entry);
100 return -ENOMEM; 97 return -ENOMEM;
101 } 98 }
102 99
103 memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); 100 memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
104 101
105 entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr), 102 entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
106 DRM_MEM_PAGES);
107 if (!entry->busaddr) { 103 if (!entry->busaddr) {
108 drm_free(entry->pagelist, 104 kfree(entry->pagelist);
109 entry->pages * sizeof(*entry->pagelist), 105 kfree(entry);
110 DRM_MEM_PAGES);
111 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
112 return -ENOMEM; 106 return -ENOMEM;
113 } 107 }
114 memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr)); 108 memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
115 109
116 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); 110 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
117 if (!entry->virtual) { 111 if (!entry->virtual) {
118 drm_free(entry->busaddr, 112 kfree(entry->busaddr);
119 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); 113 kfree(entry->pagelist);
120 drm_free(entry->pagelist, 114 kfree(entry);
121 entry->pages * sizeof(*entry->pagelist),
122 DRM_MEM_PAGES);
123 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
124 return -ENOMEM; 115 return -ENOMEM;
125 } 116 }
126 117
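drm_sg_alloc() unwinds its partial allocations with plain kfree() at each failure point, releasing the most recently acquired buffer first. A compact sketch of that unwind shape; the goto labels and the kcalloc() calls are illustrative simplifications (the patch itself keeps kmalloc() + memset() and open-codes the frees):

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
	if (!entry->pagelist)
		goto free_entry;

	entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
	if (!entry->busaddr)
		goto free_pagelist;

	return 0;

free_pagelist:
	kfree(entry->pagelist);
free_entry:
	kfree(entry);
	return -ENOMEM;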
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 926f146390ce..463aed9403db 100644
--- a/drivers/gpu/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
@@ -48,9 +48,7 @@ void drm_sman_takedown(struct drm_sman * sman)
48{ 48{
49 drm_ht_remove(&sman->user_hash_tab); 49 drm_ht_remove(&sman->user_hash_tab);
50 drm_ht_remove(&sman->owner_hash_tab); 50 drm_ht_remove(&sman->owner_hash_tab);
51 if (sman->mm) 51 kfree(sman->mm);
52 drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
53 DRM_MEM_MM);
54} 52}
55 53
56EXPORT_SYMBOL(drm_sman_takedown); 54EXPORT_SYMBOL(drm_sman_takedown);
@@ -61,8 +59,9 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
61{ 59{
62 int ret = 0; 60 int ret = 0;
63 61
64 sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), 62 sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
65 DRM_MEM_MM); 63 sizeof(*sman->mm),
64 GFP_KERNEL);
66 if (!sman->mm) { 65 if (!sman->mm) {
67 ret = -ENOMEM; 66 ret = -ENOMEM;
68 goto out; 67 goto out;
@@ -78,7 +77,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
78 77
79 drm_ht_remove(&sman->owner_hash_tab); 78 drm_ht_remove(&sman->owner_hash_tab);
80out1: 79out1:
81 drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM); 80 kfree(sman->mm);
82out: 81out:
83 return ret; 82 return ret;
84} 83}
@@ -110,7 +109,7 @@ static void drm_sman_mm_destroy(void *private)
110{ 109{
111 struct drm_mm *mm = (struct drm_mm *) private; 110 struct drm_mm *mm = (struct drm_mm *) private;
112 drm_mm_takedown(mm); 111 drm_mm_takedown(mm);
113 drm_free(mm, sizeof(*mm), DRM_MEM_MM); 112 kfree(mm);
114} 113}
115 114
116static unsigned long drm_sman_mm_offset(void *private, void *ref) 115static unsigned long drm_sman_mm_offset(void *private, void *ref)
@@ -130,7 +129,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
130 BUG_ON(manager >= sman->num_managers); 129 BUG_ON(manager >= sman->num_managers);
131 130
132 sman_mm = &sman->mm[manager]; 131 sman_mm = &sman->mm[manager];
133 mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM); 132 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
134 if (!mm) { 133 if (!mm) {
135 return -ENOMEM; 134 return -ENOMEM;
136 } 135 }
@@ -138,7 +137,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
138 ret = drm_mm_init(mm, start, size); 137 ret = drm_mm_init(mm, start, size);
139 138
140 if (ret) { 139 if (ret) {
141 drm_free(mm, sizeof(*mm), DRM_MEM_MM); 140 kfree(mm);
142 return ret; 141 return ret;
143 } 142 }
144 143
@@ -176,7 +175,7 @@ static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
176 owner_hash); 175 owner_hash);
177 } 176 }
178 177
179 owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM); 178 owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
180 if (!owner_item) 179 if (!owner_item)
181 goto out; 180 goto out;
182 181
@@ -189,7 +188,7 @@ static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
189 return owner_item; 188 return owner_item;
190 189
191out1: 190out1:
192 drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); 191 kfree(owner_item);
193out: 192out:
194 return NULL; 193 return NULL;
195} 194}
@@ -212,7 +211,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man
212 return NULL; 211 return NULL;
213 } 212 }
214 213
215 memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM); 214 memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
216 215
217 if (!memblock) 216 if (!memblock)
218 goto out; 217 goto out;
@@ -237,7 +236,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man
237out2: 236out2:
238 drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash); 237 drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
239out1: 238out1:
240 drm_free(memblock, sizeof(*memblock), DRM_MEM_MM); 239 kfree(memblock);
241out: 240out:
242 sman_mm->free(sman_mm->private, tmp); 241 sman_mm->free(sman_mm->private, tmp);
243 242
@@ -253,7 +252,7 @@ static void drm_sman_free(struct drm_memblock_item *item)
253 list_del(&item->owner_list); 252 list_del(&item->owner_list);
254 drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); 253 drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
255 item->mm->free(item->mm->private, item->mm_info); 254 item->mm->free(item->mm->private, item->mm_info);
256 drm_free(item, sizeof(*item), DRM_MEM_MM); 255 kfree(item);
257} 256}
258 257
259int drm_sman_free_key(struct drm_sman *sman, unsigned int key) 258int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
@@ -277,7 +276,7 @@ static void drm_sman_remove_owner(struct drm_sman *sman,
277{ 276{
278 list_del(&owner_item->sman_list); 277 list_del(&owner_item->sman_list);
279 drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); 278 drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
280 drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); 279 kfree(owner_item);
281} 280}
282 281
283int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) 282int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 387a8de1bc7e..155a5bbce680 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -107,7 +107,7 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
107{ 107{
108 struct drm_master *master; 108 struct drm_master *master;
109 109
110 master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER); 110 master = kzalloc(sizeof(*master), GFP_KERNEL);
111 if (!master) 111 if (!master)
112 return NULL; 112 return NULL;
113 113
@@ -149,7 +149,7 @@ static void drm_master_destroy(struct kref *kref)
149 } 149 }
150 150
151 if (master->unique) { 151 if (master->unique) {
152 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); 152 kfree(master->unique);
153 master->unique = NULL; 153 master->unique = NULL;
154 master->unique_len = 0; 154 master->unique_len = 0;
155 } 155 }
@@ -157,12 +157,12 @@ static void drm_master_destroy(struct kref *kref)
157 list_for_each_entry_safe(pt, next, &master->magicfree, head) { 157 list_for_each_entry_safe(pt, next, &master->magicfree, head) {
158 list_del(&pt->head); 158 list_del(&pt->head);
159 drm_ht_remove_item(&master->magiclist, &pt->hash_item); 159 drm_ht_remove_item(&master->magiclist, &pt->hash_item);
160 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); 160 kfree(pt);
161 } 161 }
162 162
163 drm_ht_remove(&master->magiclist); 163 drm_ht_remove(&master->magiclist);
164 164
165 drm_free(master, sizeof(*master), DRM_MEM_DRIVER); 165 kfree(master);
166} 166}
167 167
168void drm_master_put(struct drm_master **master) 168void drm_master_put(struct drm_master **master)
@@ -390,7 +390,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
390 390
391 DRM_DEBUG("\n"); 391 DRM_DEBUG("\n");
392 392
393 dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); 393 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
394 if (!dev) 394 if (!dev)
395 return -ENOMEM; 395 return -ENOMEM;
396 396
@@ -443,7 +443,7 @@ err_g3:
443err_g2: 443err_g2:
444 pci_disable_device(pdev); 444 pci_disable_device(pdev);
445err_g1: 445err_g1:
446 drm_free(dev, sizeof(*dev), DRM_MEM_STUB); 446 kfree(dev);
447 return ret; 447 return ret;
448} 448}
449EXPORT_SYMBOL(drm_get_dev); 449EXPORT_SYMBOL(drm_get_dev);
@@ -516,7 +516,7 @@ void drm_put_dev(struct drm_device *dev)
516 dev->driver->unload(dev); 516 dev->driver->unload(dev);
517 517
518 if (drm_core_has_AGP(dev) && dev->agp) { 518 if (drm_core_has_AGP(dev) && dev->agp) {
519 drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); 519 kfree(dev->agp);
520 dev->agp = NULL; 520 dev->agp = NULL;
521 } 521 }
522 522
@@ -535,10 +535,9 @@ void drm_put_dev(struct drm_device *dev)
535 drm_put_minor(&dev->primary); 535 drm_put_minor(&dev->primary);
536 536
537 if (dev->devname) { 537 if (dev->devname) {
538 drm_free(dev->devname, strlen(dev->devname) + 1, 538 kfree(dev->devname);
539 DRM_MEM_DRIVER);
540 dev->devname = NULL; 539 dev->devname = NULL;
541 } 540 }
542 drm_free(dev, sizeof(*dev), DRM_MEM_STUB); 541 kfree(dev);
543} 542}
544EXPORT_SYMBOL(drm_put_dev); 543EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 22f76567ac7d..7e1fbe5d4779 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -144,14 +144,14 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
144 * Get the page, inc the use count, and return it 144 * Get the page, inc the use count, and return it
145 */ 145 */
146 offset = (baddr - agpmem->bound) >> PAGE_SHIFT; 146 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
147 page = virt_to_page(__va(agpmem->memory->memory[offset])); 147 page = agpmem->memory->pages[offset];
148 get_page(page); 148 get_page(page);
149 vmf->page = page; 149 vmf->page = page;
150 150
151 DRM_DEBUG 151 DRM_DEBUG
152 ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n", 152 ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
153 (unsigned long long)baddr, 153 (unsigned long long)baddr,
154 __va(agpmem->memory->memory[offset]), 154 agpmem->memory->pages[offset],
155 (unsigned long long)offset, 155 (unsigned long long)offset,
156 page_count(page)); 156 page_count(page));
157 return 0; 157 return 0;
@@ -227,7 +227,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
227 found_maps++; 227 found_maps++;
228 if (pt->vma == vma) { 228 if (pt->vma == vma) {
229 list_del(&pt->head); 229 list_del(&pt->head);
230 drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); 230 kfree(pt);
231 } 231 }
232 } 232 }
233 233
@@ -273,7 +273,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
273 DRM_ERROR("tried to rmmap GEM object\n"); 273 DRM_ERROR("tried to rmmap GEM object\n");
274 break; 274 break;
275 } 275 }
276 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 276 kfree(map);
277 } 277 }
278 } 278 }
279 mutex_unlock(&dev->struct_mutex); 279 mutex_unlock(&dev->struct_mutex);
@@ -414,7 +414,7 @@ void drm_vm_open_locked(struct vm_area_struct *vma)
414 vma->vm_start, vma->vm_end - vma->vm_start); 414 vma->vm_start, vma->vm_end - vma->vm_start);
415 atomic_inc(&dev->vma_count); 415 atomic_inc(&dev->vma_count);
416 416
417 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); 417 vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
418 if (vma_entry) { 418 if (vma_entry) {
419 vma_entry->vma = vma; 419 vma_entry->vma = vma;
420 vma_entry->pid = current->pid; 420 vma_entry->pid = current->pid;
@@ -454,7 +454,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
454 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 454 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
455 if (pt->vma == vma) { 455 if (pt->vma == vma) {
456 list_del(&pt->head); 456 list_del(&pt->head);
457 drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); 457 kfree(pt);
458 break; 458 break;
459 } 459 }
460 } 460 }
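The drm_do_vm_fault() hunk is the fault-path half of the AGP pages-array switch seen in drm_memory.c: rather than translating a stored physical address back to a struct page via virt_to_page(__va(...)), the handler indexes the pages array directly. In outline:

	offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
	page = agpmem->memory->pages[offset];	/* was virt_to_page(__va(phys)) */
	get_page(page);				/* reference held for the mapping */
	vmf->page = page;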
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e5de8ea41544..7d1d88cdf2dc 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -227,8 +227,7 @@ static int i810_dma_cleanup(struct drm_device * dev)
227 /* Need to rewrite hardware status page */ 227 /* Need to rewrite hardware status page */
228 I810_WRITE(0x02080, 0x1ffff000); 228 I810_WRITE(0x02080, 0x1ffff000);
229 } 229 }
230 drm_free(dev->dev_private, sizeof(drm_i810_private_t), 230 kfree(dev->dev_private);
231 DRM_MEM_DRIVER);
232 dev->dev_private = NULL; 231 dev->dev_private = NULL;
233 232
234 for (i = 0; i < dma->buf_count; i++) { 233 for (i = 0; i < dma->buf_count; i++) {
@@ -439,8 +438,7 @@ static int i810_dma_init(struct drm_device *dev, void *data,
439 switch (init->func) { 438 switch (init->func) {
440 case I810_INIT_DMA_1_4: 439 case I810_INIT_DMA_1_4:
441 DRM_INFO("Using v1.4 init.\n"); 440 DRM_INFO("Using v1.4 init.\n");
442 dev_priv = drm_alloc(sizeof(drm_i810_private_t), 441 dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
443 DRM_MEM_DRIVER);
444 if (dev_priv == NULL) 442 if (dev_priv == NULL)
445 return -ENOMEM; 443 return -ENOMEM;
446 retcode = i810_dma_initialize(dev, dev_priv, init); 444 retcode = i810_dma_initialize(dev, dev_priv, init);
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index a86ab30b4620..877bf6cb14a4 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -232,8 +232,7 @@ static int i830_dma_cleanup(struct drm_device * dev)
232 I830_WRITE(0x02080, 0x1ffff000); 232 I830_WRITE(0x02080, 0x1ffff000);
233 } 233 }
234 234
235 drm_free(dev->dev_private, sizeof(drm_i830_private_t), 235 kfree(dev->dev_private);
236 DRM_MEM_DRIVER);
237 dev->dev_private = NULL; 236 dev->dev_private = NULL;
238 237
239 for (i = 0; i < dma->buf_count; i++) { 238 for (i = 0; i < dma->buf_count; i++) {
@@ -459,8 +458,7 @@ static int i830_dma_init(struct drm_device *dev, void *data,
459 458
460 switch (init->func) { 459 switch (init->func) {
461 case I830_INIT_DMA: 460 case I830_INIT_DMA:
462 dev_priv = drm_alloc(sizeof(drm_i830_private_t), 461 dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
463 DRM_MEM_DRIVER);
464 if (dev_priv == NULL) 462 if (dev_priv == NULL)
465 return -ENOMEM; 463 return -ENOMEM;
466 retcode = i830_dma_initialize(dev, dev_priv, init); 464 retcode = i830_dma_initialize(dev, dev_priv, init);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a050aa73..30d6b99fb302 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
13 intel_crt.o \ 13 intel_crt.o \
14 intel_lvds.o \ 14 intel_lvds.o \
15 intel_bios.o \ 15 intel_bios.o \
16 intel_dp.o \
17 intel_dp_i2c.o \
16 intel_hdmi.o \ 18 intel_hdmi.o \
17 intel_sdvo.o \ 19 intel_sdvo.o \
18 intel_modes.o \ 20 intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac42fe3a..288fc50627e2 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@ struct intel_dvo_device {
37 /* GPIO register used for i2c bus to control this device */ 37 /* GPIO register used for i2c bus to control this device */
38 u32 gpio; 38 u32 gpio;
39 int slave_addr; 39 int slave_addr;
40 struct intel_i2c_chan *i2c_bus; 40 struct i2c_adapter *i2c_bus;
41 41
42 const struct intel_dvo_dev_ops *dev_ops; 42 const struct intel_dvo_dev_ops *dev_ops;
43 void *dev_priv; 43 void *dev_priv;
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops {
52 * Returns NULL if the device does not exist. 52 * Returns NULL if the device does not exist.
53 */ 53 */
54 bool (*init)(struct intel_dvo_device *dvo, 54 bool (*init)(struct intel_dvo_device *dvo,
55 struct intel_i2c_chan *i2cbus); 55 struct i2c_adapter *i2cbus);
56 56
57 /* 57 /*
58 * Called to allow the output a chance to create properties after the 58 * Called to allow the output a chance to create properties after the
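dvo.h moves the DVO interface from the i915-private struct intel_i2c_chan to the generic struct i2c_adapter. Each chip driver recovers the wrapper with container_of() where it still wants the adapter name for debug output, and the slave address is now read from the DVO device instead of being stashed in the channel. The pattern repeated across the dvo_*.c hunks below is roughly:

	struct i2c_adapter *adapter = dvo->i2c_bus;
	struct intel_i2c_chan *i2cbus =
		container_of(adapter, struct intel_i2c_chan, adapter);
	u8 out_buf[2];
	struct i2c_msg msg = {
		.addr	= dvo->slave_addr,	/* was i2cbus->slave_addr */
		.flags	= 0,
		.len	= 2,
		.buf	= out_buf,
	};

	out_buf[0] = addr;
	out_buf[1] = val;

	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
		return true;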
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b4973b02..621815b531db 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
176 176
177static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) 177static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
178{ 178{
179 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 179 struct i2c_adapter *adapter = dvo->i2c_bus;
180 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
180 u8 out_buf[2]; 181 u8 out_buf[2];
181 u8 in_buf[2]; 182 u8 in_buf[2];
182 183
183 struct i2c_msg msgs[] = { 184 struct i2c_msg msgs[] = {
184 { 185 {
185 .addr = i2cbus->slave_addr, 186 .addr = dvo->slave_addr,
186 .flags = 0, 187 .flags = 0,
187 .len = 1, 188 .len = 1,
188 .buf = out_buf, 189 .buf = out_buf,
189 }, 190 },
190 { 191 {
191 .addr = i2cbus->slave_addr, 192 .addr = dvo->slave_addr,
192 .flags = I2C_M_RD, 193 .flags = I2C_M_RD,
193 .len = 1, 194 .len = 1,
194 .buf = in_buf, 195 .buf = in_buf,
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
208 209
209static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) 210static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
210{ 211{
211 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 212 struct i2c_adapter *adapter = dvo->i2c_bus;
213 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
212 uint8_t out_buf[2]; 214 uint8_t out_buf[2];
213 struct i2c_msg msg = { 215 struct i2c_msg msg = {
214 .addr = i2cbus->slave_addr, 216 .addr = dvo->slave_addr,
215 .flags = 0, 217 .flags = 0,
216 .len = 2, 218 .len = 2,
217 .buf = out_buf, 219 .buf = out_buf,
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
228 230
229/** Probes for a CH7017 on the given bus and slave address. */ 231/** Probes for a CH7017 on the given bus and slave address. */
230static bool ch7017_init(struct intel_dvo_device *dvo, 232static bool ch7017_init(struct intel_dvo_device *dvo,
231 struct intel_i2c_chan *i2cbus) 233 struct i2c_adapter *adapter)
232{ 234{
235 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
233 struct ch7017_priv *priv; 236 struct ch7017_priv *priv;
234 uint8_t val; 237 uint8_t val;
235 238
@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
237 if (priv == NULL) 240 if (priv == NULL)
238 return false; 241 return false;
239 242
240 dvo->i2c_bus = i2cbus; 243 dvo->i2c_bus = adapter;
241 dvo->i2c_bus->slave_addr = dvo->slave_addr;
242 dvo->dev_priv = priv; 244 dvo->dev_priv = priv;
243 245
244 if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) 246 if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
248 val != CH7018_DEVICE_ID_VALUE && 250 val != CH7018_DEVICE_ID_VALUE &&
249 val != CH7019_DEVICE_ID_VALUE) { 251 val != CH7019_DEVICE_ID_VALUE) {
250 DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", 252 DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
251 val, i2cbus->adapter.name,i2cbus->slave_addr); 253 val, i2cbus->adapter.name,dvo->slave_addr);
252 goto fail; 254 goto fail;
253 } 255 }
254 256
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95dbd034..a9b896289680 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid)
123static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 123static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
124{ 124{
125 struct ch7xxx_priv *ch7xxx= dvo->dev_priv; 125 struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
126 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 126 struct i2c_adapter *adapter = dvo->i2c_bus;
127 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
127 u8 out_buf[2]; 128 u8 out_buf[2];
128 u8 in_buf[2]; 129 u8 in_buf[2];
129 130
130 struct i2c_msg msgs[] = { 131 struct i2c_msg msgs[] = {
131 { 132 {
132 .addr = i2cbus->slave_addr, 133 .addr = dvo->slave_addr,
133 .flags = 0, 134 .flags = 0,
134 .len = 1, 135 .len = 1,
135 .buf = out_buf, 136 .buf = out_buf,
136 }, 137 },
137 { 138 {
138 .addr = i2cbus->slave_addr, 139 .addr = dvo->slave_addr,
139 .flags = I2C_M_RD, 140 .flags = I2C_M_RD,
140 .len = 1, 141 .len = 1,
141 .buf = in_buf, 142 .buf = in_buf,
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
152 153
153 if (!ch7xxx->quiet) { 154 if (!ch7xxx->quiet) {
154 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 155 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
155 addr, i2cbus->adapter.name, i2cbus->slave_addr); 156 addr, i2cbus->adapter.name, dvo->slave_addr);
156 } 157 }
157 return false; 158 return false;
158} 159}
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
161static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 162static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
162{ 163{
163 struct ch7xxx_priv *ch7xxx = dvo->dev_priv; 164 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
164 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 165 struct i2c_adapter *adapter = dvo->i2c_bus;
166 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
165 uint8_t out_buf[2]; 167 uint8_t out_buf[2];
166 struct i2c_msg msg = { 168 struct i2c_msg msg = {
167 .addr = i2cbus->slave_addr, 169 .addr = dvo->slave_addr,
168 .flags = 0, 170 .flags = 0,
169 .len = 2, 171 .len = 2,
170 .buf = out_buf, 172 .buf = out_buf,
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
178 180
179 if (!ch7xxx->quiet) { 181 if (!ch7xxx->quiet) {
180 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 182 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
181 addr, i2cbus->adapter.name, i2cbus->slave_addr); 183 addr, i2cbus->adapter.name, dvo->slave_addr);
182 } 184 }
183 185
184 return false; 186 return false;
185} 187}
186 188
187static bool ch7xxx_init(struct intel_dvo_device *dvo, 189static bool ch7xxx_init(struct intel_dvo_device *dvo,
188 struct intel_i2c_chan *i2cbus) 190 struct i2c_adapter *adapter)
189{ 191{
190 /* this will detect the CH7xxx chip on the specified i2c bus */ 192 /* this will detect the CH7xxx chip on the specified i2c bus */
191 struct ch7xxx_priv *ch7xxx; 193 struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
196 if (ch7xxx == NULL) 198 if (ch7xxx == NULL)
197 return false; 199 return false;
198 200
199 dvo->i2c_bus = i2cbus; 201 dvo->i2c_bus = adapter;
200 dvo->i2c_bus->slave_addr = dvo->slave_addr;
201 dvo->dev_priv = ch7xxx; 202 dvo->dev_priv = ch7xxx;
202 ch7xxx->quiet = true; 203 ch7xxx->quiet = true;
203 204
@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
207 name = ch7xxx_get_id(vendor); 208 name = ch7xxx_get_id(vendor);
208 if (!name) { 209 if (!name) {
209 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", 210 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
210 vendor, i2cbus->adapter.name, i2cbus->slave_addr); 211 vendor, adapter->name, dvo->slave_addr);
211 goto out; 212 goto out;
212 } 213 }
213 214
@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
217 218
218 if (device != CH7xxx_DID) { 219 if (device != CH7xxx_DID) {
219 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", 220 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
220 vendor, i2cbus->adapter.name, i2cbus->slave_addr); 221 vendor, adapter->name, dvo->slave_addr);
221 goto out; 222 goto out;
222 } 223 }
223 224
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375e8e37..aa176f9921fe 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
169static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) 169static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
170{ 170{
171 struct ivch_priv *priv = dvo->dev_priv; 171 struct ivch_priv *priv = dvo->dev_priv;
172 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 172 struct i2c_adapter *adapter = dvo->i2c_bus;
173 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
173 u8 out_buf[1]; 174 u8 out_buf[1];
174 u8 in_buf[2]; 175 u8 in_buf[2];
175 176
176 struct i2c_msg msgs[] = { 177 struct i2c_msg msgs[] = {
177 { 178 {
178 .addr = i2cbus->slave_addr, 179 .addr = dvo->slave_addr,
179 .flags = I2C_M_RD, 180 .flags = I2C_M_RD,
180 .len = 0, 181 .len = 0,
181 }, 182 },
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
186 .buf = out_buf, 187 .buf = out_buf,
187 }, 188 },
188 { 189 {
189 .addr = i2cbus->slave_addr, 190 .addr = dvo->slave_addr,
190 .flags = I2C_M_RD | I2C_M_NOSTART, 191 .flags = I2C_M_RD | I2C_M_NOSTART,
191 .len = 2, 192 .len = 2,
192 .buf = in_buf, 193 .buf = in_buf,
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
202 203
203 if (!priv->quiet) { 204 if (!priv->quiet) {
204 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 205 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
205 addr, i2cbus->adapter.name, i2cbus->slave_addr); 206 addr, i2cbus->adapter.name, dvo->slave_addr);
206 } 207 }
207 return false; 208 return false;
208} 209}
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
211static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) 212static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
212{ 213{
213 struct ivch_priv *priv = dvo->dev_priv; 214 struct ivch_priv *priv = dvo->dev_priv;
214 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 215 struct i2c_adapter *adapter = dvo->i2c_bus;
216 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
215 u8 out_buf[3]; 217 u8 out_buf[3];
216 struct i2c_msg msg = { 218 struct i2c_msg msg = {
217 .addr = i2cbus->slave_addr, 219 .addr = dvo->slave_addr,
218 .flags = 0, 220 .flags = 0,
219 .len = 3, 221 .len = 3,
220 .buf = out_buf, 222 .buf = out_buf,
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
229 231
230 if (!priv->quiet) { 232 if (!priv->quiet) {
231 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 233 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
232 addr, i2cbus->adapter.name, i2cbus->slave_addr); 234 addr, i2cbus->adapter.name, dvo->slave_addr);
233 } 235 }
234 236
235 return false; 237 return false;
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
237 239
238/** Probes the given bus and slave address for an ivch */ 240/** Probes the given bus and slave address for an ivch */
239static bool ivch_init(struct intel_dvo_device *dvo, 241static bool ivch_init(struct intel_dvo_device *dvo,
240 struct intel_i2c_chan *i2cbus) 242 struct i2c_adapter *adapter)
241{ 243{
242 struct ivch_priv *priv; 244 struct ivch_priv *priv;
243 uint16_t temp; 245 uint16_t temp;
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
246 if (priv == NULL) 248 if (priv == NULL)
247 return false; 249 return false;
248 250
249 dvo->i2c_bus = i2cbus; 251 dvo->i2c_bus = adapter;
250 dvo->i2c_bus->slave_addr = dvo->slave_addr;
251 dvo->dev_priv = priv; 252 dvo->dev_priv = priv;
252 priv->quiet = true; 253 priv->quiet = true;
253 254
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb070b2..e1c1f7341e5c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@ struct sil164_priv {
76static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 76static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
77{ 77{
78 struct sil164_priv *sil = dvo->dev_priv; 78 struct sil164_priv *sil = dvo->dev_priv;
79 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 79 struct i2c_adapter *adapter = dvo->i2c_bus;
80 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
80 u8 out_buf[2]; 81 u8 out_buf[2];
81 u8 in_buf[2]; 82 u8 in_buf[2];
82 83
83 struct i2c_msg msgs[] = { 84 struct i2c_msg msgs[] = {
84 { 85 {
85 .addr = i2cbus->slave_addr, 86 .addr = dvo->slave_addr,
86 .flags = 0, 87 .flags = 0,
87 .len = 1, 88 .len = 1,
88 .buf = out_buf, 89 .buf = out_buf,
89 }, 90 },
90 { 91 {
91 .addr = i2cbus->slave_addr, 92 .addr = dvo->slave_addr,
92 .flags = I2C_M_RD, 93 .flags = I2C_M_RD,
93 .len = 1, 94 .len = 1,
94 .buf = in_buf, 95 .buf = in_buf,
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
105 106
106 if (!sil->quiet) { 107 if (!sil->quiet) {
107 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 108 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
108 addr, i2cbus->adapter.name, i2cbus->slave_addr); 109 addr, i2cbus->adapter.name, dvo->slave_addr);
109 } 110 }
110 return false; 111 return false;
111} 112}
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
113static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 114static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
114{ 115{
115 struct sil164_priv *sil= dvo->dev_priv; 116 struct sil164_priv *sil= dvo->dev_priv;
116 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 117 struct i2c_adapter *adapter = dvo->i2c_bus;
118 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
117 uint8_t out_buf[2]; 119 uint8_t out_buf[2];
118 struct i2c_msg msg = { 120 struct i2c_msg msg = {
119 .addr = i2cbus->slave_addr, 121 .addr = dvo->slave_addr,
120 .flags = 0, 122 .flags = 0,
121 .len = 2, 123 .len = 2,
122 .buf = out_buf, 124 .buf = out_buf,
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
130 132
131 if (!sil->quiet) { 133 if (!sil->quiet) {
132 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 134 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
133 addr, i2cbus->adapter.name, i2cbus->slave_addr); 135 addr, i2cbus->adapter.name, dvo->slave_addr);
134 } 136 }
135 137
136 return false; 138 return false;
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
138 140
139/* Silicon Image 164 driver for chip on i2c bus */ 141/* Silicon Image 164 driver for chip on i2c bus */
140static bool sil164_init(struct intel_dvo_device *dvo, 142static bool sil164_init(struct intel_dvo_device *dvo,
141 struct intel_i2c_chan *i2cbus) 143 struct i2c_adapter *adapter)
142{ 144{
143 /* this will detect the SIL164 chip on the specified i2c bus */ 145 /* this will detect the SIL164 chip on the specified i2c bus */
144 struct sil164_priv *sil; 146 struct sil164_priv *sil;
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
148 if (sil == NULL) 150 if (sil == NULL)
149 return false; 151 return false;
150 152
151 dvo->i2c_bus = i2cbus; 153 dvo->i2c_bus = adapter;
152 dvo->i2c_bus->slave_addr = dvo->slave_addr;
153 dvo->dev_priv = sil; 154 dvo->dev_priv = sil;
154 sil->quiet = true; 155 sil->quiet = true;
155 156
@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
158 159
159 if (ch != (SIL164_VID & 0xff)) { 160 if (ch != (SIL164_VID & 0xff)) {
160 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", 161 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
161 ch, i2cbus->adapter.name, i2cbus->slave_addr); 162 ch, adapter->name, dvo->slave_addr);
162 goto out; 163 goto out;
163 } 164 }
164 165
@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
167 168
168 if (ch != (SIL164_DID & 0xff)) { 169 if (ch != (SIL164_DID & 0xff)) {
169 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", 170 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
170 ch, i2cbus->adapter.name, i2cbus->slave_addr); 171 ch, adapter->name, dvo->slave_addr);
171 goto out; 172 goto out;
172 } 173 }
173 sil->quiet = false; 174 sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda806ebf..9ecc907384ec 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@ struct tfp410_priv {
101static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 101static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
102{ 102{
103 struct tfp410_priv *tfp = dvo->dev_priv; 103 struct tfp410_priv *tfp = dvo->dev_priv;
104 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 104 struct i2c_adapter *adapter = dvo->i2c_bus;
105 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
105 u8 out_buf[2]; 106 u8 out_buf[2];
106 u8 in_buf[2]; 107 u8 in_buf[2];
107 108
108 struct i2c_msg msgs[] = { 109 struct i2c_msg msgs[] = {
109 { 110 {
110 .addr = i2cbus->slave_addr, 111 .addr = dvo->slave_addr,
111 .flags = 0, 112 .flags = 0,
112 .len = 1, 113 .len = 1,
113 .buf = out_buf, 114 .buf = out_buf,
114 }, 115 },
115 { 116 {
116 .addr = i2cbus->slave_addr, 117 .addr = dvo->slave_addr,
117 .flags = I2C_M_RD, 118 .flags = I2C_M_RD,
118 .len = 1, 119 .len = 1,
119 .buf = in_buf, 120 .buf = in_buf,
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
130 131
131 if (!tfp->quiet) { 132 if (!tfp->quiet) {
132 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", 133 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
133 addr, i2cbus->adapter.name, i2cbus->slave_addr); 134 addr, i2cbus->adapter.name, dvo->slave_addr);
134 } 135 }
135 return false; 136 return false;
136} 137}
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
138static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) 139static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
139{ 140{
140 struct tfp410_priv *tfp = dvo->dev_priv; 141 struct tfp410_priv *tfp = dvo->dev_priv;
141 struct intel_i2c_chan *i2cbus = dvo->i2c_bus; 142 struct i2c_adapter *adapter = dvo->i2c_bus;
143 struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
142 uint8_t out_buf[2]; 144 uint8_t out_buf[2];
143 struct i2c_msg msg = { 145 struct i2c_msg msg = {
144 .addr = i2cbus->slave_addr, 146 .addr = dvo->slave_addr,
145 .flags = 0, 147 .flags = 0,
146 .len = 2, 148 .len = 2,
147 .buf = out_buf, 149 .buf = out_buf,
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
155 157
156 if (!tfp->quiet) { 158 if (!tfp->quiet) {
157 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", 159 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
158 addr, i2cbus->adapter.name, i2cbus->slave_addr); 160 addr, i2cbus->adapter.name, dvo->slave_addr);
159 } 161 }
160 162
161 return false; 163 return false;
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
174 176
175/* Ti TFP410 driver for chip on i2c bus */ 177/* Ti TFP410 driver for chip on i2c bus */
176static bool tfp410_init(struct intel_dvo_device *dvo, 178static bool tfp410_init(struct intel_dvo_device *dvo,
177 struct intel_i2c_chan *i2cbus) 179 struct i2c_adapter *adapter)
178{ 180{
179 /* this will detect the tfp410 chip on the specified i2c bus */ 181 /* this will detect the tfp410 chip on the specified i2c bus */
180 struct tfp410_priv *tfp; 182 struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
184 if (tfp == NULL) 186 if (tfp == NULL)
185 return false; 187 return false;
186 188
187 dvo->i2c_bus = i2cbus; 189 dvo->i2c_bus = adapter;
188 dvo->i2c_bus->slave_addr = dvo->slave_addr;
189 dvo->dev_priv = tfp; 190 dvo->dev_priv = tfp;
190 tfp->quiet = true; 191 tfp->quiet = true;
191 192
192 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { 193 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
193 DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", 194 DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
194 id, i2cbus->adapter.name, i2cbus->slave_addr); 195 id, adapter->name, dvo->slave_addr);
195 goto out; 196 goto out;
196 } 197 }
197 198
198 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { 199 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
199 DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", 200 DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
200 id, i2cbus->adapter.name, i2cbus->slave_addr); 201 id, adapter->name, dvo->slave_addr);
201 goto out; 202 goto out;
202 } 203 }
203 tfp->quiet = false; 204 tfp->quiet = false;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a60626f6803..f112c769d533 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -643,9 +643,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
643 return -EINVAL; 643 return -EINVAL;
644 644
645 if (batch->num_cliprects) { 645 if (batch->num_cliprects) {
646 cliprects = drm_calloc(batch->num_cliprects, 646 cliprects = kcalloc(batch->num_cliprects,
647 sizeof(struct drm_clip_rect), 647 sizeof(struct drm_clip_rect),
648 DRM_MEM_DRIVER); 648 GFP_KERNEL);
649 if (cliprects == NULL) 649 if (cliprects == NULL)
650 return -ENOMEM; 650 return -ENOMEM;
651 651
@@ -664,9 +664,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
664 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 664 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
665 665
666fail_free: 666fail_free:
667 drm_free(cliprects, 667 kfree(cliprects);
668 batch->num_cliprects * sizeof(struct drm_clip_rect),
669 DRM_MEM_DRIVER);
670 668
671 return ret; 669 return ret;
672} 670}
@@ -692,7 +690,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
692 if (cmdbuf->num_cliprects < 0) 690 if (cmdbuf->num_cliprects < 0)
693 return -EINVAL; 691 return -EINVAL;
694 692
695 batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER); 693 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
696 if (batch_data == NULL) 694 if (batch_data == NULL)
697 return -ENOMEM; 695 return -ENOMEM;
698 696
@@ -701,9 +699,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
701 goto fail_batch_free; 699 goto fail_batch_free;
702 700
703 if (cmdbuf->num_cliprects) { 701 if (cmdbuf->num_cliprects) {
704 cliprects = drm_calloc(cmdbuf->num_cliprects, 702 cliprects = kcalloc(cmdbuf->num_cliprects,
705 sizeof(struct drm_clip_rect), 703 sizeof(struct drm_clip_rect), GFP_KERNEL);
706 DRM_MEM_DRIVER);
707 if (cliprects == NULL) 704 if (cliprects == NULL)
708 goto fail_batch_free; 705 goto fail_batch_free;
709 706
@@ -726,11 +723,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
726 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 723 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
727 724
728fail_clip_free: 725fail_clip_free:
729 drm_free(cliprects, 726 kfree(cliprects);
730 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
731 DRM_MEM_DRIVER);
732fail_batch_free: 727fail_batch_free:
733 drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); 728 kfree(batch_data);
734 729
735 return ret; 730 return ret;
736} 731}
@@ -1067,7 +1062,7 @@ int i915_master_create(struct drm_device *dev, struct drm_master *master)
1067{ 1062{
1068 struct drm_i915_master_private *master_priv; 1063 struct drm_i915_master_private *master_priv;
1069 1064
1070 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 1065 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1071 if (!master_priv) 1066 if (!master_priv)
1072 return -ENOMEM; 1067 return -ENOMEM;
1073 1068
@@ -1082,7 +1077,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1082 if (!master_priv) 1077 if (!master_priv)
1083 return; 1078 return;
1084 1079
1085 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); 1080 kfree(master_priv);
1086 1081
1087 master->driver_priv = NULL; 1082 master->driver_priv = NULL;
1088} 1083}
@@ -1111,12 +1106,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1111 dev->types[8] = _DRM_STAT_SECONDARY; 1106 dev->types[8] = _DRM_STAT_SECONDARY;
1112 dev->types[9] = _DRM_STAT_DMA; 1107 dev->types[9] = _DRM_STAT_DMA;
1113 1108
1114 dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); 1109 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1115 if (dev_priv == NULL) 1110 if (dev_priv == NULL)
1116 return -ENOMEM; 1111 return -ENOMEM;
1117 1112
1118 memset(dev_priv, 0, sizeof(drm_i915_private_t));
1119
1120 dev->dev_private = (void *)dev_priv; 1113 dev->dev_private = (void *)dev_priv;
1121 dev_priv->dev = dev; 1114 dev_priv->dev = dev;
1122 1115
@@ -1153,13 +1146,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1153 "performance may suffer.\n"); 1146 "performance may suffer.\n");
1154 } 1147 }
1155 1148
1156#ifdef CONFIG_HIGHMEM64G
1157 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
1158 dev_priv->has_gem = 0;
1159#else
1160 /* enable GEM by default */ 1149 /* enable GEM by default */
1161 dev_priv->has_gem = 1; 1150 dev_priv->has_gem = 1;
1162#endif
1163 1151
1164 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1152 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1165 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1153 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -1221,7 +1209,7 @@ out_iomapfree:
1221out_rmmap: 1209out_rmmap:
1222 iounmap(dev_priv->regs); 1210 iounmap(dev_priv->regs);
1223free_priv: 1211free_priv:
1224 drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER); 1212 kfree(dev_priv);
1225 return ret; 1213 return ret;
1226} 1214}
1227 1215
@@ -1261,8 +1249,7 @@ int i915_driver_unload(struct drm_device *dev)
1261 i915_gem_lastclose(dev); 1249 i915_gem_lastclose(dev);
1262 } 1250 }
1263 1251
1264 drm_free(dev->dev_private, sizeof(drm_i915_private_t), 1252 kfree(dev->dev_private);
1265 DRM_MEM_DRIVER);
1266 1253
1267 return 0; 1254 return 0;
1268} 1255}
@@ -1273,7 +1260,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1273 1260
1274 DRM_DEBUG_DRIVER(I915_DRV, "\n"); 1261 DRM_DEBUG_DRIVER(I915_DRV, "\n");
1275 i915_file_priv = (struct drm_i915_file_private *) 1262 i915_file_priv = (struct drm_i915_file_private *)
1276 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); 1263 kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
1277 1264
1278 if (!i915_file_priv) 1265 if (!i915_file_priv)
1279 return -ENOMEM; 1266 return -ENOMEM;
@@ -1326,7 +1313,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1326{ 1313{
1327 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 1314 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1328 1315
1329 drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); 1316 kfree(i915_file_priv);
1330} 1317}
1331 1318
1332struct drm_ioctl_desc i915_ioctls[] = { 1319struct drm_ioctl_desc i915_ioctls[] = {
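A note on the allocation changes in this file (and repeated throughout the series below): the DRM memory wrappers, which required the caller to pass an explicit size and a DRM_MEM_* area tag to both the allocation and the free, are replaced by the plain kernel allocators from <linux/slab.h>, which track the size internally. A minimal before/after sketch of the idiom, for illustration only (struct foo is a placeholder type, not something in the patch):

    struct foo *p;

    /* old style: size and area tag passed to both alloc and free */
    p = drm_calloc(1, sizeof(*p), DRM_MEM_DRIVER);
    drm_free(p, sizeof(*p), DRM_MEM_DRIVER);

    /* new style: kzalloc() returns zeroed memory, kfree() needs no size */
    p = kzalloc(sizeof(*p), GFP_KERNEL);
    kfree(p);

Because kfree(NULL) is a no-op, error paths can free pointers that may never have been allocated, which is what the fail_* labels above rely on.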
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1e899a..e3cb4025e323 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,8 +67,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
67 67
68 pci_save_state(dev->pdev); 68 pci_save_state(dev->pdev);
69 69
70 i915_save_state(dev);
71
72 /* If KMS is active, we do the leavevt stuff here */ 70 /* If KMS is active, we do the leavevt stuff here */
73 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 71 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
74 if (i915_gem_idle(dev)) 72 if (i915_gem_idle(dev))
@@ -77,6 +75,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
77 drm_irq_uninstall(dev); 75 drm_irq_uninstall(dev);
78 } 76 }
79 77
78 i915_save_state(dev);
79
80 intel_opregion_free(dev, 1); 80 intel_opregion_free(dev, 1);
81 81
82 if (state.event == PM_EVENT_SUSPEND) { 82 if (state.event == PM_EVENT_SUSPEND) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ef6bcec211b..bb4c2d387b6c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -306,6 +306,17 @@ typedef struct drm_i915_private {
306 u32 saveCURBPOS; 306 u32 saveCURBPOS;
307 u32 saveCURBBASE; 307 u32 saveCURBBASE;
308 u32 saveCURSIZE; 308 u32 saveCURSIZE;
309 u32 saveDP_B;
310 u32 saveDP_C;
311 u32 saveDP_D;
312 u32 savePIPEA_GMCH_DATA_M;
313 u32 savePIPEB_GMCH_DATA_M;
314 u32 savePIPEA_GMCH_DATA_N;
315 u32 savePIPEB_GMCH_DATA_N;
316 u32 savePIPEA_DP_LINK_M;
317 u32 savePIPEB_DP_LINK_M;
318 u32 savePIPEA_DP_LINK_N;
319 u32 savePIPEB_DP_LINK_N;
309 320
310 struct { 321 struct {
311 struct drm_mm gtt_space; 322 struct drm_mm gtt_space;
@@ -646,6 +657,8 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
646int i915_gem_object_unbind(struct drm_gem_object *obj); 657int i915_gem_object_unbind(struct drm_gem_object *obj);
647void i915_gem_lastclose(struct drm_device *dev); 658void i915_gem_lastclose(struct drm_device *dev);
648uint32_t i915_get_gem_seqno(struct drm_device *dev); 659uint32_t i915_get_gem_seqno(struct drm_device *dev);
660int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
661int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
649void i915_gem_retire_requests(struct drm_device *dev); 662void i915_gem_retire_requests(struct drm_device *dev);
650void i915_gem_retire_work_handler(struct work_struct *work); 663void i915_gem_retire_work_handler(struct work_struct *work);
651void i915_gem_clflush_object(struct drm_gem_object *obj); 664void i915_gem_clflush_object(struct drm_gem_object *obj);
@@ -855,6 +868,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
855#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ 868#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
856 IS_I915GM(dev))) 869 IS_I915GM(dev)))
857#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) 870#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
871#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
858#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) 872#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
859 873
860#define PRIMARY_RINGBUFFER_SIZE (128*1024) 874#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c0ae6bbbd9b5..876b65cb7629 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
46static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 46static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
47static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 47static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
48 unsigned alignment); 48 unsigned alignment);
49static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
50static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 49static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51static int i915_gem_evict_something(struct drm_device *dev); 50static int i915_gem_evict_something(struct drm_device *dev);
52static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 51static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -1007,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1007 1006
1008 mutex_lock(&dev->struct_mutex); 1007 mutex_lock(&dev->struct_mutex);
1009#if WATCH_BUF 1008#if WATCH_BUF
1010 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 1009 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1011 obj, obj->size, read_domains, write_domain); 1010 obj, obj->size, read_domains, write_domain);
1012#endif 1011#endif
1013 if (read_domains & I915_GEM_DOMAIN_GTT) { 1012 if (read_domains & I915_GEM_DOMAIN_GTT) {
@@ -1051,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1051 } 1050 }
1052 1051
1053#if WATCH_BUF 1052#if WATCH_BUF
1054 DRM_INFO("%s: sw_finish %d (%p %d)\n", 1053 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1055 __func__, args->handle, obj, obj->size); 1054 __func__, args->handle, obj, obj->size);
1056#endif 1055#endif
1057 obj_priv = obj->driver_private; 1056 obj_priv = obj->driver_private;
@@ -1158,7 +1157,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1158 /* Need a new fence register? */ 1157 /* Need a new fence register? */
1159 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1158 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1160 obj_priv->tiling_mode != I915_TILING_NONE) { 1159 obj_priv->tiling_mode != I915_TILING_NONE) {
1161 ret = i915_gem_object_get_fence_reg(obj, write); 1160 ret = i915_gem_object_get_fence_reg(obj);
1162 if (ret) { 1161 if (ret) {
1163 mutex_unlock(&dev->struct_mutex); 1162 mutex_unlock(&dev->struct_mutex);
1164 return VM_FAULT_SIGBUS; 1163 return VM_FAULT_SIGBUS;
@@ -1208,8 +1207,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1208 1207
1209 /* Set the object up for mmap'ing */ 1208 /* Set the object up for mmap'ing */
1210 list = &obj->map_list; 1209 list = &obj->map_list;
1211 list->map = drm_calloc(1, sizeof(struct drm_map_list), 1210 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1212 DRM_MEM_DRIVER);
1213 if (!list->map) 1211 if (!list->map)
1214 return -ENOMEM; 1212 return -ENOMEM;
1215 1213
@@ -1249,7 +1247,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1249out_free_mm: 1247out_free_mm:
1250 drm_mm_put_block(list->file_offset_node); 1248 drm_mm_put_block(list->file_offset_node);
1251out_free_list: 1249out_free_list:
1252 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER); 1250 kfree(list->map);
1253 1251
1254 return ret; 1252 return ret;
1255} 1253}
@@ -1271,7 +1269,7 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1271 } 1269 }
1272 1270
1273 if (list->map) { 1271 if (list->map) {
1274 drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER); 1272 kfree(list->map);
1275 list->map = NULL; 1273 list->map = NULL;
1276 } 1274 }
1277 1275
@@ -1494,7 +1492,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1494 if (file_priv != NULL) 1492 if (file_priv != NULL)
1495 i915_file_priv = file_priv->driver_priv; 1493 i915_file_priv = file_priv->driver_priv;
1496 1494
1497 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 1495 request = kzalloc(sizeof(*request), GFP_KERNEL);
1498 if (request == NULL) 1496 if (request == NULL)
1499 return 0; 1497 return 0;
1500 1498
@@ -1676,7 +1674,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1676 1674
1677 list_del(&request->list); 1675 list_del(&request->list);
1678 list_del(&request->client_list); 1676 list_del(&request->client_list);
1679 drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 1677 kfree(request);
1680 } else 1678 } else
1681 break; 1679 break;
1682 } 1680 }
@@ -2163,13 +2161,11 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2163 val |= I830_FENCE_REG_VALID; 2161 val |= I830_FENCE_REG_VALID;
2164 2162
2165 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2163 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2166
2167} 2164}
2168 2165
2169/** 2166/**
2170 * i915_gem_object_get_fence_reg - set up a fence reg for an object 2167 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2171 * @obj: object to map through a fence reg 2168 * @obj: object to map through a fence reg
2172 * @write: object is about to be written
2173 * 2169 *
2174 * When mapping objects through the GTT, userspace wants to be able to write 2170 * When mapping objects through the GTT, userspace wants to be able to write
2175 * to them without having to worry about swizzling if the object is tiled. 2171 * to them without having to worry about swizzling if the object is tiled.
@@ -2180,8 +2176,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2180 * It then sets up the reg based on the object's properties: address, pitch 2176 * It then sets up the reg based on the object's properties: address, pitch
2181 * and tiling format. 2177 * and tiling format.
2182 */ 2178 */
2183static int 2179int
2184i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) 2180i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2185{ 2181{
2186 struct drm_device *dev = obj->dev; 2182 struct drm_device *dev = obj->dev;
2187 struct drm_i915_private *dev_priv = dev->dev_private; 2183 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2331,6 +2327,42 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2331} 2327}
2332 2328
2333/** 2329/**
2330 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2331 * to the buffer to finish, and then resets the fence register.
2332 * @obj: tiled object holding a fence register.
2333 *
2334 * Zeroes out the fence register itself and clears out the associated
2335 * data structures in dev_priv and obj_priv.
2336 */
2337int
2338i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2339{
2340 struct drm_device *dev = obj->dev;
2341 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2342
2343 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2344 return 0;
2345
2346 /* On the i915, GPU access to tiled buffers is via a fence,
2347 * therefore we must wait for any outstanding access to complete
2348 * before clearing the fence.
2349 */
2350 if (!IS_I965G(dev)) {
2351 int ret;
2352
2353 i915_gem_object_flush_gpu_write_domain(obj);
2354 i915_gem_object_flush_gtt_write_domain(obj);
2355 ret = i915_gem_object_wait_rendering(obj);
2356 if (ret != 0)
2357 return ret;
2358 }
2359
2360 i915_gem_clear_fence_reg (obj);
2361
2362 return 0;
2363}
2364
2365/**
2334 * Finds free space in the GTT aperture and binds the object there. 2366 * Finds free space in the GTT aperture and binds the object there.
2335 */ 2367 */
2336static int 2368static int
@@ -2391,7 +2423,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2391 } 2423 }
2392 2424
2393#if WATCH_BUF 2425#if WATCH_BUF
2394 DRM_INFO("Binding object of size %d at 0x%08x\n", 2426 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2395 obj->size, obj_priv->gtt_offset); 2427 obj->size, obj_priv->gtt_offset);
2396#endif 2428#endif
2397 ret = i915_gem_object_get_pages(obj); 2429 ret = i915_gem_object_get_pages(obj);
@@ -2800,8 +2832,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2800 /* Free the page_cpu_valid mappings which are now stale, whether 2832 /* Free the page_cpu_valid mappings which are now stale, whether
2801 * or not we've got I915_GEM_DOMAIN_CPU. 2833 * or not we've got I915_GEM_DOMAIN_CPU.
2802 */ 2834 */
2803 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, 2835 kfree(obj_priv->page_cpu_valid);
2804 DRM_MEM_DRIVER);
2805 obj_priv->page_cpu_valid = NULL; 2836 obj_priv->page_cpu_valid = NULL;
2806} 2837}
2807 2838
@@ -2843,8 +2874,8 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2843 * newly adding I915_GEM_DOMAIN_CPU 2874 * newly adding I915_GEM_DOMAIN_CPU
2844 */ 2875 */
2845 if (obj_priv->page_cpu_valid == NULL) { 2876 if (obj_priv->page_cpu_valid == NULL) {
2846 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 2877 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
2847 DRM_MEM_DRIVER); 2878 GFP_KERNEL);
2848 if (obj_priv->page_cpu_valid == NULL) 2879 if (obj_priv->page_cpu_valid == NULL)
2849 return -ENOMEM; 2880 return -ENOMEM;
2850 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) 2881 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
@@ -3267,8 +3298,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3267 } 3298 }
3268 3299
3269 if (args->num_cliprects != 0) { 3300 if (args->num_cliprects != 0) {
3270 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), 3301 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3271 DRM_MEM_DRIVER); 3302 GFP_KERNEL);
3272 if (cliprects == NULL) 3303 if (cliprects == NULL)
3273 goto pre_mutex_err; 3304 goto pre_mutex_err;
3274 3305
@@ -3521,8 +3552,7 @@ err:
3521pre_mutex_err: 3552pre_mutex_err:
3522 drm_free_large(object_list); 3553 drm_free_large(object_list);
3523 drm_free_large(exec_list); 3554 drm_free_large(exec_list);
3524 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, 3555 kfree(cliprects);
3525 DRM_MEM_DRIVER);
3526 3556
3527 return ret; 3557 return ret;
3528} 3558}
@@ -3550,7 +3580,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3550 if (!IS_I965G(dev) && 3580 if (!IS_I965G(dev) &&
3551 obj_priv->fence_reg == I915_FENCE_REG_NONE && 3581 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3552 obj_priv->tiling_mode != I915_TILING_NONE) { 3582 obj_priv->tiling_mode != I915_TILING_NONE) {
3553 ret = i915_gem_object_get_fence_reg(obj, true); 3583 ret = i915_gem_object_get_fence_reg(obj);
3554 if (ret != 0) { 3584 if (ret != 0) {
3555 if (ret != -EBUSY && ret != -ERESTARTSYS) 3585 if (ret != -EBUSY && ret != -ERESTARTSYS)
3556 DRM_ERROR("Failure to install fence: %d\n", 3586 DRM_ERROR("Failure to install fence: %d\n",
@@ -3739,7 +3769,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
3739{ 3769{
3740 struct drm_i915_gem_object *obj_priv; 3770 struct drm_i915_gem_object *obj_priv;
3741 3771
3742 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); 3772 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
3743 if (obj_priv == NULL) 3773 if (obj_priv == NULL)
3744 return -ENOMEM; 3774 return -ENOMEM;
3745 3775
@@ -3777,9 +3807,9 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3777 3807
3778 i915_gem_free_mmap_offset(obj); 3808 i915_gem_free_mmap_offset(obj);
3779 3809
3780 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 3810 kfree(obj_priv->page_cpu_valid);
3781 kfree(obj_priv->bit_17); 3811 kfree(obj_priv->bit_17);
3782 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 3812 kfree(obj->driver_private);
3783} 3813}
3784 3814
3785/** Unbinds all objects that are on the given buffer list. */ 3815/** Unbinds all objects that are on the given buffer list. */
@@ -4197,6 +4227,7 @@ i915_gem_lastclose(struct drm_device *dev)
4197void 4227void
4198i915_gem_load(struct drm_device *dev) 4228i915_gem_load(struct drm_device *dev)
4199{ 4229{
4230 int i;
4200 drm_i915_private_t *dev_priv = dev->dev_private; 4231 drm_i915_private_t *dev_priv = dev->dev_private;
4201 4232
4202 spin_lock_init(&dev_priv->mm.active_list_lock); 4233 spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4216,6 +4247,18 @@ i915_gem_load(struct drm_device *dev)
4216 else 4247 else
4217 dev_priv->num_fence_regs = 8; 4248 dev_priv->num_fence_regs = 8;
4218 4249
4250 /* Initialize fence registers to zero */
4251 if (IS_I965G(dev)) {
4252 for (i = 0; i < 16; i++)
4253 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4254 } else {
4255 for (i = 0; i < 8; i++)
4256 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4257 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4258 for (i = 0; i < 8; i++)
4259 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4260 }
4261
4219 i915_gem_detect_bit_6_swizzle(dev); 4262 i915_gem_detect_bit_6_swizzle(dev);
4220} 4263}
4221 4264
@@ -4233,7 +4276,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4233 if (dev_priv->mm.phys_objs[id - 1] || !size) 4276 if (dev_priv->mm.phys_objs[id - 1] || !size)
4234 return 0; 4277 return 0;
4235 4278
4236 phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); 4279 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4237 if (!phys_obj) 4280 if (!phys_obj)
4238 return -ENOMEM; 4281 return -ENOMEM;
4239 4282
@@ -4252,7 +4295,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4252 4295
4253 return 0; 4296 return 0;
4254kfree_obj: 4297kfree_obj:
4255 drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); 4298 kfree(phys_obj);
4256 return ret; 4299 return ret;
4257} 4300}
4258 4301
@@ -4312,6 +4355,8 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4312 } 4355 }
4313 drm_clflush_pages(obj_priv->pages, page_count); 4356 drm_clflush_pages(obj_priv->pages, page_count);
4314 drm_agp_chipset_flush(dev); 4357 drm_agp_chipset_flush(dev);
4358
4359 i915_gem_object_put_pages(obj);
4315out: 4360out:
4316 obj_priv->phys_obj->cur_obj = NULL; 4361 obj_priv->phys_obj->cur_obj = NULL;
4317 obj_priv->phys_obj = NULL; 4362 obj_priv->phys_obj = NULL;
@@ -4369,6 +4414,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4369 kunmap_atomic(src, KM_USER0); 4414 kunmap_atomic(src, KM_USER0);
4370 } 4415 }
4371 4416
4417 i915_gem_object_put_pages(obj);
4418
4372 return 0; 4419 return 0;
4373out: 4420out:
4374 return ret; 4421 return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8d0b943e2c5a..e602614bd3f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
87 chunk_len = page_len - chunk; 87 chunk_len = page_len - chunk;
88 if (chunk_len > 128) 88 if (chunk_len > 128)
89 chunk_len = 128; 89 chunk_len = 128;
90 i915_gem_dump_page(obj_priv->page_list[page], 90 i915_gem_dump_page(obj_priv->pages[page],
91 chunk, chunk + chunk_len, 91 chunk, chunk + chunk_len,
92 obj_priv->gtt_offset + 92 obj_priv->gtt_offset +
93 page * PAGE_SIZE, 93 page * PAGE_SIZE,
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
143 uint32_t *backing_map = NULL; 143 uint32_t *backing_map = NULL;
144 int bad_count = 0; 144 int bad_count = 0;
145 145
146 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", 146 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
147 __func__, obj, obj_priv->gtt_offset, handle, 147 __func__, obj, obj_priv->gtt_offset, handle,
148 obj->size / 1024); 148 obj->size / 1024);
149 149
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
157 for (page = 0; page < obj->size / PAGE_SIZE; page++) { 157 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
158 int i; 158 int i;
159 159
160 backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); 160 backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
161 161
162 if (backing_map == NULL) { 162 if (backing_map == NULL) {
163 DRM_ERROR("failed to map backing page\n"); 163 DRM_ERROR("failed to map backing page\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 986f1082c596..28146e405e87 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -104,7 +104,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
104 if (obj->name) 104 if (obj->name)
105 seq_printf(m, " (name: %d)", obj->name); 105 seq_printf(m, " (name: %d)", obj->name);
106 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 106 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
107 seq_printf(m, " (fence: %d\n", obj_priv->fence_reg); 107 seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg);
108 seq_printf(m, "\n"); 108 seq_printf(m, "\n");
109 } 109 }
110 110
@@ -318,7 +318,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
318 seq_printf(m, "RingTail : %08x\n", tail); 318 seq_printf(m, "RingTail : %08x\n", tail);
319 seq_printf(m, "RingMask : %08x\n", mask); 319 seq_printf(m, "RingMask : %08x\n", mask);
320 seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); 320 seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
321 seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); 321 seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
322 322
323 return 0; 323 return 0;
324} 324}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 9a05cadaa4ad..daeae62e1c28 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
115 115
116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
117#ifdef CONFIG_PNP
117 if (mchbar_addr && 118 if (mchbar_addr &&
118 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { 119 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
119 ret = 0; 120 ret = 0;
120 goto out_put; 121 goto out_put;
121 } 122 }
123#endif
122 124
123 /* Get some space for it */ 125 /* Get some space for it */
124 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, 126 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
@@ -408,7 +410,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
408 if (stride & (stride - 1)) 410 if (stride & (stride - 1))
409 return false; 411 return false;
410 412
411 /* We don't handle the aperture area covered by the fence being bigger 413 /* We don't handle the aperture area covered by the fence being bigger
412 * than the object size. 414 * than the object size.
413 */ 415 */
414 if (i915_get_fence_size(dev, size) != size) 416 if (i915_get_fence_size(dev, size) != size)
@@ -417,6 +419,33 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
417 return true; 419 return true;
418} 420}
419 421
422static bool
423i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
424{
425 struct drm_device *dev = obj->dev;
426 struct drm_i915_gem_object *obj_priv = obj->driver_private;
427
428 if (obj_priv->gtt_space == NULL)
429 return true;
430
431 if (tiling_mode == I915_TILING_NONE)
432 return true;
433
434 if (!IS_I965G(dev)) {
435 if (obj_priv->gtt_offset & (obj->size - 1))
436 return false;
437 if (IS_I9XX(dev)) {
438 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
439 return false;
440 } else {
441 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
442 return false;
443 }
444 }
445
446 return true;
447}
448
420/** 449/**
421 * Sets the tiling mode of an object, returning the required swizzling of 450 * Sets the tiling mode of an object, returning the required swizzling of
422 * bit 6 of addresses in the object. 451 * bit 6 of addresses in the object.
@@ -429,6 +458,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
429 drm_i915_private_t *dev_priv = dev->dev_private; 458 drm_i915_private_t *dev_priv = dev->dev_private;
430 struct drm_gem_object *obj; 459 struct drm_gem_object *obj;
431 struct drm_i915_gem_object *obj_priv; 460 struct drm_i915_gem_object *obj_priv;
461 int ret = 0;
432 462
433 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 463 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
434 if (obj == NULL) 464 if (obj == NULL)
@@ -436,14 +466,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
436 obj_priv = obj->driver_private; 466 obj_priv = obj->driver_private;
437 467
438 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 468 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
469 mutex_lock(&dev->struct_mutex);
439 drm_gem_object_unreference(obj); 470 drm_gem_object_unreference(obj);
471 mutex_unlock(&dev->struct_mutex);
440 return -EINVAL; 472 return -EINVAL;
441 } 473 }
442 474
443 mutex_lock(&dev->struct_mutex);
444
445 if (args->tiling_mode == I915_TILING_NONE) { 475 if (args->tiling_mode == I915_TILING_NONE) {
446 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 476 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
477 args->stride = 0;
447 } else { 478 } else {
448 if (args->tiling_mode == I915_TILING_X) 479 if (args->tiling_mode == I915_TILING_X)
449 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 480 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -466,32 +497,38 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
466 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { 497 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
467 args->tiling_mode = I915_TILING_NONE; 498 args->tiling_mode = I915_TILING_NONE;
468 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 499 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
500 args->stride = 0;
469 } 501 }
470 } 502 }
471 if (args->tiling_mode != obj_priv->tiling_mode) {
472 int ret;
473 503
474 /* Unbind the object, as switching tiling means we're 504 mutex_lock(&dev->struct_mutex);
475 * switching the cache organization due to fencing, probably. 505 if (args->tiling_mode != obj_priv->tiling_mode ||
506 args->stride != obj_priv->stride) {
507 /* We need to rebind the object if its current allocation
508 * no longer meets the alignment restrictions for its new
509 * tiling mode. Otherwise we can just leave it alone, but
510 * need to ensure that any fence register is cleared.
476 */ 511 */
477 ret = i915_gem_object_unbind(obj); 512 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
513 ret = i915_gem_object_unbind(obj);
514 else
515 ret = i915_gem_object_put_fence_reg(obj);
478 if (ret != 0) { 516 if (ret != 0) {
479 WARN(ret != -ERESTARTSYS, 517 WARN(ret != -ERESTARTSYS,
480 "failed to unbind object for tiling switch"); 518 "failed to reset object for tiling switch");
481 args->tiling_mode = obj_priv->tiling_mode; 519 args->tiling_mode = obj_priv->tiling_mode;
482 mutex_unlock(&dev->struct_mutex); 520 args->stride = obj_priv->stride;
483 drm_gem_object_unreference(obj); 521 goto err;
484
485 return ret;
486 } 522 }
523
487 obj_priv->tiling_mode = args->tiling_mode; 524 obj_priv->tiling_mode = args->tiling_mode;
525 obj_priv->stride = args->stride;
488 } 526 }
489 obj_priv->stride = args->stride; 527err:
490
491 drm_gem_object_unreference(obj); 528 drm_gem_object_unreference(obj);
492 mutex_unlock(&dev->struct_mutex); 529 mutex_unlock(&dev->struct_mutex);
493 530
494 return 0; 531 return ret;
495} 532}
496 533
497/** 534/**
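The i915_gem_set_tiling() rework above ties the two new helpers together: if the object's current GTT offset can no longer back a fence in the new tiling mode it is unbound, otherwise only the fence register is dropped. A condensed paraphrase of the new flow, using the same names as the hunk (not the literal function body):

    /* tiling mode or stride changed; called with dev->struct_mutex held */
    if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
            ret = i915_gem_object_unbind(obj);        /* offset no longer legal: force a rebind */
    else
            ret = i915_gem_object_put_fence_reg(obj); /* offset still fine: wait and clear the fence */
    if (ret == 0) {
            obj_priv->tiling_mode = args->tiling_mode;
            obj_priv->stride = args->stride;
    }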
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b86b7b7130c6..228546f6eaa4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -232,7 +232,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
232 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 232 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
233 hotplug_work); 233 hotplug_work);
234 struct drm_device *dev = dev_priv->dev; 234 struct drm_device *dev = dev_priv->dev;
235 235 struct drm_mode_config *mode_config = &dev->mode_config;
236 struct drm_connector *connector;
237
238 if (mode_config->num_connector) {
239 list_for_each_entry(connector, &mode_config->connector_list, head) {
240 struct intel_output *intel_output = to_intel_output(connector);
241
242 if (intel_output->hot_plug)
243 (*intel_output->hot_plug) (intel_output);
244 }
245 }
236 /* Just fire off a uevent and let userspace tell us what to do */ 246 /* Just fire off a uevent and let userspace tell us what to do */
237 drm_sysfs_hotplug_event(dev); 247 drm_sysfs_hotplug_event(dev);
238} 248}
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 96e271986d2a..83b7b81bb2b8 100644
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -94,8 +94,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
94{ 94{
95 /* Maybe cut off the start of an existing block */ 95 /* Maybe cut off the start of an existing block */
96 if (start > p->start) { 96 if (start > p->start) {
97 struct mem_block *newblock = 97 struct mem_block *newblock = kmalloc(sizeof(*newblock),
98 drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); 98 GFP_KERNEL);
99 if (!newblock) 99 if (!newblock)
100 goto out; 100 goto out;
101 newblock->start = start; 101 newblock->start = start;
@@ -111,8 +111,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
111 111
112 /* Maybe cut off the end of an existing block */ 112 /* Maybe cut off the end of an existing block */
113 if (size < p->size) { 113 if (size < p->size) {
114 struct mem_block *newblock = 114 struct mem_block *newblock = kmalloc(sizeof(*newblock),
115 drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); 115 GFP_KERNEL);
116 if (!newblock) 116 if (!newblock)
117 goto out; 117 goto out;
118 newblock->start = start + size; 118 newblock->start = start + size;
@@ -169,7 +169,7 @@ static void free_block(struct mem_block *p)
169 p->size += q->size; 169 p->size += q->size;
170 p->next = q->next; 170 p->next = q->next;
171 p->next->prev = p; 171 p->next->prev = p;
172 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 172 kfree(q);
173 } 173 }
174 174
175 if (p->prev->file_priv == NULL) { 175 if (p->prev->file_priv == NULL) {
@@ -177,7 +177,7 @@ static void free_block(struct mem_block *p)
177 q->size += p->size; 177 q->size += p->size;
178 q->next = p->next; 178 q->next = p->next;
179 q->next->prev = q; 179 q->next->prev = q;
180 drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS); 180 kfree(p);
181 } 181 }
182} 182}
183 183
@@ -185,14 +185,14 @@ static void free_block(struct mem_block *p)
185 */ 185 */
186static int init_heap(struct mem_block **heap, int start, int size) 186static int init_heap(struct mem_block **heap, int start, int size)
187{ 187{
188 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS); 188 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
189 189
190 if (!blocks) 190 if (!blocks)
191 return -ENOMEM; 191 return -ENOMEM;
192 192
193 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS); 193 *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
194 if (!*heap) { 194 if (!*heap) {
195 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS); 195 kfree(blocks);
196 return -ENOMEM; 196 return -ENOMEM;
197 } 197 }
198 198
@@ -233,7 +233,7 @@ void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
233 p->size += q->size; 233 p->size += q->size;
234 p->next = q->next; 234 p->next = q->next;
235 p->next->prev = p; 235 p->next->prev = p;
236 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 236 kfree(q);
237 } 237 }
238 } 238 }
239} 239}
@@ -250,10 +250,10 @@ void i915_mem_takedown(struct mem_block **heap)
250 for (p = (*heap)->next; p != *heap;) { 250 for (p = (*heap)->next; p != *heap;) {
251 struct mem_block *q = p; 251 struct mem_block *q = p;
252 p = p->next; 252 p = p->next;
253 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 253 kfree(q);
254 } 254 }
255 255
256 drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS); 256 kfree(*heap);
257 *heap = NULL; 257 *heap = NULL;
258} 258}
259 259
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index dc425e74a268..e4b4e8898e39 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -419,7 +419,7 @@ void intel_opregion_free(struct drm_device *dev, int suspend)
419 return; 419 return;
420 420
421 if (!suspend) 421 if (!suspend)
422 acpi_video_exit(); 422 acpi_video_unregister();
423 423
424 opregion->acpi->drdy = 0; 424 opregion->acpi->drdy = 0;
425 425
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f6237a0b1133..88bf7521405f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -569,6 +569,19 @@
569#define C0DRB3 0x10206 569#define C0DRB3 0x10206
570#define C1DRB3 0x10606 570#define C1DRB3 0x10606
571 571
572/* Clocking configuration register */
573#define CLKCFG 0x10c00
574#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
575#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
576#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
577#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
578#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
579#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
580/* this is a guess, could be 5 as well */
581#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
582#define CLKCFG_FSB_1600_ALT (5 << 0) /* hrawclk 400 */
583#define CLKCFG_FSB_MASK (7 << 0)
584
572/** GM965 GM45 render standby register */ 585/** GM965 GM45 render standby register */
573#define MCHBAR_RENDER_STANDBY 0x111B8 586#define MCHBAR_RENDER_STANDBY 0x111B8
574 587
@@ -834,9 +847,25 @@
834#define HORIZ_INTERP_MASK (3 << 6) 847#define HORIZ_INTERP_MASK (3 << 6)
835#define HORIZ_AUTO_SCALE (1 << 5) 848#define HORIZ_AUTO_SCALE (1 << 5)
836#define PANEL_8TO6_DITHER_ENABLE (1 << 3) 849#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
850#define PFIT_FILTER_FUZZY (0 << 24)
851#define PFIT_SCALING_AUTO (0 << 26)
852#define PFIT_SCALING_PROGRAMMED (1 << 26)
853#define PFIT_SCALING_PILLAR (2 << 26)
854#define PFIT_SCALING_LETTER (3 << 26)
837#define PFIT_PGM_RATIOS 0x61234 855#define PFIT_PGM_RATIOS 0x61234
838#define PFIT_VERT_SCALE_MASK 0xfff00000 856#define PFIT_VERT_SCALE_MASK 0xfff00000
839#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 857#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
858/* Pre-965 */
859#define PFIT_VERT_SCALE_SHIFT 20
860#define PFIT_VERT_SCALE_MASK 0xfff00000
861#define PFIT_HORIZ_SCALE_SHIFT 4
862#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
863/* 965+ */
864#define PFIT_VERT_SCALE_SHIFT_965 16
865#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
866#define PFIT_HORIZ_SCALE_SHIFT_965 0
867#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
868
840#define PFIT_AUTO_RATIOS 0x61238 869#define PFIT_AUTO_RATIOS 0x61238
841 870
842/* Backlight control */ 871/* Backlight control */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a98e2831ed31..8d8e083d14ab 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -322,6 +322,20 @@ int i915_save_state(struct drm_device *dev)
322 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 322 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
323 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 323 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
324 324
325 /* Display Port state */
326 if (SUPPORTS_INTEGRATED_DP(dev)) {
327 dev_priv->saveDP_B = I915_READ(DP_B);
328 dev_priv->saveDP_C = I915_READ(DP_C);
329 dev_priv->saveDP_D = I915_READ(DP_D);
330 dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
331 dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
332 dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
333 dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
334 dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
335 dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
336 dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
337 dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
338 }
325 /* FIXME: save TV & SDVO state */ 339 /* FIXME: save TV & SDVO state */
326 340
327 /* FBC state */ 341 /* FBC state */
@@ -404,7 +418,19 @@ int i915_restore_state(struct drm_device *dev)
404 for (i = 0; i < 8; i++) 418 for (i = 0; i < 8; i++)
405 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 419 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
406 } 420 }
407 421
422 /* Display port ratios (must be done before clock is set) */
423 if (SUPPORTS_INTEGRATED_DP(dev)) {
424 I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
425 I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
426 I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
427 I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
428 I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
429 I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
430 I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
431 I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
432 }
433
408 /* Pipe & plane A info */ 434 /* Pipe & plane A info */
409 /* Prime the clock */ 435 /* Prime the clock */
410 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 436 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -518,6 +544,12 @@ int i915_restore_state(struct drm_device *dev)
518 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 544 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
519 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 545 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
520 546
547 /* Display Port state */
548 if (SUPPORTS_INTEGRATED_DP(dev)) {
549 I915_WRITE(DP_B, dev_priv->saveDP_B);
550 I915_WRITE(DP_C, dev_priv->saveDP_C);
551 I915_WRITE(DP_D, dev_priv->saveDP_D);
552 }
521 /* FIXME: restore TV & SDVO state */ 553 /* FIXME: restore TV & SDVO state */
522 554
523 /* FBC info */ 555 /* FBC info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 754dd22fdd77..716409a57244 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
99{ 99{
100 struct bdb_lvds_options *lvds_options; 100 struct bdb_lvds_options *lvds_options;
101 struct bdb_lvds_lfp_data *lvds_lfp_data; 101 struct bdb_lvds_lfp_data *lvds_lfp_data;
102 struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
102 struct bdb_lvds_lfp_data_entry *entry; 103 struct bdb_lvds_lfp_data_entry *entry;
103 struct lvds_dvo_timing *dvo_timing; 104 struct lvds_dvo_timing *dvo_timing;
104 struct drm_display_mode *panel_fixed_mode; 105 struct drm_display_mode *panel_fixed_mode;
106 int lfp_data_size;
105 107
106 /* Defaults if we can't find VBT info */ 108 /* Defaults if we can't find VBT info */
107 dev_priv->lvds_dither = 0; 109 dev_priv->lvds_dither = 0;
@@ -119,13 +121,20 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
119 if (!lvds_lfp_data) 121 if (!lvds_lfp_data)
120 return; 122 return;
121 123
124 lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
125 if (!lvds_lfp_data_ptrs)
126 return;
127
122 dev_priv->lvds_vbt = 1; 128 dev_priv->lvds_vbt = 1;
123 129
124 entry = &lvds_lfp_data->data[lvds_options->panel_type]; 130 lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
131 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
132 entry = (struct bdb_lvds_lfp_data_entry *)
133 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
134 lvds_options->panel_type));
125 dvo_timing = &entry->dvo_timing; 135 dvo_timing = &entry->dvo_timing;
126 136
127 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 137 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
128 DRM_MEM_DRIVER);
129 138
130 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 139 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
131 140
@@ -156,8 +165,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
156 if (!dvo_timing) 165 if (!dvo_timing)
157 return; 166 return;
158 167
159 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 168 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
160 DRM_MEM_DRIVER);
161 169
162 if (!panel_fixed_mode) 170 if (!panel_fixed_mode)
163 return; 171 return;
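The intel_bios.c change above stops indexing the LFP data table as an array of fixed-size structs and instead derives the per-entry stride from the LFP data pointers block, presumably because the entry layout can vary between VBT versions. A worked example with made-up offsets (the two dvo_timing_offset values below are hypothetical, purely for illustration):

    /* if ptr[0].dvo_timing_offset == 0x40 and ptr[1].dvo_timing_offset == 0x6a,
     * consecutive entries are 0x2a bytes apart; for panel_type == 2 the entry
     * therefore starts 0x54 bytes into the data block:
     */
    lfp_data_size = 0x6a - 0x40;                            /* 42 bytes per entry */
    entry = (struct bdb_lvds_lfp_data_entry *)
            ((uint8_t *)lvds_lfp_data->data + lfp_data_size * 2);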
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 028f5b66e3d8..73e7b9cecac8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
29#include "intel_drv.h" 29#include "intel_drv.h"
30#include "i915_drm.h" 30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "intel_dp.h"
32 33
33#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
34 35
@@ -127,19 +128,6 @@ struct intel_limit {
127#define I9XX_P2_LVDS_FAST 7 128#define I9XX_P2_LVDS_FAST 7
128#define I9XX_P2_LVDS_SLOW_LIMIT 112000 129#define I9XX_P2_LVDS_SLOW_LIMIT 112000
129 130
130#define INTEL_LIMIT_I8XX_DVO_DAC 0
131#define INTEL_LIMIT_I8XX_LVDS 1
132#define INTEL_LIMIT_I9XX_SDVO_DAC 2
133#define INTEL_LIMIT_I9XX_LVDS 3
134#define INTEL_LIMIT_G4X_SDVO 4
135#define INTEL_LIMIT_G4X_HDMI_DAC 5
136#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9
140#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
141#define INTEL_LIMIT_IGDNG_LVDS 11
142
143/*The parameter is for SDVO on G4x platform*/ 131/*The parameter is for SDVO on G4x platform*/
144#define G4X_DOT_SDVO_MIN 25000 132#define G4X_DOT_SDVO_MIN 25000
145#define G4X_DOT_SDVO_MAX 270000 133#define G4X_DOT_SDVO_MAX 270000
@@ -218,6 +206,25 @@ struct intel_limit {
218#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 206#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
219#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 207#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
220 208
209/*The parameter is for DISPLAY PORT on G4x platform*/
210#define G4X_DOT_DISPLAY_PORT_MIN 161670
211#define G4X_DOT_DISPLAY_PORT_MAX 227000
212#define G4X_N_DISPLAY_PORT_MIN 1
213#define G4X_N_DISPLAY_PORT_MAX 2
214#define G4X_M_DISPLAY_PORT_MIN 97
215#define G4X_M_DISPLAY_PORT_MAX 108
216#define G4X_M1_DISPLAY_PORT_MIN 0x10
217#define G4X_M1_DISPLAY_PORT_MAX 0x12
218#define G4X_M2_DISPLAY_PORT_MIN 0x05
219#define G4X_M2_DISPLAY_PORT_MAX 0x06
220#define G4X_P_DISPLAY_PORT_MIN 10
221#define G4X_P_DISPLAY_PORT_MAX 20
222#define G4X_P1_DISPLAY_PORT_MIN 1
223#define G4X_P1_DISPLAY_PORT_MAX 2
224#define G4X_P2_DISPLAY_PORT_SLOW 10
225#define G4X_P2_DISPLAY_PORT_FAST 10
226#define G4X_P2_DISPLAY_PORT_LIMIT 0
227
221/* IGDNG */ 228/* IGDNG */
222/* as we calculate clock using (register_value + 2) for 229/* as we calculate clock using (register_value + 2) for
223 N/M1/M2, so here the range value for them is (actual_value-2). 230 N/M1/M2, so here the range value for them is (actual_value-2).
@@ -256,8 +263,11 @@ static bool
256intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 263intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 int target, int refclk, intel_clock_t *best_clock); 264 int target, int refclk, intel_clock_t *best_clock);
258 265
259static const intel_limit_t intel_limits[] = { 266static bool
260 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 267intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
268 int target, int refclk, intel_clock_t *best_clock);
269
270static const intel_limit_t intel_limits_i8xx_dvo = {
261 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 271 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
262 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 272 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
263 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 273 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -269,8 +279,9 @@ static const intel_limit_t intel_limits[] = {
269 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 279 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
270 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 280 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
271 .find_pll = intel_find_best_PLL, 281 .find_pll = intel_find_best_PLL,
272 }, 282};
273 { /* INTEL_LIMIT_I8XX_LVDS */ 283
284static const intel_limit_t intel_limits_i8xx_lvds = {
274 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 285 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
275 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 286 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
276 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 287 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -282,8 +293,9 @@ static const intel_limit_t intel_limits[] = {
282 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 293 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
283 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 294 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
284 .find_pll = intel_find_best_PLL, 295 .find_pll = intel_find_best_PLL,
285 }, 296};
286 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 297
298static const intel_limit_t intel_limits_i9xx_sdvo = {
287 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 299 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
288 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 300 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
289 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 301 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -295,8 +307,9 @@ static const intel_limit_t intel_limits[] = {
295 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 307 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
296 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 308 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
297 .find_pll = intel_find_best_PLL, 309 .find_pll = intel_find_best_PLL,
298 }, 310};
299 { /* INTEL_LIMIT_I9XX_LVDS */ 311
312static const intel_limit_t intel_limits_i9xx_lvds = {
300 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 313 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
301 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 314 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
302 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 315 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -311,9 +324,10 @@ static const intel_limit_t intel_limits[] = {
311 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 324 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
312 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 325 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
313 .find_pll = intel_find_best_PLL, 326 .find_pll = intel_find_best_PLL,
314 }, 327};
328
315 /* below parameter and function is for G4X Chipset Family*/ 329 /* below parameter and function is for G4X Chipset Family*/
316 { /* INTEL_LIMIT_G4X_SDVO */ 330static const intel_limit_t intel_limits_g4x_sdvo = {
317 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, 331 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
318 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 332 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
319 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, 333 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
@@ -327,8 +341,9 @@ static const intel_limit_t intel_limits[] = {
327 .p2_fast = G4X_P2_SDVO_FAST 341 .p2_fast = G4X_P2_SDVO_FAST
328 }, 342 },
329 .find_pll = intel_g4x_find_best_PLL, 343 .find_pll = intel_g4x_find_best_PLL,
330 }, 344};
331 { /* INTEL_LIMIT_G4X_HDMI_DAC */ 345
346static const intel_limit_t intel_limits_g4x_hdmi = {
332 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, 347 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
333 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 348 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
334 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, 349 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
@@ -342,8 +357,9 @@ static const intel_limit_t intel_limits[] = {
342 .p2_fast = G4X_P2_HDMI_DAC_FAST 357 .p2_fast = G4X_P2_HDMI_DAC_FAST
343 }, 358 },
344 .find_pll = intel_g4x_find_best_PLL, 359 .find_pll = intel_g4x_find_best_PLL,
345 }, 360};
346 { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ 361
362static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
347 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, 363 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
348 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, 364 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
349 .vco = { .min = G4X_VCO_MIN, 365 .vco = { .min = G4X_VCO_MIN,
@@ -365,8 +381,9 @@ static const intel_limit_t intel_limits[] = {
365 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 381 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
366 }, 382 },
367 .find_pll = intel_g4x_find_best_PLL, 383 .find_pll = intel_g4x_find_best_PLL,
368 }, 384};
369 { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ 385
386static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
370 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, 387 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
371 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, 388 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
372 .vco = { .min = G4X_VCO_MIN, 389 .vco = { .min = G4X_VCO_MIN,
@@ -388,8 +405,32 @@ static const intel_limit_t intel_limits[] = {
388 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 405 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
389 }, 406 },
390 .find_pll = intel_g4x_find_best_PLL, 407 .find_pll = intel_g4x_find_best_PLL,
391 }, 408};
392 { /* INTEL_LIMIT_IGD_SDVO */ 409
410static const intel_limit_t intel_limits_g4x_display_port = {
411 .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
412 .max = G4X_DOT_DISPLAY_PORT_MAX },
413 .vco = { .min = G4X_VCO_MIN,
414 .max = G4X_VCO_MAX},
415 .n = { .min = G4X_N_DISPLAY_PORT_MIN,
416 .max = G4X_N_DISPLAY_PORT_MAX },
417 .m = { .min = G4X_M_DISPLAY_PORT_MIN,
418 .max = G4X_M_DISPLAY_PORT_MAX },
419 .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
420 .max = G4X_M1_DISPLAY_PORT_MAX },
421 .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
422 .max = G4X_M2_DISPLAY_PORT_MAX },
423 .p = { .min = G4X_P_DISPLAY_PORT_MIN,
424 .max = G4X_P_DISPLAY_PORT_MAX },
425 .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
426 .max = G4X_P1_DISPLAY_PORT_MAX},
427 .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
428 .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
429 .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
430 .find_pll = intel_find_pll_g4x_dp,
431};
432
433static const intel_limit_t intel_limits_igd_sdvo = {
393 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 434 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
394 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 435 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
395 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 436 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -401,8 +442,9 @@ static const intel_limit_t intel_limits[] = {
401 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 442 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
402 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 443 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
403 .find_pll = intel_find_best_PLL, 444 .find_pll = intel_find_best_PLL,
404 }, 445};
405 { /* INTEL_LIMIT_IGD_LVDS */ 446
447static const intel_limit_t intel_limits_igd_lvds = {
406 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 448 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
407 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 449 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
408 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 450 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -415,8 +457,9 @@ static const intel_limit_t intel_limits[] = {
415 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 457 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
416 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 458 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
417 .find_pll = intel_find_best_PLL, 459 .find_pll = intel_find_best_PLL,
418 }, 460};
419 { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ 461
462static const intel_limit_t intel_limits_igdng_sdvo = {
420 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 463 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
421 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 464 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
422 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 465 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -429,8 +472,9 @@ static const intel_limit_t intel_limits[] = {
429 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, 472 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
430 .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, 473 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
431 .find_pll = intel_igdng_find_best_PLL, 474 .find_pll = intel_igdng_find_best_PLL,
432 }, 475};
433 { /* INTEL_LIMIT_IGDNG_LVDS */ 476
477static const intel_limit_t intel_limits_igdng_lvds = {
434 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 478 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
435 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 479 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
436 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 480 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -443,16 +487,15 @@ static const intel_limit_t intel_limits[] = {
443 .p2_slow = IGDNG_P2_LVDS_SLOW, 487 .p2_slow = IGDNG_P2_LVDS_SLOW,
444 .p2_fast = IGDNG_P2_LVDS_FAST }, 488 .p2_fast = IGDNG_P2_LVDS_FAST },
445 .find_pll = intel_igdng_find_best_PLL, 489 .find_pll = intel_igdng_find_best_PLL,
446 },
447}; 490};
448 491
449static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) 492static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
450{ 493{
451 const intel_limit_t *limit; 494 const intel_limit_t *limit;
452 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 495 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
453 limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; 496 limit = &intel_limits_igdng_lvds;
454 else 497 else
455 limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; 498 limit = &intel_limits_igdng_sdvo;
456 499
457 return limit; 500 return limit;
458} 501}
@@ -467,19 +510,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
467 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 510 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
468 LVDS_CLKB_POWER_UP) 511 LVDS_CLKB_POWER_UP)
469 /* LVDS with dual channel */ 512 /* LVDS with dual channel */
470 limit = &intel_limits 513 limit = &intel_limits_g4x_dual_channel_lvds;
471 [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
472 else 514 else
 473			/* LVDS with single channel */ 515			/* LVDS with single channel */
474 limit = &intel_limits 516 limit = &intel_limits_g4x_single_channel_lvds;
475 [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
476 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 517 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
477 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 518 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
478 limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; 519 limit = &intel_limits_g4x_hdmi;
479 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 520 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
480 limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; 521 limit = &intel_limits_g4x_sdvo;
522 } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
523 limit = &intel_limits_g4x_display_port;
481 } else /* The option is for other outputs */ 524 } else /* The option is for other outputs */
482 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 525 limit = &intel_limits_i9xx_sdvo;
483 526
484 return limit; 527 return limit;
485} 528}
@@ -495,19 +538,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
495 limit = intel_g4x_limit(crtc); 538 limit = intel_g4x_limit(crtc);
496 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 539 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
497 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 540 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
498 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; 541 limit = &intel_limits_i9xx_lvds;
499 else 542 else
500 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 543 limit = &intel_limits_i9xx_sdvo;
501 } else if (IS_IGD(dev)) { 544 } else if (IS_IGD(dev)) {
502 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 545 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
503 limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; 546 limit = &intel_limits_igd_lvds;
504 else 547 else
505 limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; 548 limit = &intel_limits_igd_sdvo;
506 } else { 549 } else {
507 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 550 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
508 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; 551 limit = &intel_limits_i8xx_lvds;
509 else 552 else
510 limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; 553 limit = &intel_limits_i8xx_dvo;
511 } 554 }
512 return limit; 555 return limit;
513} 556}
@@ -764,6 +807,35 @@ out:
764 return found; 807 return found;
765} 808}
766 809
810/* DisplayPort has only two frequencies, 162MHz and 270MHz */
811static bool
812intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
813 int target, int refclk, intel_clock_t *best_clock)
814{
815 intel_clock_t clock;
816 if (target < 200000) {
817 clock.dot = 161670;
818 clock.p = 20;
819 clock.p1 = 2;
820 clock.p2 = 10;
821 clock.n = 0x01;
822 clock.m = 97;
823 clock.m1 = 0x10;
824 clock.m2 = 0x05;
825 } else {
826 clock.dot = 270000;
827 clock.p = 10;
828 clock.p1 = 1;
829 clock.p2 = 10;
830 clock.n = 0x02;
831 clock.m = 108;
832 clock.m1 = 0x12;
833 clock.m2 = 0x06;
834 }
835 memcpy(best_clock, &clock, sizeof(intel_clock_t));
836 return true;
837}
838
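For reference, the two fixed divisor sets in intel_find_pll_g4x_dp() above are consistent with a 100 MHz DP reference clock under the usual G4X dot-clock relation dot = refclk * m / (n + 2) / (p1 * p2) with m = 5 * (m1 + 2) + (m2 + 2). Both the 100 MHz refclk and that m encoding are assumptions here, not something this hunk states; a standalone check:

#include <stdio.h>

/* Hypothetical sketch: recompute the two fixed DisplayPort dot clocks,
 * assuming refclk = 100000 kHz and m = 5*(m1+2) + (m2+2). */
static int g4x_dot_khz(int refclk, int n, int m1, int m2, int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);

	return refclk * m / (n + 2) / (p1 * p2);
}

int main(void)
{
	printf("%d\n", g4x_dot_khz(100000, 0x01, 0x10, 0x05, 2, 10)); /* ~161666, the 162 MHz link */
	printf("%d\n", g4x_dot_khz(100000, 0x02, 0x12, 0x06, 1, 10)); /* 270000, the 270 MHz link */
	return 0;
}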
767void 839void
768intel_wait_for_vblank(struct drm_device *dev) 840intel_wait_for_vblank(struct drm_device *dev)
769{ 841{
@@ -828,19 +900,31 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
828 } 900 }
829 901
830 mutex_lock(&dev->struct_mutex); 902 mutex_lock(&dev->struct_mutex);
831 ret = i915_gem_object_pin(intel_fb->obj, alignment); 903 ret = i915_gem_object_pin(obj, alignment);
832 if (ret != 0) { 904 if (ret != 0) {
833 mutex_unlock(&dev->struct_mutex); 905 mutex_unlock(&dev->struct_mutex);
834 return ret; 906 return ret;
835 } 907 }
836 908
837 ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); 909 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
838 if (ret != 0) { 910 if (ret != 0) {
839 i915_gem_object_unpin(intel_fb->obj); 911 i915_gem_object_unpin(obj);
840 mutex_unlock(&dev->struct_mutex); 912 mutex_unlock(&dev->struct_mutex);
841 return ret; 913 return ret;
842 } 914 }
843 915
916 /* Pre-i965 needs to install a fence for tiled scan-out */
917 if (!IS_I965G(dev) &&
918 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
919 obj_priv->tiling_mode != I915_TILING_NONE) {
920 ret = i915_gem_object_get_fence_reg(obj);
921 if (ret != 0) {
922 i915_gem_object_unpin(obj);
923 mutex_unlock(&dev->struct_mutex);
924 return ret;
925 }
926 }
927
844 dspcntr = I915_READ(dspcntr_reg); 928 dspcntr = I915_READ(dspcntr_reg);
845 /* Mask out pixel format bits in case we change it */ 929 /* Mask out pixel format bits in case we change it */
846 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 930 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -860,7 +944,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
860 break; 944 break;
861 default: 945 default:
862 DRM_ERROR("Unknown color depth\n"); 946 DRM_ERROR("Unknown color depth\n");
863 i915_gem_object_unpin(intel_fb->obj); 947 i915_gem_object_unpin(obj);
864 mutex_unlock(&dev->struct_mutex); 948 mutex_unlock(&dev->struct_mutex);
865 return -EINVAL; 949 return -EINVAL;
866 } 950 }
@@ -1529,7 +1613,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1529 intel_clock_t clock; 1613 intel_clock_t clock;
1530 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 1614 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
1531 bool ok, is_sdvo = false, is_dvo = false; 1615 bool ok, is_sdvo = false, is_dvo = false;
1532 bool is_crt = false, is_lvds = false, is_tv = false; 1616 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
1533 struct drm_mode_config *mode_config = &dev->mode_config; 1617 struct drm_mode_config *mode_config = &dev->mode_config;
1534 struct drm_connector *connector; 1618 struct drm_connector *connector;
1535 const intel_limit_t *limit; 1619 const intel_limit_t *limit;
@@ -1573,6 +1657,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1573 case INTEL_OUTPUT_ANALOG: 1657 case INTEL_OUTPUT_ANALOG:
1574 is_crt = true; 1658 is_crt = true;
1575 break; 1659 break;
1660 case INTEL_OUTPUT_DISPLAYPORT:
1661 is_dp = true;
1662 break;
1576 } 1663 }
1577 1664
1578 num_outputs++; 1665 num_outputs++;
@@ -1588,6 +1675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1588 } else { 1675 } else {
1589 refclk = 48000; 1676 refclk = 48000;
1590 } 1677 }
1678
1591 1679
1592 /* 1680 /*
1593 * Returns a set of divisors for the desired target clock with the given 1681 * Returns a set of divisors for the desired target clock with the given
@@ -1650,6 +1738,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1650 else if (IS_IGDNG(dev)) 1738 else if (IS_IGDNG(dev))
1651 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 1739 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1652 } 1740 }
1741 if (is_dp)
1742 dpll |= DPLL_DVO_HIGH_SPEED;
1653 1743
1654 /* compute bitmask from p1 value */ 1744 /* compute bitmask from p1 value */
1655 if (IS_IGD(dev)) 1745 if (IS_IGD(dev))
@@ -1797,6 +1887,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1797 I915_WRITE(lvds_reg, lvds); 1887 I915_WRITE(lvds_reg, lvds);
1798 I915_READ(lvds_reg); 1888 I915_READ(lvds_reg);
1799 } 1889 }
1890 if (is_dp)
1891 intel_dp_set_m_n(crtc, mode, adjusted_mode);
1800 1892
1801 I915_WRITE(fp_reg, fp); 1893 I915_WRITE(fp_reg, fp);
1802 I915_WRITE(dpll_reg, dpll); 1894 I915_WRITE(dpll_reg, dpll);
@@ -2463,6 +2555,8 @@ static void intel_setup_outputs(struct drm_device *dev)
2463 found = intel_sdvo_init(dev, SDVOB); 2555 found = intel_sdvo_init(dev, SDVOB);
2464 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 2556 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
2465 intel_hdmi_init(dev, SDVOB); 2557 intel_hdmi_init(dev, SDVOB);
2558 if (!found && SUPPORTS_INTEGRATED_DP(dev))
2559 intel_dp_init(dev, DP_B);
2466 } 2560 }
2467 2561
2468 /* Before G4X SDVOC doesn't have its own detect register */ 2562 /* Before G4X SDVOC doesn't have its own detect register */
@@ -2475,7 +2569,11 @@ static void intel_setup_outputs(struct drm_device *dev)
2475 found = intel_sdvo_init(dev, SDVOC); 2569 found = intel_sdvo_init(dev, SDVOC);
2476 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 2570 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
2477 intel_hdmi_init(dev, SDVOC); 2571 intel_hdmi_init(dev, SDVOC);
2572 if (!found && SUPPORTS_INTEGRATED_DP(dev))
2573 intel_dp_init(dev, DP_C);
2478 } 2574 }
2575 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
2576 intel_dp_init(dev, DP_D);
2479 } else 2577 } else
2480 intel_dvo_init(dev); 2578 intel_dvo_init(dev);
2481 2579
@@ -2518,6 +2616,11 @@ static void intel_setup_outputs(struct drm_device *dev)
2518 (1 << 1)); 2616 (1 << 1));
2519 clone_mask = (1 << INTEL_OUTPUT_TVOUT); 2617 clone_mask = (1 << INTEL_OUTPUT_TVOUT);
2520 break; 2618 break;
2619 case INTEL_OUTPUT_DISPLAYPORT:
2620 crtc_mask = ((1 << 0) |
2621 (1 << 1));
2622 clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
2623 break;
2521 } 2624 }
2522 encoder->possible_crtcs = crtc_mask; 2625 encoder->possible_crtcs = crtc_mask;
2523 encoder->possible_clones = intel_connector_clones(dev, clone_mask); 2626 encoder->possible_clones = intel_connector_clones(dev, clone_mask);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 000000000000..8f8d37d5663a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,1153 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include "drmP.h"
30#include "drm.h"
31#include "drm_crtc.h"
32#include "drm_crtc_helper.h"
33#include "intel_drv.h"
34#include "i915_drm.h"
35#include "i915_drv.h"
36#include "intel_dp.h"
37
38#define DP_LINK_STATUS_SIZE 6
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40
41#define DP_LINK_CONFIGURATION_SIZE 9
42
43struct intel_dp_priv {
44 uint32_t output_reg;
45 uint32_t DP;
46 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
47 uint32_t save_DP;
48 uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
49 bool has_audio;
50 int dpms_mode;
51 uint8_t link_bw;
52 uint8_t lane_count;
53 uint8_t dpcd[4];
54 struct intel_output *intel_output;
55 struct i2c_adapter adapter;
56 struct i2c_algo_dp_aux_data algo;
57};
58
59static void
60intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
61 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
62
63static void
64intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
65
66static int
67intel_dp_max_lane_count(struct intel_output *intel_output)
68{
69 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
70 int max_lane_count = 4;
71
72 if (dp_priv->dpcd[0] >= 0x11) {
73 max_lane_count = dp_priv->dpcd[2] & 0x1f;
74 switch (max_lane_count) {
75 case 1: case 2: case 4:
76 break;
77 default:
78 max_lane_count = 4;
79 }
80 }
81 return max_lane_count;
82}
83
84static int
85intel_dp_max_link_bw(struct intel_output *intel_output)
86{
87 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
88 int max_link_bw = dp_priv->dpcd[1];
89
90 switch (max_link_bw) {
91 case DP_LINK_BW_1_62:
92 case DP_LINK_BW_2_7:
93 break;
94 default:
95 max_link_bw = DP_LINK_BW_1_62;
96 break;
97 }
98 return max_link_bw;
99}
100
101static int
102intel_dp_link_clock(uint8_t link_bw)
103{
104 if (link_bw == DP_LINK_BW_2_7)
105 return 270000;
106 else
107 return 162000;
108}
109
110/* I think this is a fiction */
111static int
112intel_dp_link_required(int pixel_clock)
113{
114 return pixel_clock * 3;
115}
116
117static int
118intel_dp_mode_valid(struct drm_connector *connector,
119 struct drm_display_mode *mode)
120{
121 struct intel_output *intel_output = to_intel_output(connector);
122 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
123 int max_lanes = intel_dp_max_lane_count(intel_output);
124
125 if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
126 return MODE_CLOCK_HIGH;
127
128 if (mode->clock < 10000)
129 return MODE_CLOCK_LOW;
130
131 return MODE_OK;
132}
133
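A quick worked instance of the test in intel_dp_mode_valid() above, as a standalone sketch rather than driver code: with link_required = pixel_clock * 3, a 148500 kHz mode (1920x1080@60) needs 445500, which a single 2.7 GHz lane (270000) cannot carry but two such lanes (540000) or four 1.62 GHz lanes (648000) can.

#include <stdio.h>

int main(void)
{
	int pixel_clock = 148500;          /* kHz, 1920x1080@60 */
	int required = pixel_clock * 3;    /* 445500, same "fiction" as above */

	printf("required %d\n", required);
	printf("1 lane  at 2.7 GHz:  %d\n", 270000 * 1);  /* too small */
	printf("2 lanes at 2.7 GHz:  %d\n", 270000 * 2);  /* enough */
	printf("4 lanes at 1.62 GHz: %d\n", 162000 * 4);  /* enough */
	return 0;
}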
134static uint32_t
135pack_aux(uint8_t *src, int src_bytes)
136{
137 int i;
138 uint32_t v = 0;
139
140 if (src_bytes > 4)
141 src_bytes = 4;
142 for (i = 0; i < src_bytes; i++)
143 v |= ((uint32_t) src[i]) << ((3-i) * 8);
144 return v;
145}
146
147static void
148unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
149{
150 int i;
151 if (dst_bytes > 4)
152 dst_bytes = 4;
153 for (i = 0; i < dst_bytes; i++)
154 dst[i] = src >> ((3-i) * 8);
155}
156
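pack_aux() and unpack_aux() above marshal AUX payload bytes into and out of the big-endian data registers, with byte 0 landing in bits 31:24. A minimal userspace sketch of the same packing:

#include <stdio.h>
#include <stdint.h>

/* Same logic as pack_aux() above, outside the kernel. */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t)src[i]) << ((3 - i) * 8);
	return v;
}

int main(void)
{
	uint8_t msg[3] = { 0x90, 0x01, 0x02 };

	printf("0x%08x\n", pack_aux(msg, 3)); /* prints 0x90010200 */
	return 0;
}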
157/* hrawclock is 1/4 the FSB frequency */
158static int
159intel_hrawclk(struct drm_device *dev)
160{
161 struct drm_i915_private *dev_priv = dev->dev_private;
162 uint32_t clkcfg;
163
164 clkcfg = I915_READ(CLKCFG);
165 switch (clkcfg & CLKCFG_FSB_MASK) {
166 case CLKCFG_FSB_400:
167 return 100;
168 case CLKCFG_FSB_533:
169 return 133;
170 case CLKCFG_FSB_667:
171 return 166;
172 case CLKCFG_FSB_800:
173 return 200;
174 case CLKCFG_FSB_1067:
175 return 266;
176 case CLKCFG_FSB_1333:
177 return 333;
178 /* these two are just a guess; one of them might be right */
179 case CLKCFG_FSB_1600:
180 case CLKCFG_FSB_1600_ALT:
181 return 400;
182 default:
183 return 133;
184 }
185}
186
187static int
188intel_dp_aux_ch(struct intel_output *intel_output,
189 uint8_t *send, int send_bytes,
190 uint8_t *recv, int recv_size)
191{
192 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
193 uint32_t output_reg = dp_priv->output_reg;
194 struct drm_device *dev = intel_output->base.dev;
195 struct drm_i915_private *dev_priv = dev->dev_private;
196 uint32_t ch_ctl = output_reg + 0x10;
197 uint32_t ch_data = ch_ctl + 4;
198 int i;
199 int recv_bytes;
200 uint32_t ctl;
201 uint32_t status;
202 uint32_t aux_clock_divider;
203 int try;
204
205 /* The clock divider is based off the hrawclk,
206 * and would like to run at 2MHz. So, take the
207 * hrawclk value and divide by 2 and use that
208 */
209 aux_clock_divider = intel_hrawclk(dev) / 2;
210 /* Must try at least 3 times according to DP spec */
211 for (try = 0; try < 5; try++) {
212 /* Load the send data into the aux channel data registers */
213 for (i = 0; i < send_bytes; i += 4) {
 214			uint32_t d = pack_aux(send + i, send_bytes - i);
215
216 I915_WRITE(ch_data + i, d);
217 }
218
219 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
220 DP_AUX_CH_CTL_TIME_OUT_400us |
221 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
222 (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
223 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
224 DP_AUX_CH_CTL_DONE |
225 DP_AUX_CH_CTL_TIME_OUT_ERROR |
226 DP_AUX_CH_CTL_RECEIVE_ERROR);
227
228 /* Send the command and wait for it to complete */
229 I915_WRITE(ch_ctl, ctl);
230 (void) I915_READ(ch_ctl);
231 for (;;) {
232 udelay(100);
233 status = I915_READ(ch_ctl);
234 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
235 break;
236 }
237
238 /* Clear done status and any errors */
239 I915_WRITE(ch_ctl, (ctl |
240 DP_AUX_CH_CTL_DONE |
241 DP_AUX_CH_CTL_TIME_OUT_ERROR |
242 DP_AUX_CH_CTL_RECEIVE_ERROR));
243 (void) I915_READ(ch_ctl);
244 if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
245 break;
246 }
247
248 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
249 printk(KERN_ERR "dp_aux_ch not done status 0x%08x\n", status);
250 return -EBUSY;
251 }
252
253 /* Check for timeout or receive error.
254 * Timeouts occur when the sink is not connected
255 */
256 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
257 printk(KERN_ERR "dp_aux_ch receive error status 0x%08x\n", status);
258 return -EIO;
259 }
260 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
261 printk(KERN_ERR "dp_aux_ch timeout status 0x%08x\n", status);
262 return -ETIMEDOUT;
263 }
264
265 /* Unload any bytes sent back from the other side */
266 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
267 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
268
269 if (recv_bytes > recv_size)
270 recv_bytes = recv_size;
271
272 for (i = 0; i < recv_bytes; i += 4) {
273 uint32_t d = I915_READ(ch_data + i);
274
275 unpack_aux(d, recv + i, recv_bytes - i);
276 }
277
278 return recv_bytes;
279}
280
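As a worked example of the hrawclk-based divider above: an 800 MT/s FSB reports CLKCFG_FSB_800, so intel_hrawclk() returns 200 and aux_clock_divider becomes 200 / 2 = 100, i.e. the 200 MHz raw clock divided down to the roughly 2 MHz AUX bit clock the comment asks for.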
281/* Write data to the aux channel in native mode */
282static int
283intel_dp_aux_native_write(struct intel_output *intel_output,
284 uint16_t address, uint8_t *send, int send_bytes)
285{
286 int ret;
287 uint8_t msg[20];
288 int msg_bytes;
289 uint8_t ack;
290
291 if (send_bytes > 16)
292 return -1;
293 msg[0] = AUX_NATIVE_WRITE << 4;
294 msg[1] = address >> 8;
295 msg[2] = address;
296 msg[3] = send_bytes - 1;
297 memcpy(&msg[4], send, send_bytes);
298 msg_bytes = send_bytes + 4;
299 for (;;) {
300 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
301 if (ret < 0)
302 return ret;
303 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
304 break;
305 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
306 udelay(100);
307 else
308 return -EIO;
309 }
310 return send_bytes;
311}
312
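The message built in intel_dp_aux_native_write() above is a four-byte header (command, address high, address low, length minus one) followed by the payload. A sketch of the bytes for a one-byte write to DP_TRAINING_PATTERN_SET (0x102), using the same AUX_NATIVE_WRITE encoding; the little demo program itself is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define AUX_NATIVE_WRITE 0x8

int main(void)
{
	uint16_t address = 0x102;        /* DP_TRAINING_PATTERN_SET */
	uint8_t msg[5];
	int i;

	msg[0] = AUX_NATIVE_WRITE << 4;  /* 0x80 */
	msg[1] = address >> 8;           /* 0x01 */
	msg[2] = address & 0xff;         /* 0x02 */
	msg[3] = 1 - 1;                  /* send_bytes - 1 */
	msg[4] = 0x01;                   /* payload: training pattern 1 */

	for (i = 0; i < 5; i++)
		printf("%02x ", msg[i]);     /* 80 01 02 00 01 */
	printf("\n");
	return 0;
}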
313/* Write a single byte to the aux channel in native mode */
314static int
315intel_dp_aux_native_write_1(struct intel_output *intel_output,
316 uint16_t address, uint8_t byte)
317{
318 return intel_dp_aux_native_write(intel_output, address, &byte, 1);
319}
320
321/* read bytes from a native aux channel */
322static int
323intel_dp_aux_native_read(struct intel_output *intel_output,
324 uint16_t address, uint8_t *recv, int recv_bytes)
325{
326 uint8_t msg[4];
327 int msg_bytes;
328 uint8_t reply[20];
329 int reply_bytes;
330 uint8_t ack;
331 int ret;
332
333 msg[0] = AUX_NATIVE_READ << 4;
334 msg[1] = address >> 8;
335 msg[2] = address & 0xff;
336 msg[3] = recv_bytes - 1;
337
338 msg_bytes = 4;
339 reply_bytes = recv_bytes + 1;
340
341 for (;;) {
342 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
343 reply, reply_bytes);
344 if (ret == 0)
345 return -EPROTO;
346 if (ret < 0)
347 return ret;
348 ack = reply[0];
349 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
350 memcpy(recv, reply + 1, ret - 1);
351 return ret - 1;
352 }
353 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
354 udelay(100);
355 else
356 return -EIO;
357 }
358}
359
360static int
361intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
362 uint8_t *send, int send_bytes,
363 uint8_t *recv, int recv_bytes)
364{
365 struct intel_dp_priv *dp_priv = container_of(adapter,
366 struct intel_dp_priv,
367 adapter);
368 struct intel_output *intel_output = dp_priv->intel_output;
369
370 return intel_dp_aux_ch(intel_output,
371 send, send_bytes, recv, recv_bytes);
372}
373
374static int
375intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
376{
377 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
378
379 DRM_ERROR("i2c_init %s\n", name);
380 dp_priv->algo.running = false;
381 dp_priv->algo.address = 0;
382 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
383
384 memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
385 dp_priv->adapter.owner = THIS_MODULE;
386 dp_priv->adapter.class = I2C_CLASS_DDC;
387 strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1);
388 dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0';
389 dp_priv->adapter.algo_data = &dp_priv->algo;
390 dp_priv->adapter.dev.parent = &intel_output->base.kdev;
391
392 return i2c_dp_aux_add_bus(&dp_priv->adapter);
393}
394
395static bool
396intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
397 struct drm_display_mode *adjusted_mode)
398{
399 struct intel_output *intel_output = enc_to_intel_output(encoder);
400 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
401 int lane_count, clock;
402 int max_lane_count = intel_dp_max_lane_count(intel_output);
403 int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
404 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
405
406 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
407 for (clock = 0; clock <= max_clock; clock++) {
408 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
409
410 if (intel_dp_link_required(mode->clock) <= link_avail) {
411 dp_priv->link_bw = bws[clock];
412 dp_priv->lane_count = lane_count;
413 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
414 printk(KERN_ERR "link bw %02x lane count %d clock %d\n",
415 dp_priv->link_bw, dp_priv->lane_count,
416 adjusted_mode->clock);
417 return true;
418 }
419 }
420 }
421 return false;
422}
423
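The loop in intel_dp_mode_fixup() above picks the first (lane count, link rate) pair, smallest lane count and slowest rate first, that satisfies the same *3 bandwidth rule used for mode validation. A standalone sketch of that search; for a 108000 kHz mode it lands on 2 lanes at 1.62 GHz, since 2 * 162000 exactly covers the required 324000:

#include <stdio.h>

int main(void)
{
	static const int link_clock[2] = { 162000, 270000 }; /* kHz */
	int pixel_clock = 108000;                            /* kHz */
	int required = pixel_clock * 3;
	int lanes, clock;

	for (lanes = 1; lanes <= 4; lanes <<= 1)
		for (clock = 0; clock < 2; clock++)
			if (link_clock[clock] * lanes >= required) {
				printf("%d lane(s) at %d kHz\n", lanes, link_clock[clock]);
				return 0;
			}
	printf("mode does not fit\n");
	return 0;
}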
424struct intel_dp_m_n {
425 uint32_t tu;
426 uint32_t gmch_m;
427 uint32_t gmch_n;
428 uint32_t link_m;
429 uint32_t link_n;
430};
431
432static void
433intel_reduce_ratio(uint32_t *num, uint32_t *den)
434{
435 while (*num > 0xffffff || *den > 0xffffff) {
436 *num >>= 1;
437 *den >>= 1;
438 }
439}
440
441static void
442intel_dp_compute_m_n(int bytes_per_pixel,
443 int nlanes,
444 int pixel_clock,
445 int link_clock,
446 struct intel_dp_m_n *m_n)
447{
448 m_n->tu = 64;
449 m_n->gmch_m = pixel_clock * bytes_per_pixel;
450 m_n->gmch_n = link_clock * nlanes;
451 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
452 m_n->link_m = pixel_clock;
453 m_n->link_n = link_clock;
454 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
455}
456
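A worked instance of intel_dp_compute_m_n() above (example values, not taken from this patch): for a 148500 kHz mode on four 270000 kHz lanes with 3 bytes per pixel, gmch_m/gmch_n start as 445500/1080000 and both already fit in 24 bits, so intel_reduce_ratio() leaves them alone; link_m/link_n are simply 148500/270000.

#include <stdio.h>
#include <stdint.h>

/* Mirrors intel_reduce_ratio()/intel_dp_compute_m_n() above for one example mode. */
static void reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

int main(void)
{
	uint32_t gmch_m = 148500 * 3, gmch_n = 270000 * 4;
	uint32_t link_m = 148500, link_n = 270000;

	reduce_ratio(&gmch_m, &gmch_n);
	reduce_ratio(&link_m, &link_n);
	printf("gmch %u/%u link %u/%u\n", gmch_m, gmch_n, link_m, link_n);
	return 0;
}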
457void
458intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
459 struct drm_display_mode *adjusted_mode)
460{
461 struct drm_device *dev = crtc->dev;
462 struct drm_mode_config *mode_config = &dev->mode_config;
463 struct drm_connector *connector;
464 struct drm_i915_private *dev_priv = dev->dev_private;
465 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
466 int lane_count = 4;
467 struct intel_dp_m_n m_n;
468
469 /*
470 * Find the lane count in the intel_output private
471 */
472 list_for_each_entry(connector, &mode_config->connector_list, head) {
473 struct intel_output *intel_output = to_intel_output(connector);
474 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
475
476 if (!connector->encoder || connector->encoder->crtc != crtc)
477 continue;
478
479 if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
480 lane_count = dp_priv->lane_count;
481 break;
482 }
483 }
484
485 /*
486 * Compute the GMCH and Link ratios. The '3' here is
487 * the number of bytes_per_pixel post-LUT, which we always
488 * set up for 8-bits of R/G/B, or 3 bytes total.
489 */
490 intel_dp_compute_m_n(3, lane_count,
491 mode->clock, adjusted_mode->clock, &m_n);
492
493 if (intel_crtc->pipe == 0) {
494 I915_WRITE(PIPEA_GMCH_DATA_M,
495 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
496 m_n.gmch_m);
497 I915_WRITE(PIPEA_GMCH_DATA_N,
498 m_n.gmch_n);
499 I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
500 I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
501 } else {
502 I915_WRITE(PIPEB_GMCH_DATA_M,
503 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
504 m_n.gmch_m);
505 I915_WRITE(PIPEB_GMCH_DATA_N,
506 m_n.gmch_n);
507 I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
508 I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
509 }
510}
511
512static void
513intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
514 struct drm_display_mode *adjusted_mode)
515{
516 struct intel_output *intel_output = enc_to_intel_output(encoder);
517 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
518 struct drm_crtc *crtc = intel_output->enc.crtc;
519 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
520
521 dp_priv->DP = (DP_LINK_TRAIN_OFF |
522 DP_VOLTAGE_0_4 |
523 DP_PRE_EMPHASIS_0 |
524 DP_SYNC_VS_HIGH |
525 DP_SYNC_HS_HIGH);
526
527 switch (dp_priv->lane_count) {
528 case 1:
529 dp_priv->DP |= DP_PORT_WIDTH_1;
530 break;
531 case 2:
532 dp_priv->DP |= DP_PORT_WIDTH_2;
533 break;
534 case 4:
535 dp_priv->DP |= DP_PORT_WIDTH_4;
536 break;
537 }
538 if (dp_priv->has_audio)
539 dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
540
541 memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
542 dp_priv->link_configuration[0] = dp_priv->link_bw;
543 dp_priv->link_configuration[1] = dp_priv->lane_count;
544
545 /*
 546	 * Check for DPCD version >= 1.1,
 547	 * enable enhanced frame stuff in that case
548 */
549 if (dp_priv->dpcd[0] >= 0x11) {
550 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
551 dp_priv->DP |= DP_ENHANCED_FRAMING;
552 }
553
554 if (intel_crtc->pipe == 1)
555 dp_priv->DP |= DP_PIPEB_SELECT;
556}
557
558
559static void
560intel_dp_dpms(struct drm_encoder *encoder, int mode)
561{
562 struct intel_output *intel_output = enc_to_intel_output(encoder);
563 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
564 struct drm_device *dev = intel_output->base.dev;
565 struct drm_i915_private *dev_priv = dev->dev_private;
566 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
567
568 if (mode != DRM_MODE_DPMS_ON) {
569 if (dp_reg & DP_PORT_EN)
570 intel_dp_link_down(intel_output, dp_priv->DP);
571 } else {
572 if (!(dp_reg & DP_PORT_EN))
573 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
574 }
575 dp_priv->dpms_mode = mode;
576}
577
578/*
579 * Fetch AUX CH registers 0x202 - 0x207 which contain
580 * link status information
581 */
582static bool
583intel_dp_get_link_status(struct intel_output *intel_output,
584 uint8_t link_status[DP_LINK_STATUS_SIZE])
585{
586 int ret;
587
588 ret = intel_dp_aux_native_read(intel_output,
589 DP_LANE0_1_STATUS,
590 link_status, DP_LINK_STATUS_SIZE);
591 if (ret != DP_LINK_STATUS_SIZE)
592 return false;
593 return true;
594}
595
596static uint8_t
597intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
598 int r)
599{
600 return link_status[r - DP_LANE0_1_STATUS];
601}
602
603static void
604intel_dp_save(struct drm_connector *connector)
605{
606 struct intel_output *intel_output = to_intel_output(connector);
607 struct drm_device *dev = intel_output->base.dev;
608 struct drm_i915_private *dev_priv = dev->dev_private;
609 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
610
611 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
612 intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
613 dp_priv->save_link_configuration,
614 sizeof (dp_priv->save_link_configuration));
615}
616
617static uint8_t
618intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
619 int lane)
620{
621 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
622 int s = ((lane & 1) ?
623 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
624 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
625 uint8_t l = intel_dp_link_status(link_status, i);
626
627 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
628}
629
630static uint8_t
631intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
632 int lane)
633{
634 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
635 int s = ((lane & 1) ?
636 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
637 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
638 uint8_t l = intel_dp_link_status(link_status, i);
639
640 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
641}
642
643
644#if 0
645static char *voltage_names[] = {
646 "0.4V", "0.6V", "0.8V", "1.2V"
647};
648static char *pre_emph_names[] = {
649 "0dB", "3.5dB", "6dB", "9.5dB"
650};
651static char *link_train_names[] = {
652 "pattern 1", "pattern 2", "idle", "off"
653};
654#endif
655
656/*
657 * These are source-specific values; current Intel hardware supports
658 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
659 */
660#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
661
662static uint8_t
663intel_dp_pre_emphasis_max(uint8_t voltage_swing)
664{
665 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
666 case DP_TRAIN_VOLTAGE_SWING_400:
667 return DP_TRAIN_PRE_EMPHASIS_6;
668 case DP_TRAIN_VOLTAGE_SWING_600:
669 return DP_TRAIN_PRE_EMPHASIS_6;
670 case DP_TRAIN_VOLTAGE_SWING_800:
671 return DP_TRAIN_PRE_EMPHASIS_3_5;
672 case DP_TRAIN_VOLTAGE_SWING_1200:
673 default:
674 return DP_TRAIN_PRE_EMPHASIS_0;
675 }
676}
677
678static void
679intel_get_adjust_train(struct intel_output *intel_output,
680 uint8_t link_status[DP_LINK_STATUS_SIZE],
681 int lane_count,
682 uint8_t train_set[4])
683{
684 uint8_t v = 0;
685 uint8_t p = 0;
686 int lane;
687
688 for (lane = 0; lane < lane_count; lane++) {
689 uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
690 uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
691
692 if (this_v > v)
693 v = this_v;
694 if (this_p > p)
695 p = this_p;
696 }
697
698 if (v >= I830_DP_VOLTAGE_MAX)
699 v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
700
701 if (p >= intel_dp_pre_emphasis_max(v))
702 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
703
704 for (lane = 0; lane < 4; lane++)
705 train_set[lane] = v | p;
706}
707
708static uint32_t
709intel_dp_signal_levels(uint8_t train_set, int lane_count)
710{
711 uint32_t signal_levels = 0;
712
713 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
714 case DP_TRAIN_VOLTAGE_SWING_400:
715 default:
716 signal_levels |= DP_VOLTAGE_0_4;
717 break;
718 case DP_TRAIN_VOLTAGE_SWING_600:
719 signal_levels |= DP_VOLTAGE_0_6;
720 break;
721 case DP_TRAIN_VOLTAGE_SWING_800:
722 signal_levels |= DP_VOLTAGE_0_8;
723 break;
724 case DP_TRAIN_VOLTAGE_SWING_1200:
725 signal_levels |= DP_VOLTAGE_1_2;
726 break;
727 }
728 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
729 case DP_TRAIN_PRE_EMPHASIS_0:
730 default:
731 signal_levels |= DP_PRE_EMPHASIS_0;
732 break;
733 case DP_TRAIN_PRE_EMPHASIS_3_5:
734 signal_levels |= DP_PRE_EMPHASIS_3_5;
735 break;
736 case DP_TRAIN_PRE_EMPHASIS_6:
737 signal_levels |= DP_PRE_EMPHASIS_6;
738 break;
739 case DP_TRAIN_PRE_EMPHASIS_9_5:
740 signal_levels |= DP_PRE_EMPHASIS_9_5;
741 break;
742 }
743 return signal_levels;
744}
745
746static uint8_t
747intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
748 int lane)
749{
750 int i = DP_LANE0_1_STATUS + (lane >> 1);
751 int s = (lane & 1) * 4;
752 uint8_t l = intel_dp_link_status(link_status, i);
753
754 return (l >> s) & 0xf;
755}
756
 757/* Check whether clock recovery is done on all channels */
758static bool
759intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
760{
761 int lane;
762 uint8_t lane_status;
763
764 for (lane = 0; lane < lane_count; lane++) {
765 lane_status = intel_get_lane_status(link_status, lane);
766 if ((lane_status & DP_LANE_CR_DONE) == 0)
767 return false;
768 }
769 return true;
770}
771
772/* Check to see if channel eq is done on all channels */
773#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
774 DP_LANE_CHANNEL_EQ_DONE|\
775 DP_LANE_SYMBOL_LOCKED)
776static bool
777intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
778{
779 uint8_t lane_align;
780 uint8_t lane_status;
781 int lane;
782
783 lane_align = intel_dp_link_status(link_status,
784 DP_LANE_ALIGN_STATUS_UPDATED);
785 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
786 return false;
787 for (lane = 0; lane < lane_count; lane++) {
788 lane_status = intel_get_lane_status(link_status, lane);
789 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
790 return false;
791 }
792 return true;
793}
794
795static bool
796intel_dp_set_link_train(struct intel_output *intel_output,
797 uint32_t dp_reg_value,
798 uint8_t dp_train_pat,
799 uint8_t train_set[4],
800 bool first)
801{
802 struct drm_device *dev = intel_output->base.dev;
803 struct drm_i915_private *dev_priv = dev->dev_private;
804 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
805 int ret;
806
807 I915_WRITE(dp_priv->output_reg, dp_reg_value);
808 POSTING_READ(dp_priv->output_reg);
809 if (first)
810 intel_wait_for_vblank(dev);
811
812 intel_dp_aux_native_write_1(intel_output,
813 DP_TRAINING_PATTERN_SET,
814 dp_train_pat);
815
816 ret = intel_dp_aux_native_write(intel_output,
817 DP_TRAINING_LANE0_SET, train_set, 4);
818 if (ret != 4)
819 return false;
820
821 return true;
822}
823
824static void
825intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
826 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
827{
828 struct drm_device *dev = intel_output->base.dev;
829 struct drm_i915_private *dev_priv = dev->dev_private;
830 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
831 uint8_t train_set[4];
832 uint8_t link_status[DP_LINK_STATUS_SIZE];
833 int i;
834 uint8_t voltage;
835 bool clock_recovery = false;
836 bool channel_eq = false;
837 bool first = true;
838 int tries;
839
840 /* Write the link configuration data */
841 intel_dp_aux_native_write(intel_output, 0x100,
842 link_configuration, DP_LINK_CONFIGURATION_SIZE);
843
844 DP |= DP_PORT_EN;
845 DP &= ~DP_LINK_TRAIN_MASK;
846 memset(train_set, 0, 4);
847 voltage = 0xff;
848 tries = 0;
849 clock_recovery = false;
850 for (;;) {
851 /* Use train_set[0] to set the voltage and pre emphasis values */
852 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
853 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
854
855 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
856 DP_TRAINING_PATTERN_1, train_set, first))
857 break;
858 first = false;
859 /* Set training pattern 1 */
860
861 udelay(100);
862 if (!intel_dp_get_link_status(intel_output, link_status))
863 break;
864
865 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
866 clock_recovery = true;
867 break;
868 }
869
870 /* Check to see if we've tried the max voltage */
871 for (i = 0; i < dp_priv->lane_count; i++)
872 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
873 break;
874 if (i == dp_priv->lane_count)
875 break;
876
877 /* Check to see if we've tried the same voltage 5 times */
878 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
879 ++tries;
880 if (tries == 5)
881 break;
882 } else
883 tries = 0;
884 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
885
886 /* Compute new train_set as requested by target */
887 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
888 }
889
890 /* channel equalization */
891 tries = 0;
892 channel_eq = false;
893 for (;;) {
894 /* Use train_set[0] to set the voltage and pre emphasis values */
895 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
896 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
897
898 /* channel eq pattern */
899 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
900 DP_TRAINING_PATTERN_2, train_set,
901 false))
902 break;
903
904 udelay(400);
905 if (!intel_dp_get_link_status(intel_output, link_status))
906 break;
907
908 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
909 channel_eq = true;
910 break;
911 }
912
913 /* Try 5 times */
914 if (tries > 5)
915 break;
916
917 /* Compute new train_set as requested by target */
918 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
919 ++tries;
920 }
921
922 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
923 POSTING_READ(dp_priv->output_reg);
924 intel_dp_aux_native_write_1(intel_output,
925 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
926}
927
928static void
929intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
930{
931 struct drm_device *dev = intel_output->base.dev;
932 struct drm_i915_private *dev_priv = dev->dev_private;
933 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
934
935 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
936 POSTING_READ(dp_priv->output_reg);
937}
938
939static void
940intel_dp_restore(struct drm_connector *connector)
941{
942 struct intel_output *intel_output = to_intel_output(connector);
943 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
944
945 if (dp_priv->save_DP & DP_PORT_EN)
946 intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
947 else
948 intel_dp_link_down(intel_output, dp_priv->save_DP);
949}
950
951/*
952 * According to DP spec
953 * 5.1.2:
954 * 1. Read DPCD
955 * 2. Configure link according to Receiver Capabilities
956 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
957 * 4. Check link status on receipt of hot-plug interrupt
958 */
959
960static void
961intel_dp_check_link_status(struct intel_output *intel_output)
962{
963 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
964 uint8_t link_status[DP_LINK_STATUS_SIZE];
965
966 if (!intel_output->enc.crtc)
967 return;
968
969 if (!intel_dp_get_link_status(intel_output, link_status)) {
970 intel_dp_link_down(intel_output, dp_priv->DP);
971 return;
972 }
973
974 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
975 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
976}
977
978/**
 979 * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection.
980 *
981 * \return true if DP port is connected.
982 * \return false if DP port is disconnected.
983 */
984static enum drm_connector_status
985intel_dp_detect(struct drm_connector *connector)
986{
987 struct intel_output *intel_output = to_intel_output(connector);
988 struct drm_device *dev = intel_output->base.dev;
989 struct drm_i915_private *dev_priv = dev->dev_private;
990 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
991 uint32_t temp, bit;
992 enum drm_connector_status status;
993
994 dp_priv->has_audio = false;
995
996 temp = I915_READ(PORT_HOTPLUG_EN);
997
998 I915_WRITE(PORT_HOTPLUG_EN,
999 temp |
1000 DPB_HOTPLUG_INT_EN |
1001 DPC_HOTPLUG_INT_EN |
1002 DPD_HOTPLUG_INT_EN);
1003
1004 POSTING_READ(PORT_HOTPLUG_EN);
1005
1006 switch (dp_priv->output_reg) {
1007 case DP_B:
1008 bit = DPB_HOTPLUG_INT_STATUS;
1009 break;
1010 case DP_C:
1011 bit = DPC_HOTPLUG_INT_STATUS;
1012 break;
1013 case DP_D:
1014 bit = DPD_HOTPLUG_INT_STATUS;
1015 break;
1016 default:
1017 return connector_status_unknown;
1018 }
1019
1020 temp = I915_READ(PORT_HOTPLUG_STAT);
1021
1022 if ((temp & bit) == 0)
1023 return connector_status_disconnected;
1024
1025 status = connector_status_disconnected;
1026 if (intel_dp_aux_native_read(intel_output,
1027 0x000, dp_priv->dpcd,
1028 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1029 {
1030 if (dp_priv->dpcd[0] != 0)
1031 status = connector_status_connected;
1032 }
1033 return status;
1034}
1035
1036static int intel_dp_get_modes(struct drm_connector *connector)
1037{
1038 struct intel_output *intel_output = to_intel_output(connector);
1039
1040 /* We should parse the EDID data and find out if it has an audio sink
1041 */
1042
1043 return intel_ddc_get_modes(intel_output);
1044}
1045
1046static void
1047intel_dp_destroy (struct drm_connector *connector)
1048{
1049 struct intel_output *intel_output = to_intel_output(connector);
1050
1051 if (intel_output->i2c_bus)
1052 intel_i2c_destroy(intel_output->i2c_bus);
1053 drm_sysfs_connector_remove(connector);
1054 drm_connector_cleanup(connector);
1055 kfree(intel_output);
1056}
1057
1058static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1059 .dpms = intel_dp_dpms,
1060 .mode_fixup = intel_dp_mode_fixup,
1061 .prepare = intel_encoder_prepare,
1062 .mode_set = intel_dp_mode_set,
1063 .commit = intel_encoder_commit,
1064};
1065
1066static const struct drm_connector_funcs intel_dp_connector_funcs = {
1067 .dpms = drm_helper_connector_dpms,
1068 .save = intel_dp_save,
1069 .restore = intel_dp_restore,
1070 .detect = intel_dp_detect,
1071 .fill_modes = drm_helper_probe_single_connector_modes,
1072 .destroy = intel_dp_destroy,
1073};
1074
1075static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
1076 .get_modes = intel_dp_get_modes,
1077 .mode_valid = intel_dp_mode_valid,
1078 .best_encoder = intel_best_encoder,
1079};
1080
1081static void intel_dp_enc_destroy(struct drm_encoder *encoder)
1082{
1083 drm_encoder_cleanup(encoder);
1084}
1085
1086static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1087 .destroy = intel_dp_enc_destroy,
1088};
1089
1090void
1091intel_dp_hot_plug(struct intel_output *intel_output)
1092{
1093 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1094
1095 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1096 intel_dp_check_link_status(intel_output);
1097}
1098
1099void
1100intel_dp_init(struct drm_device *dev, int output_reg)
1101{
1102 struct drm_i915_private *dev_priv = dev->dev_private;
1103 struct drm_connector *connector;
1104 struct intel_output *intel_output;
1105 struct intel_dp_priv *dp_priv;
1106
1107 intel_output = kcalloc(sizeof(struct intel_output) +
1108 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1109 if (!intel_output)
1110 return;
1111
1112 dp_priv = (struct intel_dp_priv *)(intel_output + 1);
1113
1114 connector = &intel_output->base;
1115 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1116 DRM_MODE_CONNECTOR_DisplayPort);
1117 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1118
1119 intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
1120
1121 connector->interlace_allowed = true;
1122 connector->doublescan_allowed = 0;
1123
1124 dp_priv->intel_output = intel_output;
1125 dp_priv->output_reg = output_reg;
1126 dp_priv->has_audio = false;
1127 dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
1128 intel_output->dev_priv = dp_priv;
1129
1130 drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
1131 DRM_MODE_ENCODER_TMDS);
1132 drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
1133
1134 drm_mode_connector_attach_encoder(&intel_output->base,
1135 &intel_output->enc);
1136 drm_sysfs_connector_add(connector);
1137
1138 /* Set up the DDC bus. */
1139 intel_dp_i2c_init(intel_output,
1140 (output_reg == DP_B) ? "DPDDC-B" :
1141 (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D");
1142 intel_output->ddc_bus = &dp_priv->adapter;
1143 intel_output->hot_plug = intel_dp_hot_plug;
1144
1145 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
1146 * 0xd. Failure to do so will result in spurious interrupts being
1147 * generated on the port when a cable is not attached.
1148 */
1149 if (IS_G4X(dev) && !IS_GM45(dev)) {
1150 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
1151 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
1152 }
1153}
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..2b38054d3b6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright © 2008 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#ifndef _INTEL_DP_H_
24#define _INTEL_DP_H_
25
26/* From the VESA DisplayPort spec */
27
28#define AUX_NATIVE_WRITE 0x8
29#define AUX_NATIVE_READ 0x9
30#define AUX_I2C_WRITE 0x0
31#define AUX_I2C_READ 0x1
32#define AUX_I2C_STATUS 0x2
33#define AUX_I2C_MOT 0x4
34
35#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
36#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
37#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
38#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
39
40#define AUX_I2C_REPLY_ACK (0x0 << 6)
41#define AUX_I2C_REPLY_NACK (0x1 << 6)
42#define AUX_I2C_REPLY_DEFER (0x2 << 6)
43#define AUX_I2C_REPLY_MASK (0x3 << 6)
44
45/* AUX CH addresses */
46#define DP_LINK_BW_SET 0x100
47# define DP_LINK_BW_1_62 0x06
48# define DP_LINK_BW_2_7 0x0a
49
50#define DP_LANE_COUNT_SET 0x101
51# define DP_LANE_COUNT_MASK 0x0f
52# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
53
54#define DP_TRAINING_PATTERN_SET 0x102
55
56# define DP_TRAINING_PATTERN_DISABLE 0
57# define DP_TRAINING_PATTERN_1 1
58# define DP_TRAINING_PATTERN_2 2
59# define DP_TRAINING_PATTERN_MASK 0x3
60
61# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
62# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
63# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
64# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
65# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
66
67# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
68# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
69
70# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
71# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
72# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
73# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
74
75#define DP_TRAINING_LANE0_SET 0x103
76#define DP_TRAINING_LANE1_SET 0x104
77#define DP_TRAINING_LANE2_SET 0x105
78#define DP_TRAINING_LANE3_SET 0x106
79
80# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
81# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
82# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
83# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
84# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
85# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
86# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
87
88# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
89# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
90# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
91# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
92# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
93
94# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
95# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
96
97#define DP_DOWNSPREAD_CTRL 0x107
98# define DP_SPREAD_AMP_0_5 (1 << 4)
99
100#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
101# define DP_SET_ANSI_8B10B (1 << 0)
102
103#define DP_LANE0_1_STATUS 0x202
104#define DP_LANE2_3_STATUS 0x203
105
106# define DP_LANE_CR_DONE (1 << 0)
107# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
108# define DP_LANE_SYMBOL_LOCKED (1 << 2)
109
110#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
111
112#define DP_INTERLANE_ALIGN_DONE (1 << 0)
113#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
114#define DP_LINK_STATUS_UPDATED (1 << 7)
115
116#define DP_SINK_STATUS 0x205
117
118#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
119#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
120
121#define DP_ADJUST_REQUEST_LANE0_1 0x206
122#define DP_ADJUST_REQUEST_LANE2_3 0x207
123
124#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
125#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
126#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
127#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
128#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
129#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
130#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
131#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
132
133struct i2c_algo_dp_aux_data {
134 bool running;
135 u16 address;
136 int (*aux_ch) (struct i2c_adapter *adapter,
137 uint8_t *send, int send_bytes,
138 uint8_t *recv, int recv_bytes);
139};
140
141int
142i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
143
144#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
new file mode 100644
index 000000000000..4e60f14b1a6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_i2c.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright © 2009 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/sched.h>
30#include <linux/i2c.h>
31#include "intel_dp.h"
32
33/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
34
35#define MODE_I2C_START 1
36#define MODE_I2C_WRITE 2
37#define MODE_I2C_READ 4
38#define MODE_I2C_STOP 8
39
40static int
41i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
42 uint8_t write_byte, uint8_t *read_byte)
43{
44 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
45 uint16_t address = algo_data->address;
46 uint8_t msg[5];
47 uint8_t reply[2];
48 int msg_bytes;
49 int reply_bytes;
50 int ret;
51
52 /* Set up the command byte */
53 if (mode & MODE_I2C_READ)
54 msg[0] = AUX_I2C_READ << 4;
55 else
56 msg[0] = AUX_I2C_WRITE << 4;
57
58 if (!(mode & MODE_I2C_STOP))
59 msg[0] |= AUX_I2C_MOT << 4;
60
61 msg[1] = address >> 8;
62 msg[2] = address;
63
64 switch (mode) {
65 case MODE_I2C_WRITE:
66 msg[3] = 0;
67 msg[4] = write_byte;
68 msg_bytes = 5;
69 reply_bytes = 1;
70 break;
71 case MODE_I2C_READ:
72 msg[3] = 0;
73 msg_bytes = 4;
74 reply_bytes = 2;
75 break;
76 default:
77 msg_bytes = 3;
78 reply_bytes = 1;
79 break;
80 }
81
82 for (;;) {
83 ret = (*algo_data->aux_ch)(adapter,
84 msg, msg_bytes,
85 reply, reply_bytes);
86 if (ret < 0) {
87 printk(KERN_ERR "aux_ch failed %d\n", ret);
88 return ret;
89 }
90 switch (reply[0] & AUX_I2C_REPLY_MASK) {
91 case AUX_I2C_REPLY_ACK:
92 if (mode == MODE_I2C_READ) {
93 *read_byte = reply[1];
94 }
95 return reply_bytes - 1;
96 case AUX_I2C_REPLY_NACK:
97 printk(KERN_ERR "aux_ch nack\n");
98 return -EREMOTEIO;
99 case AUX_I2C_REPLY_DEFER:
100 printk(KERN_ERR "aux_ch defer\n");
101 udelay(100);
102 break;
103 default:
104 printk(KERN_ERR "aux_ch invalid reply 0x%02x\n", reply[0]);
105 return -EREMOTEIO;
106 }
107 }
108}
109
110/*
111 * I2C over AUX CH
112 */
113
114/*
115 * Send the address. If the I2C link is running, this 'restarts'
 116 * the connection with the new address; this is used for doing
117 * a write followed by a read (as needed for DDC)
118 */
119static int
120i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
121{
122 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
123 int mode = MODE_I2C_START;
124 int ret;
125
126 if (reading)
127 mode |= MODE_I2C_READ;
128 else
129 mode |= MODE_I2C_WRITE;
130 algo_data->address = address;
131 algo_data->running = true;
132 ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
133 return ret;
134}
135
136/*
137 * Stop the I2C transaction. This closes out the link, sending
138 * a bare address packet with the MOT bit turned off
139 */
140static void
141i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
142{
143 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
144 int mode = MODE_I2C_STOP;
145
146 if (reading)
147 mode |= MODE_I2C_READ;
148 else
149 mode |= MODE_I2C_WRITE;
150 if (algo_data->running) {
151 (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
152 algo_data->running = false;
153 }
154}
155
156/*
 157 * Write a single byte to the current I2C address; the
 158 * I2C link must be running or this returns -EIO
159 */
160static int
161i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
162{
163 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
164 int ret;
165
166 if (!algo_data->running)
167 return -EIO;
168
169 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
170 return ret;
171}
172
173/*
 174 * Read a single byte from the current I2C address; the
175 * I2C link must be running or this returns -EIO
176 */
177static int
178i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
179{
180 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
181 int ret;
182
183 if (!algo_data->running)
184 return -EIO;
185
186 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
187 return ret;
188}
189
190static int
191i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
192 struct i2c_msg *msgs,
193 int num)
194{
195 int ret = 0;
196 bool reading = false;
197 int m;
198 int b;
199
200 for (m = 0; m < num; m++) {
201 u16 len = msgs[m].len;
202 u8 *buf = msgs[m].buf;
203 reading = (msgs[m].flags & I2C_M_RD) != 0;
204 ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
205 if (ret < 0)
206 break;
207 if (reading) {
208 for (b = 0; b < len; b++) {
209 ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
210 if (ret < 0)
211 break;
212 }
213 } else {
214 for (b = 0; b < len; b++) {
215 ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
216 if (ret < 0)
217 break;
218 }
219 }
220 if (ret < 0)
221 break;
222 }
223 if (ret >= 0)
224 ret = num;
225 i2c_algo_dp_aux_stop(adapter, reading);
226 printk(KERN_ERR "dp_aux_xfer return %d\n", ret);
227 return ret;
228}
229
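For context, the adapter registered with this algorithm is what intel_ddc_get_modes() ends up driving for EDID; a DDC fetch shows up here as the usual write-then-read message pair, which the loop above turns into an address/restart, per-byte transfers, and a stop. A hedged sketch of such a caller (i2c_transfer() is the standard kernel entry point; "adapter" would be &dp_priv->adapter set up in intel_dp_i2c_init()):

#include <linux/i2c.h>
#include <linux/errno.h>

/* Sketch only: read the base EDID block over the DP AUX i2c adapter. */
static int dp_aux_read_edid(struct i2c_adapter *adapter, u8 edid[128])
{
	u8 offset = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};

	return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
}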
230static u32
231i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
232{
233 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
234 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
235 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
236 I2C_FUNC_10BIT_ADDR;
237}
238
239static const struct i2c_algorithm i2c_dp_aux_algo = {
240 .master_xfer = i2c_algo_dp_aux_xfer,
241 .functionality = i2c_algo_dp_aux_functionality,
242};
243
244static void
245i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
246{
247 (void) i2c_algo_dp_aux_address(adapter, 0, false);
248 (void) i2c_algo_dp_aux_stop(adapter, false);
249
250}
251
252static int
253i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
254{
255 adapter->algo = &i2c_dp_aux_algo;
256 adapter->retries = 3;
257 i2c_dp_aux_reset_bus(adapter);
258 return 0;
259}
260
261int
262i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
263{
264 int error;
265
266 error = i2c_dp_aux_prepare_bus(adapter);
267 if (error)
268 return error;
269 error = i2c_add_adapter(adapter);
270 return error;
271}
272EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd4b9c5f715e..004541c935a8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -54,6 +54,7 @@
54#define INTEL_OUTPUT_LVDS 4 54#define INTEL_OUTPUT_LVDS 4
55#define INTEL_OUTPUT_TVOUT 5 55#define INTEL_OUTPUT_TVOUT 5
56#define INTEL_OUTPUT_HDMI 6 56#define INTEL_OUTPUT_HDMI 6
57#define INTEL_OUTPUT_DISPLAYPORT 7
57 58
58#define INTEL_DVO_CHIP_NONE 0 59#define INTEL_DVO_CHIP_NONE 0
59#define INTEL_DVO_CHIP_LVDS 1 60#define INTEL_DVO_CHIP_LVDS 1
@@ -65,7 +66,6 @@ struct intel_i2c_chan {
65 u32 reg; /* GPIO reg */ 66 u32 reg; /* GPIO reg */
66 struct i2c_adapter adapter; 67 struct i2c_adapter adapter;
67 struct i2c_algo_bit_data algo; 68 struct i2c_algo_bit_data algo;
68 u8 slave_addr;
69}; 69};
70 70
71struct intel_framebuffer { 71struct intel_framebuffer {
@@ -79,11 +79,12 @@ struct intel_output {
79 79
80 struct drm_encoder enc; 80 struct drm_encoder enc;
81 int type; 81 int type;
82 struct intel_i2c_chan *i2c_bus; /* for control functions */ 82 struct i2c_adapter *i2c_bus;
83 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ 83 struct i2c_adapter *ddc_bus;
84 bool load_detect_temp; 84 bool load_detect_temp;
85 bool needs_tv_clock; 85 bool needs_tv_clock;
86 void *dev_priv; 86 void *dev_priv;
87 void (*hot_plug)(struct intel_output *);
87}; 88};
88 89
89struct intel_crtc { 90struct intel_crtc {
@@ -104,9 +105,9 @@ struct intel_crtc {
104#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) 105#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
105#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 106#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
106 107
107struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, 108struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
108 const char *name); 109 const char *name);
109void intel_i2c_destroy(struct intel_i2c_chan *chan); 110void intel_i2c_destroy(struct i2c_adapter *adapter);
110int intel_ddc_get_modes(struct intel_output *intel_output); 111int intel_ddc_get_modes(struct intel_output *intel_output);
111extern bool intel_ddc_probe(struct intel_output *intel_output); 112extern bool intel_ddc_probe(struct intel_output *intel_output);
112void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 113void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
@@ -116,6 +117,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
116extern void intel_dvo_init(struct drm_device *dev); 117extern void intel_dvo_init(struct drm_device *dev);
117extern void intel_tv_init(struct drm_device *dev); 118extern void intel_tv_init(struct drm_device *dev);
118extern void intel_lvds_init(struct drm_device *dev); 119extern void intel_lvds_init(struct drm_device *dev);
120extern void intel_dp_init(struct drm_device *dev, int dp_reg);
121void
122intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
123 struct drm_display_mode *adjusted_mode);
119 124
120extern void intel_crtc_load_lut(struct drm_crtc *crtc); 125extern void intel_crtc_load_lut(struct drm_crtc *crtc);
121extern void intel_encoder_prepare (struct drm_encoder *encoder); 126extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1ee3007d6ec0..13bff20930e8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev)
384{ 384{
385 struct intel_output *intel_output; 385 struct intel_output *intel_output;
386 struct intel_dvo_device *dvo; 386 struct intel_dvo_device *dvo;
387 struct intel_i2c_chan *i2cbus = NULL; 387 struct i2c_adapter *i2cbus = NULL;
388 int ret = 0; 388 int ret = 0;
389 int i; 389 int i;
390 int gpio_inited = 0;
391 int encoder_type = DRM_MODE_ENCODER_NONE; 390 int encoder_type = DRM_MODE_ENCODER_NONE;
392 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); 391 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
393 if (!intel_output) 392 if (!intel_output)
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev)
420 * It appears that everything is on GPIOE except for panels 419 * It appears that everything is on GPIOE except for panels
421 * on i830 laptops, which are on GPIOB (DVOA). 420 * on i830 laptops, which are on GPIOB (DVOA).
422 */ 421 */
423 if (gpio_inited != gpio) { 422 if (i2cbus != NULL)
424 if (i2cbus != NULL) 423 intel_i2c_destroy(i2cbus);
425 intel_i2c_destroy(i2cbus); 424 if (!(i2cbus = intel_i2c_create(dev, gpio,
426 if (!(i2cbus = intel_i2c_create(dev, gpio, 425 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
427 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { 426 continue;
428 continue;
429 }
430 gpio_inited = gpio;
431 } 427 }
432 428
433 if (dvo->dev_ops!= NULL) 429 if (dvo->dev_ops!= NULL)
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8e28e5993df5..1af7d68e3807 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -870,7 +870,11 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
870 */ 870 */
871void intelfb_restore(void) 871void intelfb_restore(void)
872{ 872{
873 drm_crtc_helper_set_config(&kernelfb_mode); 873 int ret;
874 if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) {
875 printk(KERN_ERR "Failed to restore crtc configuration: %d\n",
876 ret);
877 }
874} 878}
875 879
876static void intelfb_restore_work_fn(struct work_struct *ignored) 880static void intelfb_restore_work_fn(struct work_struct *ignored)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4ea2a651b92c..9e30daae37dc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
31#include "drmP.h" 31#include "drmP.h"
32#include "drm.h" 32#include "drm.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_edid.h"
34#include "intel_drv.h" 35#include "intel_drv.h"
35#include "i915_drm.h" 36#include "i915_drm.h"
36#include "i915_drv.h" 37#include "i915_drv.h"
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
56 sdvox = SDVO_ENCODING_HDMI | 57 sdvox = SDVO_ENCODING_HDMI |
57 SDVO_BORDER_ENABLE | 58 SDVO_BORDER_ENABLE |
58 SDVO_VSYNC_ACTIVE_HIGH | 59 SDVO_VSYNC_ACTIVE_HIGH |
59 SDVO_HSYNC_ACTIVE_HIGH | 60 SDVO_HSYNC_ACTIVE_HIGH;
60 SDVO_NULL_PACKETS_DURING_VSYNC;
61 61
62 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
63 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
@@ -129,20 +129,26 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
129 return true; 129 return true;
130} 130}
131 131
132static void 132static enum drm_connector_status
133intel_hdmi_sink_detect(struct drm_connector *connector) 133intel_hdmi_edid_detect(struct drm_connector *connector)
134{ 134{
135 struct intel_output *intel_output = to_intel_output(connector); 135 struct intel_output *intel_output = to_intel_output(connector);
136 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 136 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
137 struct edid *edid = NULL; 137 struct edid *edid = NULL;
138 enum drm_connector_status status = connector_status_disconnected;
138 139
139 edid = drm_get_edid(&intel_output->base, 140 edid = drm_get_edid(&intel_output->base,
140 &intel_output->ddc_bus->adapter); 141 intel_output->ddc_bus);
141 if (edid != NULL) { 142 hdmi_priv->has_hdmi_sink = false;
142 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 143 if (edid) {
143 kfree(edid); 144 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
145 status = connector_status_connected;
146 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
147 }
144 intel_output->base.display_info.raw_edid = NULL; 148 intel_output->base.display_info.raw_edid = NULL;
149 kfree(edid);
145 } 150 }
151 return status;
146} 152}
147 153
148static enum drm_connector_status 154static enum drm_connector_status
@@ -154,11 +160,7 @@ igdng_hdmi_detect(struct drm_connector *connector)
154 /* FIXME hotplug detect */ 160 /* FIXME hotplug detect */
155 161
156 hdmi_priv->has_hdmi_sink = false; 162 hdmi_priv->has_hdmi_sink = false;
157 intel_hdmi_sink_detect(connector); 163 return intel_hdmi_edid_detect(connector);
158 if (hdmi_priv->has_hdmi_sink)
159 return connector_status_connected;
160 else
161 return connector_status_disconnected;
162} 164}
163 165
164static enum drm_connector_status 166static enum drm_connector_status
@@ -201,10 +203,9 @@ intel_hdmi_detect(struct drm_connector *connector)
201 return connector_status_unknown; 203 return connector_status_unknown;
202 } 204 }
203 205
204 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { 206 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
205 intel_hdmi_sink_detect(connector); 207 return intel_hdmi_edid_detect(connector);
206 return connector_status_connected; 208 else
207 } else
208 return connector_status_disconnected; 209 return connector_status_disconnected;
209} 210}
210 211
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f7061f68d050..62b8bead7652 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high)
124 * @output: driver specific output device 124 * @output: driver specific output device
125 * @reg: GPIO reg to use 125 * @reg: GPIO reg to use
126 * @name: name for this bus 126 * @name: name for this bus
127 * @slave_addr: slave address (if fixed)
127 * 128 *
128 * Creates and registers a new i2c bus with the Linux i2c layer, for use 129 * Creates and registers a new i2c bus with the Linux i2c layer, for use
129 * in output probing and control (e.g. DDC or SDVO control functions). 130 * in output probing and control (e.g. DDC or SDVO control functions).
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high)
139 * %GPIOH 140 * %GPIOH
140 * see PRM for details on how these different busses are used. 141 * see PRM for details on how these different busses are used.
141 */ 142 */
142struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, 143struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
143 const char *name) 144 const char *name)
144{ 145{
145 struct intel_i2c_chan *chan; 146 struct intel_i2c_chan *chan;
146 147
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
174 intel_i2c_quirk_set(dev, false); 175 intel_i2c_quirk_set(dev, false);
175 udelay(20); 176 udelay(20);
176 177
177 return chan; 178 return &chan->adapter;
178 179
179out_free: 180out_free:
180 kfree(chan); 181 kfree(chan);
@@ -187,11 +188,16 @@ out_free:
187 * 188 *
188 * Unregister the adapter from the i2c layer, then free the structure. 189 * Unregister the adapter from the i2c layer, then free the structure.
189 */ 190 */
190void intel_i2c_destroy(struct intel_i2c_chan *chan) 191void intel_i2c_destroy(struct i2c_adapter *adapter)
191{ 192{
192 if (!chan) 193 struct intel_i2c_chan *chan;
194
195 if (!adapter)
193 return; 196 return;
194 197
198 chan = container_of(adapter,
199 struct intel_i2c_chan,
200 adapter);
195 i2c_del_adapter(&chan->adapter); 201 i2c_del_adapter(&chan->adapter);
196 kfree(chan); 202 kfree(chan);
197} 203}
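
With intel_i2c_create() now returning the embedded struct i2c_adapter instead of the wrapper, any code that still needs the wrapper steps back to it with container_of(), exactly as intel_i2c_destroy() does above. A short sketch of the pattern, with hypothetical names:

        #include <linux/kernel.h>
        #include <linux/i2c.h>

        struct example_chan {
                void *drm_dev;              /* driver-private state */
                u32 reg;                    /* GPIO register */
                struct i2c_adapter adapter; /* what callers now hold */
        };

        /* Recover the wrapper from the adapter pointer handed to callers. */
        static struct example_chan *to_example_chan(struct i2c_adapter *adapter)
        {
                return container_of(adapter, struct example_chan, adapter);
        }
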
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f073ed8432e8..9564ca44a977 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -39,6 +39,21 @@
39 39
40#define I915_LVDS "i915_lvds" 40#define I915_LVDS "i915_lvds"
41 41
42/*
 43 * The following four scaling options are defined:
44 * #define DRM_MODE_SCALE_NON_GPU 0
45 * #define DRM_MODE_SCALE_FULLSCREEN 1
46 * #define DRM_MODE_SCALE_NO_SCALE 2
47 * #define DRM_MODE_SCALE_ASPECT 3
48 */
49
50/* Private structure for the integrated LVDS support */
51struct intel_lvds_priv {
52 int fitting_mode;
53 u32 pfit_control;
54 u32 pfit_pgm_ratios;
55};
56
42/** 57/**
43 * Sets the backlight level. 58 * Sets the backlight level.
44 * 59 *
@@ -213,10 +228,27 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
213 struct drm_display_mode *mode, 228 struct drm_display_mode *mode,
214 struct drm_display_mode *adjusted_mode) 229 struct drm_display_mode *adjusted_mode)
215{ 230{
231 /*
 232 * Floating point operation is not supported, so PANEL_RATIO_FACTOR
 233 * is defined to avoid floating point computation when
 234 * calculating the panel ratio.
235 */
236#define PANEL_RATIO_FACTOR 8192
216 struct drm_device *dev = encoder->dev; 237 struct drm_device *dev = encoder->dev;
217 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
218 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 239 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
219 struct drm_encoder *tmp_encoder; 240 struct drm_encoder *tmp_encoder;
241 struct intel_output *intel_output = enc_to_intel_output(encoder);
242 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
243 u32 pfit_control = 0, pfit_pgm_ratios = 0;
244 int left_border = 0, right_border = 0, top_border = 0;
245 int bottom_border = 0;
246 bool border = 0;
247 int panel_ratio, desired_ratio, vert_scale, horiz_scale;
248 int horiz_ratio, vert_ratio;
249 u32 hsync_width, vsync_width;
250 u32 hblank_width, vblank_width;
251 u32 hsync_pos, vsync_pos;
220 252
221 /* Should never happen!! */ 253 /* Should never happen!! */
222 if (!IS_I965G(dev) && intel_crtc->pipe == 0) { 254 if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -232,7 +264,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
232 return false; 264 return false;
233 } 265 }
234 } 266 }
235 267 /* If we don't have a panel mode, there is nothing we can do */
268 if (dev_priv->panel_fixed_mode == NULL)
269 return true;
236 /* 270 /*
237 * If we have timings from the BIOS for the panel, put them in 271 * If we have timings from the BIOS for the panel, put them in
238 * to the adjusted mode. The CRTC will be set up for this mode, 272 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -256,6 +290,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
256 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); 290 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
257 } 291 }
258 292
293 /* Make sure pre-965s set dither correctly */
294 if (!IS_I965G(dev)) {
295 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
296 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
297 }
298
299 /* Native modes don't need fitting */
300 if (adjusted_mode->hdisplay == mode->hdisplay &&
301 adjusted_mode->vdisplay == mode->vdisplay) {
302 pfit_pgm_ratios = 0;
303 border = 0;
304 goto out;
305 }
306
307 /* 965+ wants fuzzy fitting */
308 if (IS_I965G(dev))
309 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
310 PFIT_FILTER_FUZZY;
311
312 hsync_width = adjusted_mode->crtc_hsync_end -
313 adjusted_mode->crtc_hsync_start;
314 vsync_width = adjusted_mode->crtc_vsync_end -
315 adjusted_mode->crtc_vsync_start;
316 hblank_width = adjusted_mode->crtc_hblank_end -
317 adjusted_mode->crtc_hblank_start;
318 vblank_width = adjusted_mode->crtc_vblank_end -
319 adjusted_mode->crtc_vblank_start;
320 /*
321 * Deal with panel fitting options. Figure out how to stretch the
322 * image based on its aspect ratio & the current panel fitting mode.
323 */
324 panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
325 adjusted_mode->vdisplay;
326 desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
327 mode->vdisplay;
328 /*
329 * Enable automatic panel scaling for non-native modes so that they fill
330 * the screen. Should be enabled before the pipe is enabled, according
331 * to register description and PRM.
332 * Change the value here to see the borders for debugging
333 */
334 I915_WRITE(BCLRPAT_A, 0);
335 I915_WRITE(BCLRPAT_B, 0);
336
337 switch (lvds_priv->fitting_mode) {
338 case DRM_MODE_SCALE_NO_SCALE:
339 /*
340 * For centered modes, we have to calculate border widths &
341 * heights and modify the values programmed into the CRTC.
342 */
343 left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
344 right_border = left_border;
345 if (mode->hdisplay & 1)
346 right_border++;
347 top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
348 bottom_border = top_border;
349 if (mode->vdisplay & 1)
350 bottom_border++;
351 /* Set active & border values */
352 adjusted_mode->crtc_hdisplay = mode->hdisplay;
 353		/* Keep the border even */
354 if (right_border & 1)
355 right_border++;
 356		/* use the border directly instead of border minus one */
357 adjusted_mode->crtc_hblank_start = mode->hdisplay +
358 right_border;
 359		/* keep the hblank width constant */
360 adjusted_mode->crtc_hblank_end =
361 adjusted_mode->crtc_hblank_start + hblank_width;
362 /* get the hsync pos relative to hblank start */
363 hsync_pos = (hblank_width - hsync_width) / 2;
 364		/* keep the hsync pos even */
365 if (hsync_pos & 1)
366 hsync_pos++;
367 adjusted_mode->crtc_hsync_start =
368 adjusted_mode->crtc_hblank_start + hsync_pos;
369 /* keep the hsync width constant */
370 adjusted_mode->crtc_hsync_end =
371 adjusted_mode->crtc_hsync_start + hsync_width;
372 adjusted_mode->crtc_vdisplay = mode->vdisplay;
373 /* use the border instead of border minus one */
374 adjusted_mode->crtc_vblank_start = mode->vdisplay +
375 bottom_border;
376 /* keep the vblank width constant */
377 adjusted_mode->crtc_vblank_end =
378 adjusted_mode->crtc_vblank_start + vblank_width;
 379		/* get the vsync start position relative to vblank start */
380 vsync_pos = (vblank_width - vsync_width) / 2;
381 adjusted_mode->crtc_vsync_start =
382 adjusted_mode->crtc_vblank_start + vsync_pos;
383 /* keep the vsync width constant */
384 adjusted_mode->crtc_vsync_end =
385 adjusted_mode->crtc_vblank_start + vsync_width;
386 border = 1;
387 break;
388 case DRM_MODE_SCALE_ASPECT:
 389		/* Scale but preserve the aspect ratio */
390 pfit_control |= PFIT_ENABLE;
391 if (IS_I965G(dev)) {
392 /* 965+ is easy, it does everything in hw */
393 if (panel_ratio > desired_ratio)
394 pfit_control |= PFIT_SCALING_PILLAR;
395 else if (panel_ratio < desired_ratio)
396 pfit_control |= PFIT_SCALING_LETTER;
397 else
398 pfit_control |= PFIT_SCALING_AUTO;
399 } else {
400 /*
401 * For earlier chips we have to calculate the scaling
402 * ratio by hand and program it into the
403 * PFIT_PGM_RATIO register
404 */
405 u32 horiz_bits, vert_bits, bits = 12;
406 horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
407 adjusted_mode->hdisplay;
408 vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
409 adjusted_mode->vdisplay;
410 horiz_scale = adjusted_mode->hdisplay *
411 PANEL_RATIO_FACTOR / mode->hdisplay;
412 vert_scale = adjusted_mode->vdisplay *
413 PANEL_RATIO_FACTOR / mode->vdisplay;
414
415 /* retain aspect ratio */
416 if (panel_ratio > desired_ratio) { /* Pillar */
417 u32 scaled_width;
418 scaled_width = mode->hdisplay * vert_scale /
419 PANEL_RATIO_FACTOR;
420 horiz_ratio = vert_ratio;
421 pfit_control |= (VERT_AUTO_SCALE |
422 VERT_INTERP_BILINEAR |
423 HORIZ_INTERP_BILINEAR);
424 /* Pillar will have left/right borders */
425 left_border = (adjusted_mode->hdisplay -
426 scaled_width) / 2;
427 right_border = left_border;
428 if (mode->hdisplay & 1) /* odd resolutions */
429 right_border++;
 430				/* keep the border even */
431 if (right_border & 1)
432 right_border++;
433 adjusted_mode->crtc_hdisplay = scaled_width;
434 /* use border instead of border minus one */
435 adjusted_mode->crtc_hblank_start =
436 scaled_width + right_border;
437 /* keep the hblank width constant */
438 adjusted_mode->crtc_hblank_end =
439 adjusted_mode->crtc_hblank_start +
440 hblank_width;
441 /*
442 * get the hsync start pos relative to
443 * hblank start
444 */
445 hsync_pos = (hblank_width - hsync_width) / 2;
 446				/* keep the hsync_pos even */
447 if (hsync_pos & 1)
448 hsync_pos++;
449 adjusted_mode->crtc_hsync_start =
450 adjusted_mode->crtc_hblank_start +
451 hsync_pos;
 452				/* keep the hsync width constant */
453 adjusted_mode->crtc_hsync_end =
454 adjusted_mode->crtc_hsync_start +
455 hsync_width;
456 border = 1;
457 } else if (panel_ratio < desired_ratio) { /* letter */
458 u32 scaled_height = mode->vdisplay *
459 horiz_scale / PANEL_RATIO_FACTOR;
460 vert_ratio = horiz_ratio;
461 pfit_control |= (HORIZ_AUTO_SCALE |
462 VERT_INTERP_BILINEAR |
463 HORIZ_INTERP_BILINEAR);
464 /* Letterbox will have top/bottom border */
465 top_border = (adjusted_mode->vdisplay -
466 scaled_height) / 2;
467 bottom_border = top_border;
468 if (mode->vdisplay & 1)
469 bottom_border++;
470 adjusted_mode->crtc_vdisplay = scaled_height;
471 /* use border instead of border minus one */
472 adjusted_mode->crtc_vblank_start =
473 scaled_height + bottom_border;
474 /* keep the vblank width constant */
475 adjusted_mode->crtc_vblank_end =
476 adjusted_mode->crtc_vblank_start +
477 vblank_width;
478 /*
479 * get the vsync start pos relative to
480 * vblank start
481 */
482 vsync_pos = (vblank_width - vsync_width) / 2;
483 adjusted_mode->crtc_vsync_start =
484 adjusted_mode->crtc_vblank_start +
485 vsync_pos;
486 /* keep the vsync width constant */
487 adjusted_mode->crtc_vsync_end =
488 adjusted_mode->crtc_vsync_start +
489 vsync_width;
490 border = 1;
491 } else {
 492				/* Aspects match, let hw scale both directions */
493 pfit_control |= (VERT_AUTO_SCALE |
494 HORIZ_AUTO_SCALE |
495 VERT_INTERP_BILINEAR |
496 HORIZ_INTERP_BILINEAR);
497 }
498 horiz_bits = (1 << bits) * horiz_ratio /
499 PANEL_RATIO_FACTOR;
500 vert_bits = (1 << bits) * vert_ratio /
501 PANEL_RATIO_FACTOR;
502 pfit_pgm_ratios =
503 ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
504 PFIT_VERT_SCALE_MASK) |
505 ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
506 PFIT_HORIZ_SCALE_MASK);
507 }
508 break;
509
510 case DRM_MODE_SCALE_FULLSCREEN:
511 /*
512 * Full scaling, even if it changes the aspect ratio.
513 * Fortunately this is all done for us in hw.
514 */
515 pfit_control |= PFIT_ENABLE;
516 if (IS_I965G(dev))
517 pfit_control |= PFIT_SCALING_AUTO;
518 else
519 pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
520 VERT_INTERP_BILINEAR |
521 HORIZ_INTERP_BILINEAR);
522 break;
523 default:
524 break;
525 }
526
527out:
528 lvds_priv->pfit_control = pfit_control;
529 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
259 /* 530 /*
260 * XXX: It would be nice to support lower refresh rates on the 531 * XXX: It would be nice to support lower refresh rates on the
261 * panels to reduce power consumption, and perhaps match the 532 * panels to reduce power consumption, and perhaps match the
@@ -301,8 +572,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
301{ 572{
302 struct drm_device *dev = encoder->dev; 573 struct drm_device *dev = encoder->dev;
303 struct drm_i915_private *dev_priv = dev->dev_private; 574 struct drm_i915_private *dev_priv = dev->dev_private;
304 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 575 struct intel_output *intel_output = enc_to_intel_output(encoder);
305 u32 pfit_control; 576 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
306 577
307 /* 578 /*
308 * The LVDS pin pair will already have been turned on in the 579 * The LVDS pin pair will already have been turned on in the
@@ -319,22 +590,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
319 * screen. Should be enabled before the pipe is enabled, according to 590 * screen. Should be enabled before the pipe is enabled, according to
320 * register description and PRM. 591 * register description and PRM.
321 */ 592 */
322 if (mode->hdisplay != adjusted_mode->hdisplay || 593 I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
323 mode->vdisplay != adjusted_mode->vdisplay) 594 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
324 pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
325 HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
326 HORIZ_INTERP_BILINEAR);
327 else
328 pfit_control = 0;
329
330 if (!IS_I965G(dev)) {
331 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
332 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
333 }
334 else
335 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
336
337 I915_WRITE(PFIT_CONTROL, pfit_control);
338} 595}
339 596
340/** 597/**
@@ -406,6 +663,34 @@ static int intel_lvds_set_property(struct drm_connector *connector,
406 struct drm_property *property, 663 struct drm_property *property,
407 uint64_t value) 664 uint64_t value)
408{ 665{
666 struct drm_device *dev = connector->dev;
667 struct intel_output *intel_output =
668 to_intel_output(connector);
669
670 if (property == dev->mode_config.scaling_mode_property &&
671 connector->encoder) {
672 struct drm_crtc *crtc = connector->encoder->crtc;
673 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
674 if (value == DRM_MODE_SCALE_NON_GPU) {
675 DRM_DEBUG_KMS(I915_LVDS,
676 "non_GPU property is unsupported\n");
677 return 0;
678 }
679 if (lvds_priv->fitting_mode == value) {
680 /* the LVDS scaling property is not changed */
681 return 0;
682 }
683 lvds_priv->fitting_mode = value;
684 if (crtc && crtc->enabled) {
685 /*
686 * If the CRTC is enabled, the display will be changed
687 * according to the new panel fitting mode.
688 */
689 drm_crtc_helper_set_mode(crtc, &crtc->mode,
690 crtc->x, crtc->y, crtc->fb);
691 }
692 }
693
409 return 0; 694 return 0;
410} 695}
411 696
@@ -456,7 +741,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
456 .callback = intel_no_lvds_dmi_callback, 741 .callback = intel_no_lvds_dmi_callback,
457 .ident = "Apple Mac Mini (Core series)", 742 .ident = "Apple Mac Mini (Core series)",
458 .matches = { 743 .matches = {
459 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 744 DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
460 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), 745 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
461 }, 746 },
462 }, 747 },
@@ -464,7 +749,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
464 .callback = intel_no_lvds_dmi_callback, 749 .callback = intel_no_lvds_dmi_callback,
465 .ident = "Apple Mac Mini (Core 2 series)", 750 .ident = "Apple Mac Mini (Core 2 series)",
466 .matches = { 751 .matches = {
467 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 752 DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
468 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), 753 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
469 }, 754 },
470 }, 755 },
@@ -518,6 +803,7 @@ void intel_lvds_init(struct drm_device *dev)
518 struct drm_encoder *encoder; 803 struct drm_encoder *encoder;
519 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 804 struct drm_display_mode *scan; /* *modes, *bios_mode; */
520 struct drm_crtc *crtc; 805 struct drm_crtc *crtc;
806 struct intel_lvds_priv *lvds_priv;
521 u32 lvds; 807 u32 lvds;
522 int pipe, gpio = GPIOC; 808 int pipe, gpio = GPIOC;
523 809
@@ -531,7 +817,8 @@ void intel_lvds_init(struct drm_device *dev)
531 gpio = PCH_GPIOC; 817 gpio = PCH_GPIOC;
532 } 818 }
533 819
534 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 820 intel_output = kzalloc(sizeof(struct intel_output) +
821 sizeof(struct intel_lvds_priv), GFP_KERNEL);
535 if (!intel_output) { 822 if (!intel_output) {
536 return; 823 return;
537 } 824 }
@@ -553,7 +840,18 @@ void intel_lvds_init(struct drm_device *dev)
553 connector->interlace_allowed = false; 840 connector->interlace_allowed = false;
554 connector->doublescan_allowed = false; 841 connector->doublescan_allowed = false;
555 842
843 lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
844 intel_output->dev_priv = lvds_priv;
845 /* create the scaling mode property */
846 drm_mode_create_scaling_mode_property(dev);
847 /*
 848	 * The initial panel fitting mode will be FULL_SCREEN.
849 */
556 850
851 drm_connector_attach_property(&intel_output->base,
852 dev->mode_config.scaling_mode_property,
853 DRM_MODE_SCALE_FULLSCREEN);
854 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
557 /* 855 /*
558 * LVDS discovery: 856 * LVDS discovery:
559 * 1) check for EDID on DDC 857 * 1) check for EDID on DDC
@@ -649,5 +947,5 @@ failed:
649 if (intel_output->ddc_bus) 947 if (intel_output->ddc_bus)
650 intel_i2c_destroy(intel_output->ddc_bus); 948 intel_i2c_destroy(intel_output->ddc_bus);
651 drm_connector_cleanup(connector); 949 drm_connector_cleanup(connector);
652 kfree(connector); 950 kfree(intel_output);
653} 951}
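
The panel-fitting math above avoids floating point by scaling every ratio by PANEL_RATIO_FACTOR (8192) before dividing, then comparing the scaled integers. A small, self-contained sketch of the same comparison (hypothetical resolutions, standalone C only to show the arithmetic, not driver code):

        #include <stdio.h>

        #define PANEL_RATIO_FACTOR 8192

        int main(void)
        {
                /* hypothetical 1280x800 panel displaying a 1024x768 mode */
                int panel_w = 1280, panel_h = 800;
                int mode_w = 1024, mode_h = 768;

                int panel_ratio   = panel_w * PANEL_RATIO_FACTOR / panel_h;
                int desired_ratio = mode_w * PANEL_RATIO_FACTOR / mode_h;

                if (panel_ratio > desired_ratio)
                        printf("pillarbox: add left/right borders\n");
                else if (panel_ratio < desired_ratio)
                        printf("letterbox: add top/bottom borders\n");
                else
                        printf("aspect ratios match: scale both directions\n");
                return 0;
        }
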
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index e0910fefce87..67e2f4632a24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
53 } 53 }
54 }; 54 };
55 55
56 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); 56 intel_i2c_quirk_set(intel_output->base.dev, true);
57 ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); 57 ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
58 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); 58 intel_i2c_quirk_set(intel_output->base.dev, false);
59
60 if (ret == 2) 59 if (ret == 2)
61 return true; 60 return true;
62 61
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
74 struct edid *edid; 73 struct edid *edid;
75 int ret = 0; 74 int ret = 0;
76 75
77 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); 76 intel_i2c_quirk_set(intel_output->base.dev, true);
78 edid = drm_get_edid(&intel_output->base, 77 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
79 &intel_output->ddc_bus->adapter); 78 intel_i2c_quirk_set(intel_output->base.dev, false);
80 intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
81 if (edid) { 79 if (edid) {
82 drm_mode_connector_update_edid_property(&intel_output->base, 80 drm_mode_connector_update_edid_property(&intel_output->base,
83 edid); 81 edid);
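
intel_ddc_probe() now hands the stored struct i2c_adapter straight to i2c_transfer(); the probe itself is just a dummy write/read pair at the EDID slave address and succeeds only if both messages are ACKed. A hedged sketch of that kind of presence check, assuming the usual 0x50 address and a two-message transfer as in the function above:

        #include <linux/i2c.h>

        static bool example_ddc_probe(struct i2c_adapter *ddc_bus)
        {
                u8 out_buf[] = { 0x0, 0x0 };
                u8 buf[2];
                struct i2c_msg msgs[] = {
                        {
                                .addr  = 0x50,  /* standard EDID slave address */
                                .flags = 0,
                                .len   = 1,
                                .buf   = out_buf,
                        },
                        {
                                .addr  = 0x50,
                                .flags = I2C_M_RD,
                                .len   = 1,
                                .buf   = buf,
                        }
                };

                /* Both messages go through only if something ACKed at 0x50. */
                return i2c_transfer(ddc_bus, msgs, 2) == 2;
        }
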
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9a00adb3a508..f03473779feb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -38,8 +38,7 @@
38#undef SDVO_DEBUG 38#undef SDVO_DEBUG
39#define I915_SDVO "i915_sdvo" 39#define I915_SDVO "i915_sdvo"
40struct intel_sdvo_priv { 40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 41 u8 slave_addr;
42 int slaveaddr;
43 42
44 /* Register for the SDVO device: SDVOB or SDVOC */ 43 /* Register for the SDVO device: SDVOB or SDVOC */
45 int output_device; 44 int output_device;
@@ -146,13 +145,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
146 145
147 struct i2c_msg msgs[] = { 146 struct i2c_msg msgs[] = {
148 { 147 {
149 .addr = sdvo_priv->i2c_bus->slave_addr, 148 .addr = sdvo_priv->slave_addr >> 1,
150 .flags = 0, 149 .flags = 0,
151 .len = 1, 150 .len = 1,
152 .buf = out_buf, 151 .buf = out_buf,
153 }, 152 },
154 { 153 {
155 .addr = sdvo_priv->i2c_bus->slave_addr, 154 .addr = sdvo_priv->slave_addr >> 1,
156 .flags = I2C_M_RD, 155 .flags = I2C_M_RD,
157 .len = 1, 156 .len = 1,
158 .buf = buf, 157 .buf = buf,
@@ -162,7 +161,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
162 out_buf[0] = addr; 161 out_buf[0] = addr;
163 out_buf[1] = 0; 162 out_buf[1] = 0;
164 163
165 if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) 164 if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
166 { 165 {
167 *ch = buf[0]; 166 *ch = buf[0];
168 return true; 167 return true;
@@ -175,10 +174,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
175static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, 174static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
176 u8 ch) 175 u8 ch)
177{ 176{
177 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
178 u8 out_buf[2]; 178 u8 out_buf[2];
179 struct i2c_msg msgs[] = { 179 struct i2c_msg msgs[] = {
180 { 180 {
181 .addr = intel_output->i2c_bus->slave_addr, 181 .addr = sdvo_priv->slave_addr >> 1,
182 .flags = 0, 182 .flags = 0,
183 .len = 2, 183 .len = 2,
184 .buf = out_buf, 184 .buf = out_buf,
@@ -188,7 +188,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
188 out_buf[0] = addr; 188 out_buf[0] = addr;
189 out_buf[1] = ch; 189 out_buf[1] = ch;
190 190
191 if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) 191 if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
192 { 192 {
193 return true; 193 return true;
194 } 194 }
@@ -1369,9 +1369,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1369 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1369 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1370 struct edid *edid = NULL; 1370 struct edid *edid = NULL;
1371 1371
1372 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1373 edid = drm_get_edid(&intel_output->base, 1372 edid = drm_get_edid(&intel_output->base,
1374 &intel_output->ddc_bus->adapter); 1373 intel_output->ddc_bus);
1375 if (edid != NULL) { 1374 if (edid != NULL) {
1376 sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); 1375 sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
1377 kfree(edid); 1376 kfree(edid);
@@ -1549,7 +1548,6 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1549static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1548static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1550{ 1549{
1551 struct intel_output *intel_output = to_intel_output(connector); 1550 struct intel_output *intel_output = to_intel_output(connector);
1552 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1553 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1551 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1554 1552
1555 /* 1553 /*
@@ -1557,8 +1555,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1557 * Assume that the preferred modes are 1555 * Assume that the preferred modes are
1558 * arranged in priority order. 1556 * arranged in priority order.
1559 */ 1557 */
1560 /* set the bus switch and get the modes */
1561 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1562 intel_ddc_get_modes(intel_output); 1558 intel_ddc_get_modes(intel_output);
1563 if (list_empty(&connector->probed_modes) == false) 1559 if (list_empty(&connector->probed_modes) == false)
1564 return; 1560 return;
@@ -1709,7 +1705,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
1709 1705
1710 list_for_each_entry(connector, 1706 list_for_each_entry(connector,
1711 &dev->mode_config.connector_list, head) { 1707 &dev->mode_config.connector_list, head) {
1712 if (to_intel_output(connector)->ddc_bus == chan) { 1708 if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
1713 intel_output = to_intel_output(connector); 1709 intel_output = to_intel_output(connector);
1714 break; 1710 break;
1715 } 1711 }
@@ -1723,7 +1719,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1723 struct intel_output *intel_output; 1719 struct intel_output *intel_output;
1724 struct intel_sdvo_priv *sdvo_priv; 1720 struct intel_sdvo_priv *sdvo_priv;
1725 struct i2c_algo_bit_data *algo_data; 1721 struct i2c_algo_bit_data *algo_data;
1726 struct i2c_algorithm *algo; 1722 const struct i2c_algorithm *algo;
1727 1723
1728 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; 1724 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
1729 intel_output = 1725 intel_output =
@@ -1733,7 +1729,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1733 return -EINVAL; 1729 return -EINVAL;
1734 1730
1735 sdvo_priv = intel_output->dev_priv; 1731 sdvo_priv = intel_output->dev_priv;
1736 algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; 1732 algo = intel_output->i2c_bus->algo;
1737 1733
1738 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); 1734 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1739 return algo->master_xfer(i2c_adap, msgs, num); 1735 return algo->master_xfer(i2c_adap, msgs, num);
@@ -1785,13 +1781,11 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1785 struct drm_connector *connector; 1781 struct drm_connector *connector;
1786 struct intel_output *intel_output; 1782 struct intel_output *intel_output;
1787 struct intel_sdvo_priv *sdvo_priv; 1783 struct intel_sdvo_priv *sdvo_priv;
1788 struct intel_i2c_chan *i2cbus = NULL; 1784
1789 struct intel_i2c_chan *ddcbus = NULL;
1790 int connector_type; 1785 int connector_type;
1791 u8 ch[0x40]; 1786 u8 ch[0x40];
1792 int i; 1787 int i;
1793 int encoder_type, output_id; 1788 int encoder_type;
1794 u8 slave_addr;
1795 1789
1796 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 1790 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
1797 if (!intel_output) { 1791 if (!intel_output) {
@@ -1799,29 +1793,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1799 } 1793 }
1800 1794
1801 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 1795 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
1796 sdvo_priv->output_device = output_device;
1797
1798 intel_output->dev_priv = sdvo_priv;
1802 intel_output->type = INTEL_OUTPUT_SDVO; 1799 intel_output->type = INTEL_OUTPUT_SDVO;
1803 1800
1804 /* setup the DDC bus. */ 1801 /* setup the DDC bus. */
1805 if (output_device == SDVOB) 1802 if (output_device == SDVOB)
1806 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 1803 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
1807 else 1804 else
1808 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 1805 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1809 1806
1810 if (!i2cbus) 1807 if (!intel_output->i2c_bus)
1811 goto err_inteloutput; 1808 goto err_inteloutput;
1812 1809
1813 slave_addr = intel_sdvo_get_slave_addr(dev, output_device); 1810 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
1814 sdvo_priv->i2c_bus = i2cbus;
1815 1811
1816 if (output_device == SDVOB) { 1812 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
1817 output_id = 1; 1813 intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
1818 } else {
1819 output_id = 2;
1820 }
1821 sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
1822 sdvo_priv->output_device = output_device;
1823 intel_output->i2c_bus = i2cbus;
1824 intel_output->dev_priv = sdvo_priv;
1825 1814
1826 /* Read the regs to test if we can talk to the device */ 1815 /* Read the regs to test if we can talk to the device */
1827 for (i = 0; i < 0x40; i++) { 1816 for (i = 0; i < 0x40; i++) {
@@ -1835,17 +1824,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1835 1824
1836 /* setup the DDC bus. */ 1825 /* setup the DDC bus. */
1837 if (output_device == SDVOB) 1826 if (output_device == SDVOB)
1838 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 1827 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
1839 else 1828 else
1840 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 1829 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
1841 1830
1842 if (ddcbus == NULL) 1831 if (intel_output->ddc_bus == NULL)
1843 goto err_i2c; 1832 goto err_i2c;
1844 1833
1845 intel_sdvo_i2c_bit_algo.functionality = 1834 /* Wrap with our custom algo which switches to DDC mode */
1846 intel_output->i2c_bus->adapter.algo->functionality; 1835 intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
1847 ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
1848 intel_output->ddc_bus = ddcbus;
1849 1836
 1850	/* In default case sdvo lvds is false */ 1837
1851 sdvo_priv->is_lvds = false; 1838 sdvo_priv->is_lvds = false;
@@ -1965,9 +1952,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1965 return true; 1952 return true;
1966 1953
1967err_i2c: 1954err_i2c:
1968 if (ddcbus != NULL) 1955 if (intel_output->ddc_bus != NULL)
1969 intel_i2c_destroy(intel_output->ddc_bus); 1956 intel_i2c_destroy(intel_output->ddc_bus);
1970 intel_i2c_destroy(intel_output->i2c_bus); 1957 if (intel_output->i2c_bus != NULL)
1958 intel_i2c_destroy(intel_output->i2c_bus);
1971err_inteloutput: 1959err_inteloutput:
1972 kfree(intel_output); 1960 kfree(intel_output);
1973 1961
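
Two details of the SDVO conversion are easy to miss: the slave address read from the BIOS is an 8-bit bus address, so it is shifted right once to get the 7-bit address struct i2c_msg expects, and the DDC adapter keeps its bit-banging algo_data but swaps in a wrapper algorithm whose master_xfer flips the SDVO bus switch before delegating. A hedged sketch of that wrapping pattern (hypothetical names; the delegate is the original bit-banging master_xfer):

        #include <linux/i2c.h>

        /* kept from the adapter being wrapped */
        static const struct i2c_algorithm *example_inner_algo;

        static int example_mux_xfer(struct i2c_adapter *adap,
                                    struct i2c_msg *msgs, int num)
        {
                /* point the SDVO device at its DDC bus first (driver-specific),
                 * then let the original bit-banging algorithm do the transfer */
                return example_inner_algo->master_xfer(adap, msgs, num);
        }

        static struct i2c_algorithm example_mux_algo = {
                .master_xfer = example_mux_xfer,
        };

        static void example_wrap_adapter(struct i2c_adapter *ddc_bus)
        {
                example_inner_algo = ddc_bus->algo;
                /* keep reporting the inner algorithm's functionality() */
                example_mux_algo.functionality = example_inner_algo->functionality;
                ddc_bus->algo = &example_mux_algo;
        }
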
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 50d7ed70b338..a43c98e3f077 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1383 /* 1383 /*
1384 * Detect TV by polling) 1384 * Detect TV by polling)
1385 */ 1385 */
1386 if (intel_output->load_detect_temp) { 1386 save_tv_dac = tv_dac;
1387 /* TV not currently running, prod it with destructive detect */ 1387 tv_ctl = I915_READ(TV_CTL);
1388 save_tv_dac = tv_dac; 1388 save_tv_ctl = tv_ctl;
1389 tv_ctl = I915_READ(TV_CTL); 1389 tv_ctl &= ~TV_ENC_ENABLE;
1390 save_tv_ctl = tv_ctl; 1390 tv_ctl &= ~TV_TEST_MODE_MASK;
1391 tv_ctl &= ~TV_ENC_ENABLE; 1391 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1392 tv_ctl &= ~TV_TEST_MODE_MASK; 1392 tv_dac &= ~TVDAC_SENSE_MASK;
1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1393 tv_dac &= ~DAC_A_MASK;
1394 tv_dac &= ~TVDAC_SENSE_MASK; 1394 tv_dac &= ~DAC_B_MASK;
1395 tv_dac &= ~DAC_A_MASK; 1395 tv_dac &= ~DAC_C_MASK;
1396 tv_dac &= ~DAC_B_MASK; 1396 tv_dac |= (TVDAC_STATE_CHG_EN |
1397 tv_dac &= ~DAC_C_MASK; 1397 TVDAC_A_SENSE_CTL |
1398 tv_dac |= (TVDAC_STATE_CHG_EN | 1398 TVDAC_B_SENSE_CTL |
1399 TVDAC_A_SENSE_CTL | 1399 TVDAC_C_SENSE_CTL |
1400 TVDAC_B_SENSE_CTL | 1400 DAC_CTL_OVERRIDE |
1401 TVDAC_C_SENSE_CTL | 1401 DAC_A_0_7_V |
1402 DAC_CTL_OVERRIDE | 1402 DAC_B_0_7_V |
1403 DAC_A_0_7_V | 1403 DAC_C_0_7_V);
1404 DAC_B_0_7_V | 1404 I915_WRITE(TV_CTL, tv_ctl);
1405 DAC_C_0_7_V); 1405 I915_WRITE(TV_DAC, tv_dac);
1406 I915_WRITE(TV_CTL, tv_ctl); 1406 intel_wait_for_vblank(dev);
1407 I915_WRITE(TV_DAC, tv_dac); 1407 tv_dac = I915_READ(TV_DAC);
1408 intel_wait_for_vblank(dev); 1408 I915_WRITE(TV_DAC, save_tv_dac);
1409 tv_dac = I915_READ(TV_DAC); 1409 I915_WRITE(TV_CTL, save_tv_ctl);
1410 I915_WRITE(TV_DAC, save_tv_dac); 1410 intel_wait_for_vblank(dev);
1411 I915_WRITE(TV_CTL, save_tv_ctl);
1412 intel_wait_for_vblank(dev);
1413 }
1414 /* 1411 /*
1415 * A B C 1412 * A B C
1416 * 0 1 1 Composite 1413 * 0 1 1 Composite
@@ -1561,8 +1558,7 @@ intel_tv_destroy (struct drm_connector *connector)
1561 1558
1562 drm_sysfs_connector_remove(connector); 1559 drm_sysfs_connector_remove(connector);
1563 drm_connector_cleanup(connector); 1560 drm_connector_cleanup(connector);
1564 drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv), 1561 kfree(intel_output);
1565 DRM_MEM_DRIVER);
1566} 1562}
1567 1563
1568 1564
@@ -1695,8 +1691,8 @@ intel_tv_init(struct drm_device *dev)
1695 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1691 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1696 return; 1692 return;
1697 1693
1698 intel_output = drm_calloc(1, sizeof(struct intel_output) + 1694 intel_output = kzalloc(sizeof(struct intel_output) +
1699 sizeof(struct intel_tv_priv), DRM_MEM_DRIVER); 1695 sizeof(struct intel_tv_priv), GFP_KERNEL);
1700 if (!intel_output) { 1696 if (!intel_output) {
1701 return; 1697 return;
1702 } 1698 }
@@ -1730,8 +1726,8 @@ intel_tv_init(struct drm_device *dev)
1730 connector->doublescan_allowed = false; 1726 connector->doublescan_allowed = false;
1731 1727
1732 /* Create TV properties then attach current values */ 1728 /* Create TV properties then attach current values */
1733 tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, 1729 tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES,
1734 DRM_MEM_DRIVER); 1730 GFP_KERNEL);
1735 if (!tv_format_names) 1731 if (!tv_format_names)
1736 goto out; 1732 goto out;
1737 for (i = 0; i < NUM_TV_MODES; i++) 1733 for (i = 0; i < NUM_TV_MODES; i++)
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 7a6bf9ffc5a3..6c67a02910c8 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -254,23 +254,20 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
254 int i; 254 int i;
255 DRM_DEBUG("count=%d\n", dma->buf_count); 255 DRM_DEBUG("count=%d\n", dma->buf_count);
256 256
257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 257 dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
258 if (dev_priv->head == NULL) 258 if (dev_priv->head == NULL)
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); 261 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
263 262
264 for (i = 0; i < dma->buf_count; i++) { 263 for (i = 0; i < dma->buf_count; i++) {
265 buf = dma->buflist[i]; 264 buf = dma->buflist[i];
266 buf_priv = buf->dev_private; 265 buf_priv = buf->dev_private;
267 266
268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 267 entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
269 if (entry == NULL) 268 if (entry == NULL)
270 return -ENOMEM; 269 return -ENOMEM;
271 270
272 memset(entry, 0, sizeof(drm_mga_freelist_t));
273
274 entry->next = dev_priv->head->next; 271 entry->next = dev_priv->head->next;
275 entry->prev = dev_priv->head; 272 entry->prev = dev_priv->head;
276 SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); 273 SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
@@ -301,7 +298,7 @@ static void mga_freelist_cleanup(struct drm_device * dev)
301 entry = dev_priv->head; 298 entry = dev_priv->head;
302 while (entry) { 299 while (entry) {
303 next = entry->next; 300 next = entry->next;
304 drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 301 kfree(entry);
305 entry = next; 302 entry = next;
306 } 303 }
307 304
@@ -399,12 +396,11 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
399 drm_mga_private_t *dev_priv; 396 drm_mga_private_t *dev_priv;
400 int ret; 397 int ret;
401 398
402 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 399 dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
403 if (!dev_priv) 400 if (!dev_priv)
404 return -ENOMEM; 401 return -ENOMEM;
405 402
406 dev->dev_private = (void *)dev_priv; 403 dev->dev_private = (void *)dev_priv;
407 memset(dev_priv, 0, sizeof(drm_mga_private_t));
408 404
409 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 405 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
410 dev_priv->chipset = flags; 406 dev_priv->chipset = flags;
@@ -1150,7 +1146,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
1150 */ 1146 */
1151int mga_driver_unload(struct drm_device * dev) 1147int mga_driver_unload(struct drm_device * dev)
1152{ 1148{
1153 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 1149 kfree(dev->dev_private);
1154 dev->dev_private = NULL; 1150 dev->dev_private = NULL;
1155 1151
1156 return 0; 1152 return 0;
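
The mga hunks above and the r128 hunks that follow are mechanical conversions from the DRM memory wrappers to the standard kernel allocators: drm_alloc()+memset() becomes kzalloc(), drm_alloc() becomes kmalloc(), and drm_free() becomes kfree(), which no longer needs the size or memory-area arguments. In sketch form, with hypothetical names:

        #include <linux/slab.h>

        struct example_priv { int usec_timeout; };

        static struct example_priv *example_load(void)
        {
                /* old: p = drm_alloc(sizeof(*p), DRM_MEM_DRIVER);
                 *      memset(p, 0, sizeof(*p)); */
                struct example_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

                if (!p)
                        return NULL;
                p->usec_timeout = 10000;
                return p;
        }

        static void example_unload(struct example_priv *p)
        {
                /* old: drm_free(p, sizeof(*p), DRM_MEM_DRIVER); */
                kfree(p);
        }
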
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 077c0455a6b9..c75fd3564040 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -353,12 +353,10 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
353 353
354 DRM_DEBUG("\n"); 354 DRM_DEBUG("\n");
355 355
356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); 356 dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL);
357 if (dev_priv == NULL) 357 if (dev_priv == NULL)
358 return -ENOMEM; 358 return -ENOMEM;
359 359
360 memset(dev_priv, 0, sizeof(drm_r128_private_t));
361
362 dev_priv->is_pci = init->is_pci; 360 dev_priv->is_pci = init->is_pci;
363 361
364 if (dev_priv->is_pci && !dev->sg) { 362 if (dev_priv->is_pci && !dev->sg) {
@@ -619,8 +617,7 @@ int r128_do_cleanup_cce(struct drm_device * dev)
619 ("failed to cleanup PCI GART!\n"); 617 ("failed to cleanup PCI GART!\n");
620 } 618 }
621 619
622 drm_free(dev->dev_private, sizeof(drm_r128_private_t), 620 kfree(dev->dev_private);
623 DRM_MEM_DRIVER);
624 dev->dev_private = NULL; 621 dev->dev_private = NULL;
625 } 622 }
626 623
@@ -768,18 +765,17 @@ static int r128_freelist_init(struct drm_device * dev)
768 drm_r128_freelist_t *entry; 765 drm_r128_freelist_t *entry;
769 int i; 766 int i;
770 767
771 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 768 dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
772 if (dev_priv->head == NULL) 769 if (dev_priv->head == NULL)
773 return -ENOMEM; 770 return -ENOMEM;
774 771
775 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
776 dev_priv->head->age = R128_BUFFER_USED; 772 dev_priv->head->age = R128_BUFFER_USED;
777 773
778 for (i = 0; i < dma->buf_count; i++) { 774 for (i = 0; i < dma->buf_count; i++) {
779 buf = dma->buflist[i]; 775 buf = dma->buflist[i];
780 buf_priv = buf->dev_private; 776 buf_priv = buf->dev_private;
781 777
782 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 778 entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
783 if (!entry) 779 if (!entry)
784 return -ENOMEM; 780 return -ENOMEM;
785 781
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index f7a5b5740764..026a48c95c8f 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -910,24 +910,24 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
910 } 910 }
911 911
912 buffer_size = depth->n * sizeof(u32); 912 buffer_size = depth->n * sizeof(u32);
913 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 913 buffer = kmalloc(buffer_size, GFP_KERNEL);
914 if (buffer == NULL) 914 if (buffer == NULL)
915 return -ENOMEM; 915 return -ENOMEM;
916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
917 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 917 kfree(buffer);
918 return -EFAULT; 918 return -EFAULT;
919 } 919 }
920 920
921 mask_size = depth->n * sizeof(u8); 921 mask_size = depth->n * sizeof(u8);
922 if (depth->mask) { 922 if (depth->mask) {
923 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 923 mask = kmalloc(mask_size, GFP_KERNEL);
924 if (mask == NULL) { 924 if (mask == NULL) {
925 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 925 kfree(buffer);
926 return -ENOMEM; 926 return -ENOMEM;
927 } 927 }
928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
929 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 929 kfree(buffer);
930 drm_free(mask, mask_size, DRM_MEM_BUFS); 930 kfree(mask);
931 return -EFAULT; 931 return -EFAULT;
932 } 932 }
933 933
@@ -954,7 +954,7 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
954 } 954 }
955 } 955 }
956 956
957 drm_free(mask, mask_size, DRM_MEM_BUFS); 957 kfree(mask);
958 } else { 958 } else {
959 for (i = 0; i < count; i++, x++) { 959 for (i = 0; i < count; i++, x++) {
960 BEGIN_RING(6); 960 BEGIN_RING(6);
@@ -978,7 +978,7 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
978 } 978 }
979 } 979 }
980 980
981 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 981 kfree(buffer);
982 982
983 return 0; 983 return 0;
984} 984}
@@ -1000,54 +1000,54 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1000 1000
1001 xbuf_size = count * sizeof(*x); 1001 xbuf_size = count * sizeof(*x);
1002 ybuf_size = count * sizeof(*y); 1002 ybuf_size = count * sizeof(*y);
1003 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1003 x = kmalloc(xbuf_size, GFP_KERNEL);
1004 if (x == NULL) { 1004 if (x == NULL) {
1005 return -ENOMEM; 1005 return -ENOMEM;
1006 } 1006 }
1007 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1007 y = kmalloc(ybuf_size, GFP_KERNEL);
1008 if (y == NULL) { 1008 if (y == NULL) {
1009 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1009 kfree(x);
1010 return -ENOMEM; 1010 return -ENOMEM;
1011 } 1011 }
1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1013 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1013 kfree(x);
1014 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1014 kfree(y);
1015 return -EFAULT; 1015 return -EFAULT;
1016 } 1016 }
1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { 1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
1018 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1018 kfree(x);
1019 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1019 kfree(y);
1020 return -EFAULT; 1020 return -EFAULT;
1021 } 1021 }
1022 1022
1023 buffer_size = depth->n * sizeof(u32); 1023 buffer_size = depth->n * sizeof(u32);
1024 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 1024 buffer = kmalloc(buffer_size, GFP_KERNEL);
1025 if (buffer == NULL) { 1025 if (buffer == NULL) {
1026 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1026 kfree(x);
1027 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1027 kfree(y);
1028 return -ENOMEM; 1028 return -ENOMEM;
1029 } 1029 }
1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
1031 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1031 kfree(x);
1032 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1032 kfree(y);
1033 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1033 kfree(buffer);
1034 return -EFAULT; 1034 return -EFAULT;
1035 } 1035 }
1036 1036
1037 if (depth->mask) { 1037 if (depth->mask) {
1038 mask_size = depth->n * sizeof(u8); 1038 mask_size = depth->n * sizeof(u8);
1039 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 1039 mask = kmalloc(mask_size, GFP_KERNEL);
1040 if (mask == NULL) { 1040 if (mask == NULL) {
1041 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1041 kfree(x);
1042 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1042 kfree(y);
1043 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1043 kfree(buffer);
1044 return -ENOMEM; 1044 return -ENOMEM;
1045 } 1045 }
1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
1047 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1047 kfree(x);
1048 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1048 kfree(y);
1049 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1049 kfree(buffer);
1050 drm_free(mask, mask_size, DRM_MEM_BUFS); 1050 kfree(mask);
1051 return -EFAULT; 1051 return -EFAULT;
1052 } 1052 }
1053 1053
@@ -1074,7 +1074,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1074 } 1074 }
1075 } 1075 }
1076 1076
1077 drm_free(mask, mask_size, DRM_MEM_BUFS); 1077 kfree(mask);
1078 } else { 1078 } else {
1079 for (i = 0; i < count; i++) { 1079 for (i = 0; i < count; i++) {
1080 BEGIN_RING(6); 1080 BEGIN_RING(6);
@@ -1098,9 +1098,9 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1098 } 1098 }
1099 } 1099 }
1100 1100
1101 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1101 kfree(x);
1102 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1102 kfree(y);
1103 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1103 kfree(buffer);
1104 1104
1105 return 0; 1105 return 0;
1106} 1106}
@@ -1167,23 +1167,23 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1167 1167
1168 xbuf_size = count * sizeof(*x); 1168 xbuf_size = count * sizeof(*x);
1169 ybuf_size = count * sizeof(*y); 1169 ybuf_size = count * sizeof(*y);
1170 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1170 x = kmalloc(xbuf_size, GFP_KERNEL);
1171 if (x == NULL) { 1171 if (x == NULL) {
1172 return -ENOMEM; 1172 return -ENOMEM;
1173 } 1173 }
1174 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1174 y = kmalloc(ybuf_size, GFP_KERNEL);
1175 if (y == NULL) { 1175 if (y == NULL) {
1176 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1176 kfree(x);
1177 return -ENOMEM; 1177 return -ENOMEM;
1178 } 1178 }
1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1180 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1180 kfree(x);
1181 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1181 kfree(y);
1182 return -EFAULT; 1182 return -EFAULT;
1183 } 1183 }
1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { 1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
1185 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1185 kfree(x);
1186 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1186 kfree(y);
1187 return -EFAULT; 1187 return -EFAULT;
1188 } 1188 }
1189 1189
@@ -1210,8 +1210,8 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1210 ADVANCE_RING(); 1210 ADVANCE_RING();
1211 } 1211 }
1212 1212
1213 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1213 kfree(x);
1214 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1214 kfree(y);
1215 1215
1216 return 0; 1216 return 0;
1217} 1217}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5225f5be7ea7..c550932a108f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -551,6 +551,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
551 /* cp setup */ 551 /* cp setup */
552 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 552 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
553 WREG32(RADEON_CP_RB_CNTL, 553 WREG32(RADEON_CP_RB_CNTL,
554#ifdef __BIG_ENDIAN
555 RADEON_BUF_SWAP_32BIT |
556#endif
554 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | 557 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
555 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 558 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
556 REG_SET(RADEON_MAX_FETCH, max_fetch) | 559 REG_SET(RADEON_MAX_FETCH, max_fetch) |
@@ -644,7 +647,7 @@ int r100_cp_reset(struct radeon_device *rdev)
644 */ 647 */
645int r100_cs_parse_packet0(struct radeon_cs_parser *p, 648int r100_cs_parse_packet0(struct radeon_cs_parser *p,
646 struct radeon_cs_packet *pkt, 649 struct radeon_cs_packet *pkt,
647 unsigned *auth, unsigned n, 650 const unsigned *auth, unsigned n,
648 radeon_packet0_check_t check) 651 radeon_packet0_check_t check)
649{ 652{
650 unsigned reg; 653 unsigned reg;
@@ -654,6 +657,10 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
654 657
655 idx = pkt->idx + 1; 658 idx = pkt->idx + 1;
656 reg = pkt->reg; 659 reg = pkt->reg;
 660	/* Check that the register falls into the register range
 661	 * covered by the number of entries (n) in the
 662	 * safe register bitmap.
663 */
657 if (pkt->one_reg_wr) { 664 if (pkt->one_reg_wr) {
658 if ((reg >> 7) > n) { 665 if ((reg >> 7) > n) {
659 return -EINVAL; 666 return -EINVAL;
@@ -683,24 +690,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
683 return 0; 690 return 0;
684} 691}
685 692
686int r100_cs_parse_packet3(struct radeon_cs_parser *p,
687 struct radeon_cs_packet *pkt,
688 unsigned *auth, unsigned n,
689 radeon_packet3_check_t check)
690{
691 unsigned i, m;
692
693 if ((pkt->opcode >> 5) > n) {
694 return -EINVAL;
695 }
696 i = pkt->opcode >> 5;
697 m = 1 << (pkt->opcode & 31);
698 if (auth[i] & m) {
699 return check(p, pkt);
700 }
701 return 0;
702}
703
704void r100_cs_dump_packet(struct radeon_cs_parser *p, 693void r100_cs_dump_packet(struct radeon_cs_parser *p,
705 struct radeon_cs_packet *pkt) 694 struct radeon_cs_packet *pkt)
706{ 695{
@@ -901,6 +890,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
901 return 0; 890 return 0;
902} 891}
903 892
893int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
894 struct radeon_cs_packet *pkt,
895 struct radeon_object *robj)
896{
897 struct radeon_cs_chunk *ib_chunk;
898 unsigned idx;
899
900 ib_chunk = &p->chunks[p->chunk_ib_idx];
901 idx = pkt->idx + 1;
902 if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
903 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
904 "(need %u have %lu) !\n",
905 ib_chunk->kdata[idx+2] + 1,
906 radeon_object_size(robj));
907 return -EINVAL;
908 }
909 return 0;
910}
911
904static int r100_packet3_check(struct radeon_cs_parser *p, 912static int r100_packet3_check(struct radeon_cs_parser *p,
905 struct radeon_cs_packet *pkt) 913 struct radeon_cs_packet *pkt)
906{ 914{
@@ -954,6 +962,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
954 return r; 962 return r;
955 } 963 }
956 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 964 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
965 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
966 if (r) {
967 return r;
968 }
957 break; 969 break;
958 case 0x23: 970 case 0x23:
959 /* FIXME: cleanup */ 971 /* FIXME: cleanup */
@@ -999,18 +1011,18 @@ int r100_cs_parse(struct radeon_cs_parser *p)
999 } 1011 }
1000 p->idx += pkt.count + 2; 1012 p->idx += pkt.count + 2;
1001 switch (pkt.type) { 1013 switch (pkt.type) {
1002 case PACKET_TYPE0: 1014 case PACKET_TYPE0:
1003 r = r100_packet0_check(p, &pkt); 1015 r = r100_packet0_check(p, &pkt);
1004 break; 1016 break;
1005 case PACKET_TYPE2: 1017 case PACKET_TYPE2:
1006 break; 1018 break;
1007 case PACKET_TYPE3: 1019 case PACKET_TYPE3:
1008 r = r100_packet3_check(p, &pkt); 1020 r = r100_packet3_check(p, &pkt);
1009 break; 1021 break;
1010 default: 1022 default:
1011 DRM_ERROR("Unknown packet type %d !\n", 1023 DRM_ERROR("Unknown packet type %d !\n",
1012 pkt.type); 1024 pkt.type);
1013 return -EINVAL; 1025 return -EINVAL;
1014 } 1026 }
1015 if (r) { 1027 if (r) {
1016 return r; 1028 return r;
@@ -1267,12 +1279,6 @@ void r100_vram_info(struct radeon_device *rdev)
1267 1279
1268 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 1280 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1269 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 1281 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1270 if (rdev->mc.aper_size > rdev->mc.vram_size) {
1271 /* Why does some hw doesn't have CONFIG_MEMSIZE properly
1272 * setup ? */
1273 rdev->mc.vram_size = rdev->mc.aper_size;
1274 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1275 }
1276} 1282}
1277 1283
1278 1284
@@ -1352,6 +1358,11 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1352 } 1358 }
1353} 1359}
1354 1360
1361int r100_init(struct radeon_device *rdev)
1362{
1363 return 0;
1364}
1365
1355/* 1366/*
1356 * Debugfs info 1367 * Debugfs info
1357 */ 1368 */
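
The new comment in r100_cs_parse_packet0 refers to a per-ASIC safe-register bitmap indexed by register offset. The lookup loop itself lies outside the hunks shown here, so the sketch below is only a reading of the scheme: one bit per 32-bit register, 32 registers per bitmap word, a set bit meaning the write must be routed through the per-ASIC check callback instead of being passed straight through. Treat the shift values and the bit polarity as assumptions; only the (reg >> 7) > n bound is taken from the hunk above.

	/* illustrative only; demo_classify_reg() is not part of the diff */
	static int demo_classify_reg(const unsigned *bm, unsigned n, unsigned reg)
	{
		unsigned word = reg >> 7;		/* 32 dword registers == 128 bytes per word */
		unsigned bit = (reg >> 2) & 31;		/* one bit per dword-aligned register */

		if (word > n)
			return -EINVAL;			/* beyond the table: reject the CS */
		if (bm[word] & (1u << bit))
			return 1;			/* must go through the packet0 check callback */
		return 0;				/* written through unmodified */
	}
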
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index f5870a099d4f..e2ed5bc08170 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -48,14 +48,13 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
48 struct radeon_cs_reloc **cs_reloc); 48 struct radeon_cs_reloc **cs_reloc);
49int r100_cs_parse_packet0(struct radeon_cs_parser *p, 49int r100_cs_parse_packet0(struct radeon_cs_parser *p,
50 struct radeon_cs_packet *pkt, 50 struct radeon_cs_packet *pkt,
51 unsigned *auth, unsigned n, 51 const unsigned *auth, unsigned n,
52 radeon_packet0_check_t check); 52 radeon_packet0_check_t check);
53int r100_cs_parse_packet3(struct radeon_cs_parser *p,
54 struct radeon_cs_packet *pkt,
55 unsigned *auth, unsigned n,
56 radeon_packet3_check_t check);
57void r100_cs_dump_packet(struct radeon_cs_parser *p, 53void r100_cs_dump_packet(struct radeon_cs_parser *p,
58 struct radeon_cs_packet *pkt); 54 struct radeon_cs_packet *pkt);
55int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
56 struct radeon_cs_packet *pkt,
57 struct radeon_object *robj);
59 58
60/* This file gathers functions specific to: 59/* This file gathers functions specific to:
61 * r300,r350,rv350,rv370,rv380 60 * r300,r350,rv350,rv370,rv380
@@ -288,7 +287,7 @@ int r300_copy_dma(struct radeon_device *rdev,
288 return r; 287 return r;
289 } 288 }
290 /* Must wait for 2D idle & clean before DMA or hangs might happen */ 289 /* Must wait for 2D idle & clean before DMA or hangs might happen */
291 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 290 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
292 radeon_ring_write(rdev, (1 << 16)); 291 radeon_ring_write(rdev, (1 << 16));
293 for (i = 0; i < num_loops; i++) { 292 for (i = 0; i < num_loops; i++) {
294 cur_size = size; 293 cur_size = size;
@@ -319,7 +318,7 @@ void r300_ring_start(struct radeon_device *rdev)
319 318
320 /* Sub pixel 1/12 so we can have 4K rendering according to doc */ 319 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
321 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); 320 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
322 switch (rdev->num_gb_pipes) { 321 switch(rdev->num_gb_pipes) {
323 case 2: 322 case 2:
324 gb_tile_config |= R300_PIPE_COUNT_R300; 323 gb_tile_config |= R300_PIPE_COUNT_R300;
325 break; 324 break;
@@ -452,8 +451,8 @@ void r300_gpu_init(struct radeon_device *rdev)
452 case 4: 451 case 4:
453 gb_tile_config |= R300_PIPE_COUNT_R420; 452 gb_tile_config |= R300_PIPE_COUNT_R420;
454 break; 453 break;
455 case 1:
456 default: 454 default:
455 case 1:
457 gb_tile_config |= R300_PIPE_COUNT_RV350; 456 gb_tile_config |= R300_PIPE_COUNT_RV350;
458 break; 457 break;
459 } 458 }
@@ -725,18 +724,120 @@ struct r300_cs_track_cb {
725 unsigned offset; 724 unsigned offset;
726}; 725};
727 726
727struct r300_cs_track_array {
728 struct radeon_object *robj;
729 unsigned esize;
730};
731
732struct r300_cs_track_texture {
733 struct radeon_object *robj;
734 unsigned pitch;
735 unsigned width;
736 unsigned height;
737 unsigned num_levels;
738 unsigned cpp;
739 unsigned tex_coord_type;
740 unsigned txdepth;
741 unsigned width_11;
742 unsigned height_11;
743 bool use_pitch;
744 bool enabled;
745 bool roundup_w;
746 bool roundup_h;
747};
748
728struct r300_cs_track { 749struct r300_cs_track {
729 unsigned num_cb; 750 unsigned num_cb;
730 unsigned maxy; 751 unsigned maxy;
731 struct r300_cs_track_cb cb[4]; 752 unsigned vtx_size;
732 struct r300_cs_track_cb zb; 753 unsigned vap_vf_cntl;
733 bool z_enabled; 754 unsigned immd_dwords;
755 unsigned num_arrays;
756 unsigned max_indx;
757 struct r300_cs_track_array arrays[11];
758 struct r300_cs_track_cb cb[4];
759 struct r300_cs_track_cb zb;
760 struct r300_cs_track_texture textures[16];
761 bool z_enabled;
734}; 762};
735 763
764static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
765{
766 DRM_ERROR("pitch %d\n", t->pitch);
767 DRM_ERROR("width %d\n", t->width);
768 DRM_ERROR("height %d\n", t->height);
769 DRM_ERROR("num levels %d\n", t->num_levels);
770 DRM_ERROR("depth %d\n", t->txdepth);
771 DRM_ERROR("bpp %d\n", t->cpp);
772 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
773 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
774 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
775}
776
777static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
778 struct r300_cs_track *track)
779{
780 struct radeon_object *robj;
781 unsigned long size;
782 unsigned u, i, w, h;
783
784 for (u = 0; u < 16; u++) {
785 if (!track->textures[u].enabled)
786 continue;
787 robj = track->textures[u].robj;
788 if (robj == NULL) {
789 DRM_ERROR("No texture bound to unit %u\n", u);
790 return -EINVAL;
791 }
792 size = 0;
793 for (i = 0; i <= track->textures[u].num_levels; i++) {
794 if (track->textures[u].use_pitch) {
795 w = track->textures[u].pitch / (1 << i);
796 } else {
797 w = track->textures[u].width / (1 << i);
798 if (rdev->family >= CHIP_RV515)
799 w |= track->textures[u].width_11;
800 if (track->textures[u].roundup_w)
801 w = roundup_pow_of_two(w);
802 }
803 h = track->textures[u].height / (1 << i);
804 if (rdev->family >= CHIP_RV515)
805 h |= track->textures[u].height_11;
806 if (track->textures[u].roundup_h)
807 h = roundup_pow_of_two(h);
808 size += w * h;
809 }
810 size *= track->textures[u].cpp;
811 switch (track->textures[u].tex_coord_type) {
812 case 0:
813 break;
814 case 1:
815 size *= (1 << track->textures[u].txdepth);
816 break;
817 case 2:
818 size *= 6;
819 break;
820 default:
821 DRM_ERROR("Invalid texture coordinate type %u for unit "
822 "%u\n", track->textures[u].tex_coord_type, u);
823 return -EINVAL;
824 }
825 if (size > radeon_object_size(robj)) {
826 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
827 "%lu\n", u, size, radeon_object_size(robj));
828 r300_cs_track_texture_print(&track->textures[u]);
829 return -EINVAL;
830 }
831 }
832 return 0;
833}
834
736int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track) 835int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
737{ 836{
738 unsigned i; 837 unsigned i;
739 unsigned long size; 838 unsigned long size;
839 unsigned prim_walk;
840 unsigned nverts;
740 841
741 for (i = 0; i < track->num_cb; i++) { 842 for (i = 0; i < track->num_cb; i++) {
742 if (track->cb[i].robj == NULL) { 843 if (track->cb[i].robj == NULL) {
@@ -769,7 +870,59 @@ int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
769 return -EINVAL; 870 return -EINVAL;
770 } 871 }
771 } 872 }
772 return 0; 873 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
874 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
875 switch (prim_walk) {
876 case 1:
877 for (i = 0; i < track->num_arrays; i++) {
878 size = track->arrays[i].esize * track->max_indx * 4;
879 if (track->arrays[i].robj == NULL) {
880 DRM_ERROR("(PW %u) Vertex array %u no buffer "
881 "bound\n", prim_walk, i);
882 return -EINVAL;
883 }
884 if (size > radeon_object_size(track->arrays[i].robj)) {
 885				DRM_ERROR("(PW %u) Vertex array %u needs %lu dwords "
 886					  "but has %lu dwords\n", prim_walk, i,
887 size >> 2,
888 radeon_object_size(track->arrays[i].robj) >> 2);
889 DRM_ERROR("Max indices %u\n", track->max_indx);
890 return -EINVAL;
891 }
892 }
893 break;
894 case 2:
895 for (i = 0; i < track->num_arrays; i++) {
896 size = track->arrays[i].esize * (nverts - 1) * 4;
897 if (track->arrays[i].robj == NULL) {
898 DRM_ERROR("(PW %u) Vertex array %u no buffer "
899 "bound\n", prim_walk, i);
900 return -EINVAL;
901 }
902 if (size > radeon_object_size(track->arrays[i].robj)) {
 903				DRM_ERROR("(PW %u) Vertex array %u needs %lu dwords "
 904					  "but has %lu dwords\n", prim_walk, i, size >> 2,
905 radeon_object_size(track->arrays[i].robj) >> 2);
906 return -EINVAL;
907 }
908 }
909 break;
910 case 3:
911 size = track->vtx_size * nverts;
912 if (size != track->immd_dwords) {
 913			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
914 track->immd_dwords, size);
915 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
916 nverts, track->vtx_size);
917 return -EINVAL;
918 }
919 break;
920 default:
921 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
922 prim_walk);
923 return -EINVAL;
924 }
925 return r300_cs_track_texture_check(rdev, track);
773} 926}
774 927
775static inline void r300_cs_track_clear(struct r300_cs_track *track) 928static inline void r300_cs_track_clear(struct r300_cs_track *track)
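
As a worked example of the footprint estimate in r300_cs_track_texture_check above (levels 0 through num_levels are summed inclusively): a 2D ARGB8888 texture with cpp = 4, width = height = 256 and num_levels = 2 accumulates 256*256 + 128*128 + 64*64 = 86016 texels, so the bound radeon_object must hold at least 86016 * 4 = 344064 bytes; a cube map of the same size (tex_coord_type 2) would need six times that. The numbers are illustrative, not taken from the diff.
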
@@ -789,9 +942,33 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)
789 track->zb.pitch = 8192; 942 track->zb.pitch = 8192;
790 track->zb.cpp = 4; 943 track->zb.cpp = 4;
791 track->zb.offset = 0; 944 track->zb.offset = 0;
945 track->vtx_size = 0x7F;
946 track->immd_dwords = 0xFFFFFFFFUL;
947 track->num_arrays = 11;
948 track->max_indx = 0x00FFFFFFUL;
949 for (i = 0; i < track->num_arrays; i++) {
950 track->arrays[i].robj = NULL;
951 track->arrays[i].esize = 0x7F;
952 }
953 for (i = 0; i < 16; i++) {
954 track->textures[i].pitch = 16536;
955 track->textures[i].width = 16536;
956 track->textures[i].height = 16536;
957 track->textures[i].width_11 = 1 << 11;
958 track->textures[i].height_11 = 1 << 11;
959 track->textures[i].num_levels = 12;
960 track->textures[i].txdepth = 16;
961 track->textures[i].cpp = 64;
962 track->textures[i].tex_coord_type = 1;
963 track->textures[i].robj = NULL;
 964		/* CS IB emission code makes sure texture units are disabled */
965 track->textures[i].enabled = false;
966 track->textures[i].roundup_w = true;
967 track->textures[i].roundup_h = true;
968 }
792} 969}
793 970
794static unsigned r300_auth_reg[] = { 971static const unsigned r300_reg_safe_bm[159] = {
795 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 972 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
796 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 973 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
797 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 974 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -808,7 +985,7 @@ static unsigned r300_auth_reg[] = {
808 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 985 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
809 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, 986 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
810 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 987 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
811 0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000, 988 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
812 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, 989 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
813 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 990 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
814 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 991 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -824,9 +1001,9 @@ static unsigned r300_auth_reg[] = {
824 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 1001 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
825 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 1002 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
826 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 1003 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
827 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF, 1004 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
828 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, 1005 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
829 0x00000000, 0x00000000, 0xFFFF0000, 0x00000000, 1006 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
830 0x00000000, 0x0000C100, 0x00000000, 0x00000000, 1007 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
831 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1008 0x00000000, 0x00000000, 0x00000000, 0x00000000,
832 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, 1009 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
@@ -848,8 +1025,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
848 1025
849 ib = p->ib->ptr; 1026 ib = p->ib->ptr;
850 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1027 ib_chunk = &p->chunks[p->chunk_ib_idx];
851 track = (struct r300_cs_track *)p->track; 1028 track = (struct r300_cs_track*)p->track;
852 switch (reg) { 1029 switch(reg) {
853 case RADEON_DST_PITCH_OFFSET: 1030 case RADEON_DST_PITCH_OFFSET:
854 case RADEON_SRC_PITCH_OFFSET: 1031 case RADEON_SRC_PITCH_OFFSET:
855 r = r100_cs_packet_next_reloc(p, &reloc); 1032 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -907,6 +1084,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
907 case R300_TX_OFFSET_0+52: 1084 case R300_TX_OFFSET_0+52:
908 case R300_TX_OFFSET_0+56: 1085 case R300_TX_OFFSET_0+56:
909 case R300_TX_OFFSET_0+60: 1086 case R300_TX_OFFSET_0+60:
1087 i = (reg - R300_TX_OFFSET_0) >> 2;
910 r = r100_cs_packet_next_reloc(p, &reloc); 1088 r = r100_cs_packet_next_reloc(p, &reloc);
911 if (r) { 1089 if (r) {
912 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1090 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -915,11 +1093,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
915 return r; 1093 return r;
916 } 1094 }
917 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1095 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1096 track->textures[i].robj = reloc->robj;
918 break; 1097 break;
919 /* Tracked registers */ 1098 /* Tracked registers */
1099 case 0x2084:
1100 /* VAP_VF_CNTL */
1101 track->vap_vf_cntl = ib_chunk->kdata[idx];
1102 break;
1103 case 0x20B4:
1104 /* VAP_VTX_SIZE */
1105 track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
1106 break;
1107 case 0x2134:
1108 /* VAP_VF_MAX_VTX_INDX */
1109 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
1110 break;
920 case 0x43E4: 1111 case 0x43E4:
921 /* SC_SCISSOR1 */ 1112 /* SC_SCISSOR1 */
922
923 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; 1113 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
924 if (p->rdev->family < CHIP_RV515) { 1114 if (p->rdev->family < CHIP_RV515) {
925 track->maxy -= 1440; 1115 track->maxy -= 1440;
@@ -994,8 +1184,166 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
994 /* ZB_DEPTHPITCH */ 1184 /* ZB_DEPTHPITCH */
995 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; 1185 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
996 break; 1186 break;
1187 case 0x4104:
1188 for (i = 0; i < 16; i++) {
1189 bool enabled;
1190
1191 enabled = !!(ib_chunk->kdata[idx] & (1 << i));
1192 track->textures[i].enabled = enabled;
1193 }
1194 break;
1195 case 0x44C0:
1196 case 0x44C4:
1197 case 0x44C8:
1198 case 0x44CC:
1199 case 0x44D0:
1200 case 0x44D4:
1201 case 0x44D8:
1202 case 0x44DC:
1203 case 0x44E0:
1204 case 0x44E4:
1205 case 0x44E8:
1206 case 0x44EC:
1207 case 0x44F0:
1208 case 0x44F4:
1209 case 0x44F8:
1210 case 0x44FC:
1211 /* TX_FORMAT1_[0-15] */
1212 i = (reg - 0x44C0) >> 2;
1213 tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
1214 track->textures[i].tex_coord_type = tmp;
1215 switch ((ib_chunk->kdata[idx] & 0x1F)) {
1216 case 0:
1217 case 2:
1218 case 5:
1219 case 18:
1220 case 20:
1221 case 21:
1222 track->textures[i].cpp = 1;
1223 break;
1224 case 1:
1225 case 3:
1226 case 6:
1227 case 7:
1228 case 10:
1229 case 11:
1230 case 19:
1231 case 22:
1232 case 24:
1233 track->textures[i].cpp = 2;
1234 break;
1235 case 4:
1236 case 8:
1237 case 9:
1238 case 12:
1239 case 13:
1240 case 23:
1241 case 25:
1242 case 27:
1243 case 30:
1244 track->textures[i].cpp = 4;
1245 break;
1246 case 14:
1247 case 26:
1248 case 28:
1249 track->textures[i].cpp = 8;
1250 break;
1251 case 29:
1252 track->textures[i].cpp = 16;
1253 break;
1254 default:
1255 DRM_ERROR("Invalid texture format %u\n",
1256 (ib_chunk->kdata[idx] & 0x1F));
1257 return -EINVAL;
1258 break;
1259 }
1260 break;
1261 case 0x4400:
1262 case 0x4404:
1263 case 0x4408:
1264 case 0x440C:
1265 case 0x4410:
1266 case 0x4414:
1267 case 0x4418:
1268 case 0x441C:
1269 case 0x4420:
1270 case 0x4424:
1271 case 0x4428:
1272 case 0x442C:
1273 case 0x4430:
1274 case 0x4434:
1275 case 0x4438:
1276 case 0x443C:
1277 /* TX_FILTER0_[0-15] */
1278 i = (reg - 0x4400) >> 2;
 1279		tmp = ib_chunk->kdata[idx] & 0x7;
1280 if (tmp == 2 || tmp == 4 || tmp == 6) {
1281 track->textures[i].roundup_w = false;
1282 }
 1283		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
1284 if (tmp == 2 || tmp == 4 || tmp == 6) {
1285 track->textures[i].roundup_h = false;
1286 }
1287 break;
1288 case 0x4500:
1289 case 0x4504:
1290 case 0x4508:
1291 case 0x450C:
1292 case 0x4510:
1293 case 0x4514:
1294 case 0x4518:
1295 case 0x451C:
1296 case 0x4520:
1297 case 0x4524:
1298 case 0x4528:
1299 case 0x452C:
1300 case 0x4530:
1301 case 0x4534:
1302 case 0x4538:
1303 case 0x453C:
1304 /* TX_FORMAT2_[0-15] */
1305 i = (reg - 0x4500) >> 2;
1306 tmp = ib_chunk->kdata[idx] & 0x3FFF;
1307 track->textures[i].pitch = tmp + 1;
1308 if (p->rdev->family >= CHIP_RV515) {
1309 tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
1310 track->textures[i].width_11 = tmp;
1311 tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
1312 track->textures[i].height_11 = tmp;
1313 }
1314 break;
1315 case 0x4480:
1316 case 0x4484:
1317 case 0x4488:
1318 case 0x448C:
1319 case 0x4490:
1320 case 0x4494:
1321 case 0x4498:
1322 case 0x449C:
1323 case 0x44A0:
1324 case 0x44A4:
1325 case 0x44A8:
1326 case 0x44AC:
1327 case 0x44B0:
1328 case 0x44B4:
1329 case 0x44B8:
1330 case 0x44BC:
1331 /* TX_FORMAT0_[0-15] */
1332 i = (reg - 0x4480) >> 2;
1333 tmp = ib_chunk->kdata[idx] & 0x7FF;
1334 track->textures[i].width = tmp + 1;
1335 tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
1336 track->textures[i].height = tmp + 1;
1337 tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
1338 track->textures[i].num_levels = tmp;
1339 tmp = ib_chunk->kdata[idx] & (1 << 31);
1340 track->textures[i].use_pitch = !!tmp;
1341 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
1342 track->textures[i].txdepth = tmp;
1343 break;
997 default: 1344 default:
998 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx); 1345 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1346 reg, idx);
999 return -EINVAL; 1347 return -EINVAL;
1000 } 1348 }
1001 return 0; 1349 return 0;
@@ -1015,11 +1363,12 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1015 ib = p->ib->ptr; 1363 ib = p->ib->ptr;
1016 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1364 ib_chunk = &p->chunks[p->chunk_ib_idx];
1017 idx = pkt->idx + 1; 1365 idx = pkt->idx + 1;
1018 track = (struct r300_cs_track *)p->track; 1366 track = (struct r300_cs_track*)p->track;
1019 switch (pkt->opcode) { 1367 switch(pkt->opcode) {
1020 case PACKET3_3D_LOAD_VBPNTR: 1368 case PACKET3_3D_LOAD_VBPNTR:
1021 c = ib_chunk->kdata[idx++]; 1369 c = ib_chunk->kdata[idx++] & 0x1F;
1022 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1370 track->num_arrays = c;
1371 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1023 r = r100_cs_packet_next_reloc(p, &reloc); 1372 r = r100_cs_packet_next_reloc(p, &reloc);
1024 if (r) { 1373 if (r) {
1025 DRM_ERROR("No reloc for packet3 %d\n", 1374 DRM_ERROR("No reloc for packet3 %d\n",
@@ -1028,6 +1377,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1028 return r; 1377 return r;
1029 } 1378 }
1030 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1379 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1380 track->arrays[i + 0].robj = reloc->robj;
1381 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1382 track->arrays[i + 0].esize &= 0x7F;
1031 r = r100_cs_packet_next_reloc(p, &reloc); 1383 r = r100_cs_packet_next_reloc(p, &reloc);
1032 if (r) { 1384 if (r) {
1033 DRM_ERROR("No reloc for packet3 %d\n", 1385 DRM_ERROR("No reloc for packet3 %d\n",
@@ -1036,6 +1388,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1036 return r; 1388 return r;
1037 } 1389 }
1038 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); 1390 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1391 track->arrays[i + 1].robj = reloc->robj;
1392 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1393 track->arrays[i + 1].esize &= 0x7F;
1039 } 1394 }
1040 if (c & 1) { 1395 if (c & 1) {
1041 r = r100_cs_packet_next_reloc(p, &reloc); 1396 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1046,6 +1401,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1046 return r; 1401 return r;
1047 } 1402 }
1048 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1403 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1404 track->arrays[i + 0].robj = reloc->robj;
1405 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1406 track->arrays[i + 0].esize &= 0x7F;
1049 } 1407 }
1050 break; 1408 break;
1051 case PACKET3_INDX_BUFFER: 1409 case PACKET3_INDX_BUFFER:
@@ -1056,14 +1414,65 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1056 return r; 1414 return r;
1057 } 1415 }
1058 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1416 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1417 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1418 if (r) {
1419 return r;
1420 }
1059 break; 1421 break;
1060 /* Draw packet */ 1422 /* Draw packet */
1061 case PACKET3_3D_DRAW_VBUF:
1062 case PACKET3_3D_DRAW_IMMD: 1423 case PACKET3_3D_DRAW_IMMD:
 1063	case PACKET3_3D_DRAW_INDX:			 1424		/* Number of dwords is vtx_size * (num_vertices - 1);
 1064	case PACKET3_3D_DRAW_VBUF_2:			 1425		 * PRIM_WALK must be 3, as the vertex data is embedded
							 1426		 * in the cmd stream */
1427 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
1428 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1429 return -EINVAL;
1430 }
1431 track->vap_vf_cntl = ib_chunk->kdata[idx+1];
1432 track->immd_dwords = pkt->count - 1;
1433 r = r300_cs_track_check(p->rdev, track);
1434 if (r) {
1435 return r;
1436 }
1437 break;
1065 case PACKET3_3D_DRAW_IMMD_2: 1438 case PACKET3_3D_DRAW_IMMD_2:
 1439		/* Number of dwords is vtx_size * (num_vertices - 1);
 1440		 * PRIM_WALK must be 3, as the vertex data is embedded
 1441		 * in the cmd stream */
1442 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
1443 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1444 return -EINVAL;
1445 }
1446 track->vap_vf_cntl = ib_chunk->kdata[idx];
1447 track->immd_dwords = pkt->count;
1448 r = r300_cs_track_check(p->rdev, track);
1449 if (r) {
1450 return r;
1451 }
1452 break;
1453 case PACKET3_3D_DRAW_VBUF:
1454 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1455 r = r300_cs_track_check(p->rdev, track);
1456 if (r) {
1457 return r;
1458 }
1459 break;
1460 case PACKET3_3D_DRAW_VBUF_2:
1461 track->vap_vf_cntl = ib_chunk->kdata[idx];
1462 r = r300_cs_track_check(p->rdev, track);
1463 if (r) {
1464 return r;
1465 }
1466 break;
1467 case PACKET3_3D_DRAW_INDX:
1468 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1469 r = r300_cs_track_check(p->rdev, track);
1470 if (r) {
1471 return r;
1472 }
1473 break;
1066 case PACKET3_3D_DRAW_INDX_2: 1474 case PACKET3_3D_DRAW_INDX_2:
1475 track->vap_vf_cntl = ib_chunk->kdata[idx];
1067 r = r300_cs_track_check(p->rdev, track); 1476 r = r300_cs_track_check(p->rdev, track);
1068 if (r) { 1477 if (r) {
1069 return r; 1478 return r;
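
The draw cases above all funnel into r300_cs_track_check, and the IMMD variants additionally record how many inline dwords the packet carries (pkt->count - 1 for 3D_DRAW_IMMD, pkt->count for 3D_DRAW_IMMD_2, as in the hunk). The case-3 check then requires vtx_size * nverts to match exactly: for example, 8-dword vertices and a VAP_VF_CNTL encoding 3 vertices with PRIM_WALK = 3 demand exactly 8 * 3 = 24 dwords of inline vertex data, and any other payload size fails the CS. The example numbers are illustrative.
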
@@ -1095,8 +1504,8 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1095 switch (pkt.type) { 1504 switch (pkt.type) {
1096 case PACKET_TYPE0: 1505 case PACKET_TYPE0:
1097 r = r100_cs_parse_packet0(p, &pkt, 1506 r = r100_cs_parse_packet0(p, &pkt,
1098 r300_auth_reg, 1507 p->rdev->config.r300.reg_safe_bm,
1099 ARRAY_SIZE(r300_auth_reg), 1508 p->rdev->config.r300.reg_safe_bm_size,
1100 &r300_packet0_check); 1509 &r300_packet0_check);
1101 break; 1510 break;
1102 case PACKET_TYPE2: 1511 case PACKET_TYPE2:
@@ -1114,3 +1523,10 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1114 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1523 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1115 return 0; 1524 return 0;
1116} 1525}
1526
1527int r300_init(struct radeon_device *rdev)
1528{
1529 rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1530 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
1531 return 0;
1532}
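
For scale: r300_reg_safe_bm has 159 32-bit words, and under the one-bit-per-dword-register reading used earlier each word covers 32 * 4 = 128 bytes of register space, so the table published by r300_init() spans register offsets 0x0000 up to 159 * 128 = 20352 = 0x4F80 (exclusive) -- past the highest offsets handled in r300_packet0_check. The per-register bit layout remains an assumption; only the table size and the r300_init() wiring are taken from the diff.
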
diff --git a/drivers/gpu/drm/radeon/r300.h b/drivers/gpu/drm/radeon/r300.h
new file mode 100644
index 000000000000..8486b4da9d69
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef R300_H
29#define R300_H
30
31struct r300_asic {
32 const unsigned *reg_safe_bm;
33 unsigned reg_safe_bm_size;
34};
35
36#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c3f24cc56009..d61f2fc61df5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -51,7 +51,7 @@
51 51
52#include "radeon_mode.h" 52#include "radeon_mode.h"
53#include "radeon_reg.h" 53#include "radeon_reg.h"
54 54#include "r300.h"
55 55
56/* 56/*
57 * Modules parameters. 57 * Modules parameters.
@@ -496,6 +496,7 @@ int r100_debugfs_cp_init(struct radeon_device *rdev);
496 * ASIC specific functions. 496 * ASIC specific functions.
497 */ 497 */
498struct radeon_asic { 498struct radeon_asic {
499 int (*init)(struct radeon_device *rdev);
499 void (*errata)(struct radeon_device *rdev); 500 void (*errata)(struct radeon_device *rdev);
500 void (*vram_info)(struct radeon_device *rdev); 501 void (*vram_info)(struct radeon_device *rdev);
501 int (*gpu_reset)(struct radeon_device *rdev); 502 int (*gpu_reset)(struct radeon_device *rdev);
@@ -536,6 +537,10 @@ struct radeon_asic {
536 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 537 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
537}; 538};
538 539
540union radeon_asic_config {
541 struct r300_asic r300;
542};
543
539 544
540/* 545/*
541 * IOCTL. 546 * IOCTL.
@@ -573,6 +578,7 @@ struct radeon_device {
573 struct drm_device *ddev; 578 struct drm_device *ddev;
574 struct pci_dev *pdev; 579 struct pci_dev *pdev;
575 /* ASIC */ 580 /* ASIC */
581 union radeon_asic_config config;
576 enum radeon_family family; 582 enum radeon_family family;
577 unsigned long flags; 583 unsigned long flags;
578 int usec_timeout; 584 int usec_timeout;
@@ -763,6 +769,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
763/* 769/*
764 * ASICs macro. 770 * ASICs macro.
765 */ 771 */
772#define radeon_init(rdev) (rdev)->asic->init((rdev))
766#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 773#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
767#define radeon_errata(rdev) (rdev)->asic->errata((rdev)) 774#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
768#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) 775#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e57d8a784e9f..e2e567395df8 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -41,6 +41,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
41/* 41/*
42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
43 */ 43 */
44int r100_init(struct radeon_device *rdev);
44uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 45uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
45void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 46void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
46void r100_errata(struct radeon_device *rdev); 47void r100_errata(struct radeon_device *rdev);
@@ -72,6 +73,7 @@ int r100_copy_blit(struct radeon_device *rdev,
72 struct radeon_fence *fence); 73 struct radeon_fence *fence);
73 74
74static struct radeon_asic r100_asic = { 75static struct radeon_asic r100_asic = {
76 .init = &r100_init,
75 .errata = &r100_errata, 77 .errata = &r100_errata,
76 .vram_info = &r100_vram_info, 78 .vram_info = &r100_vram_info,
77 .gpu_reset = &r100_gpu_reset, 79 .gpu_reset = &r100_gpu_reset,
@@ -104,6 +106,7 @@ static struct radeon_asic r100_asic = {
104/* 106/*
105 * r300,r350,rv350,rv380 107 * r300,r350,rv350,rv380
106 */ 108 */
109int r300_init(struct radeon_device *rdev);
107void r300_errata(struct radeon_device *rdev); 110void r300_errata(struct radeon_device *rdev);
108void r300_vram_info(struct radeon_device *rdev); 111void r300_vram_info(struct radeon_device *rdev);
109int r300_gpu_reset(struct radeon_device *rdev); 112int r300_gpu_reset(struct radeon_device *rdev);
@@ -126,6 +129,7 @@ int r300_copy_dma(struct radeon_device *rdev,
126 unsigned num_pages, 129 unsigned num_pages,
127 struct radeon_fence *fence); 130 struct radeon_fence *fence);
128static struct radeon_asic r300_asic = { 131static struct radeon_asic r300_asic = {
132 .init = &r300_init,
129 .errata = &r300_errata, 133 .errata = &r300_errata,
130 .vram_info = &r300_vram_info, 134 .vram_info = &r300_vram_info,
131 .gpu_reset = &r300_gpu_reset, 135 .gpu_reset = &r300_gpu_reset,
@@ -162,6 +166,7 @@ void r420_vram_info(struct radeon_device *rdev);
162int r420_mc_init(struct radeon_device *rdev); 166int r420_mc_init(struct radeon_device *rdev);
163void r420_mc_fini(struct radeon_device *rdev); 167void r420_mc_fini(struct radeon_device *rdev);
164static struct radeon_asic r420_asic = { 168static struct radeon_asic r420_asic = {
169 .init = &r300_init,
165 .errata = &r420_errata, 170 .errata = &r420_errata,
166 .vram_info = &r420_vram_info, 171 .vram_info = &r420_vram_info,
167 .gpu_reset = &r300_gpu_reset, 172 .gpu_reset = &r300_gpu_reset,
@@ -205,6 +210,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
205uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 210uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
206void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 211void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
207static struct radeon_asic rs400_asic = { 212static struct radeon_asic rs400_asic = {
213 .init = &r300_init,
208 .errata = &rs400_errata, 214 .errata = &rs400_errata,
209 .vram_info = &rs400_vram_info, 215 .vram_info = &rs400_vram_info,
210 .gpu_reset = &r300_gpu_reset, 216 .gpu_reset = &r300_gpu_reset,
@@ -249,6 +255,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
249uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 255uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
250void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 256void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
251static struct radeon_asic rs600_asic = { 257static struct radeon_asic rs600_asic = {
258 .init = &r300_init,
252 .errata = &rs600_errata, 259 .errata = &rs600_errata,
253 .vram_info = &rs600_vram_info, 260 .vram_info = &rs600_vram_info,
254 .gpu_reset = &r300_gpu_reset, 261 .gpu_reset = &r300_gpu_reset,
@@ -288,6 +295,7 @@ void rs690_mc_fini(struct radeon_device *rdev);
288uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 295uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
289void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 296void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
290static struct radeon_asic rs690_asic = { 297static struct radeon_asic rs690_asic = {
298 .init = &r300_init,
291 .errata = &rs690_errata, 299 .errata = &rs690_errata,
292 .vram_info = &rs690_vram_info, 300 .vram_info = &rs690_vram_info,
293 .gpu_reset = &r300_gpu_reset, 301 .gpu_reset = &r300_gpu_reset,
@@ -320,6 +328,7 @@ static struct radeon_asic rs690_asic = {
320/* 328/*
321 * rv515 329 * rv515
322 */ 330 */
331int rv515_init(struct radeon_device *rdev);
323void rv515_errata(struct radeon_device *rdev); 332void rv515_errata(struct radeon_device *rdev);
324void rv515_vram_info(struct radeon_device *rdev); 333void rv515_vram_info(struct radeon_device *rdev);
325int rv515_gpu_reset(struct radeon_device *rdev); 334int rv515_gpu_reset(struct radeon_device *rdev);
@@ -331,6 +340,7 @@ void rv515_ring_start(struct radeon_device *rdev);
331uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 340uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
332void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 341void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
333static struct radeon_asic rv515_asic = { 342static struct radeon_asic rv515_asic = {
343 .init = &rv515_init,
334 .errata = &rv515_errata, 344 .errata = &rv515_errata,
335 .vram_info = &rv515_vram_info, 345 .vram_info = &rv515_vram_info,
336 .gpu_reset = &rv515_gpu_reset, 346 .gpu_reset = &rv515_gpu_reset,
@@ -349,7 +359,7 @@ static struct radeon_asic rv515_asic = {
349 .irq_set = &r100_irq_set, 359 .irq_set = &r100_irq_set,
350 .irq_process = &r100_irq_process, 360 .irq_process = &r100_irq_process,
351 .fence_ring_emit = &r300_fence_ring_emit, 361 .fence_ring_emit = &r300_fence_ring_emit,
352 .cs_parse = &r100_cs_parse, 362 .cs_parse = &r300_cs_parse,
353 .copy_blit = &r100_copy_blit, 363 .copy_blit = &r100_copy_blit,
354 .copy_dma = &r300_copy_dma, 364 .copy_dma = &r300_copy_dma,
355 .copy = &r100_copy_blit, 365 .copy = &r100_copy_blit,
@@ -368,6 +378,7 @@ void r520_vram_info(struct radeon_device *rdev);
368int r520_mc_init(struct radeon_device *rdev); 378int r520_mc_init(struct radeon_device *rdev);
369void r520_mc_fini(struct radeon_device *rdev); 379void r520_mc_fini(struct radeon_device *rdev);
370static struct radeon_asic r520_asic = { 380static struct radeon_asic r520_asic = {
381 .init = &rv515_init,
371 .errata = &r520_errata, 382 .errata = &r520_errata,
372 .vram_info = &r520_vram_info, 383 .vram_info = &r520_vram_info,
373 .gpu_reset = &rv515_gpu_reset, 384 .gpu_reset = &rv515_gpu_reset,
@@ -386,7 +397,7 @@ static struct radeon_asic r520_asic = {
386 .irq_set = &r100_irq_set, 397 .irq_set = &r100_irq_set,
387 .irq_process = &r100_irq_process, 398 .irq_process = &r100_irq_process,
388 .fence_ring_emit = &r300_fence_ring_emit, 399 .fence_ring_emit = &r300_fence_ring_emit,
389 .cs_parse = &r100_cs_parse, 400 .cs_parse = &r300_cs_parse,
390 .copy_blit = &r100_copy_blit, 401 .copy_blit = &r100_copy_blit,
391 .copy_dma = &r300_copy_dma, 402 .copy_dma = &r300_copy_dma,
392 .copy = &r100_copy_blit, 403 .copy = &r100_copy_blit,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 786632d3e378..1f5a1a490984 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -835,7 +835,6 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
835 struct _COMPASSIONATE_DATA *dac_info; 835 struct _COMPASSIONATE_DATA *dac_info;
836 uint8_t frev, crev; 836 uint8_t frev, crev;
837 uint8_t bg, dac; 837 uint8_t bg, dac;
838 int i;
839 struct radeon_encoder_primary_dac *p_dac = NULL; 838 struct radeon_encoder_primary_dac *p_dac = NULL;
840 839
841 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 840 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
@@ -867,7 +866,6 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
867 struct _COMPASSIONATE_DATA *dac_info; 866 struct _COMPASSIONATE_DATA *dac_info;
868 uint8_t frev, crev; 867 uint8_t frev, crev;
869 uint8_t bg, dac; 868 uint8_t bg, dac;
870 int i;
871 struct radeon_encoder_tv_dac *tv_dac = NULL; 869 struct radeon_encoder_tv_dac *tv_dac = NULL;
872 870
873 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 871 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 06e8038bc4ac..afc4db280b94 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -799,6 +799,7 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
799 struct radeon_encoder_lvds *lvds = NULL; 799 struct radeon_encoder_lvds *lvds = NULL;
800 uint32_t fp_vert_stretch, fp_horz_stretch; 800 uint32_t fp_vert_stretch, fp_horz_stretch;
801 uint32_t ppll_div_sel, ppll_val; 801 uint32_t ppll_div_sel, ppll_val;
802 uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
802 803
803 lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL); 804 lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
804 805
@@ -808,6 +809,14 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
808 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH); 809 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
809 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH); 810 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
810 811
812 /* These should be fail-safe defaults, fingers crossed */
813 lvds->panel_pwr_delay = 200;
814 lvds->panel_vcc_delay = 2000;
815
816 lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
817 lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf;
818 lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
819
811 if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE) 820 if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
812 lvds->native_mode.panel_yres = 821 lvds->native_mode.panel_yres =
813 ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >> 822 ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
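
The radeon_combios.c hunk gives the register-derived LVDS data sane panel power-sequencing values: fixed fallbacks of 200 and 2000 for panel_pwr_delay and panel_vcc_delay, plus digon/blon delays pulled out of LVDS_SS_GEN_CNTL as 4-bit fields. As a small worked example with an illustrative register value (not from the diff): if the DELAY1 field reads back as 0x4 and DELAY2 as 0x2, the shift-and-mask shown above yields panel_digon_delay = 4 and panel_blon_delay = 2.
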
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 89c4c44169f7..d8356827ef17 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2045,11 +2045,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2045 drm_radeon_private_t *dev_priv; 2045 drm_radeon_private_t *dev_priv;
2046 int ret = 0; 2046 int ret = 0;
2047 2047
2048 dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); 2048 dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL);
2049 if (dev_priv == NULL) 2049 if (dev_priv == NULL)
2050 return -ENOMEM; 2050 return -ENOMEM;
2051 2051
2052 memset(dev_priv, 0, sizeof(drm_radeon_private_t));
2053 dev->dev_private = (void *)dev_priv; 2052 dev->dev_private = (void *)dev_priv;
2054 dev_priv->flags = flags; 2053 dev_priv->flags = flags;
2055 2054
@@ -2103,7 +2102,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2103 unsigned long sareapage; 2102 unsigned long sareapage;
2104 int ret; 2103 int ret;
2105 2104
2106 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 2105 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
2107 if (!master_priv) 2106 if (!master_priv)
2108 return -ENOMEM; 2107 return -ENOMEM;
2109 2108
@@ -2137,7 +2136,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
2137 if (master_priv->sarea) 2136 if (master_priv->sarea)
2138 drm_rmmap_locked(dev, master_priv->sarea); 2137 drm_rmmap_locked(dev, master_priv->sarea);
2139 2138
2140 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); 2139 kfree(master_priv);
2141 2140
2142 master->driver_priv = NULL; 2141 master->driver_priv = NULL;
2143} 2142}
@@ -2171,7 +2170,7 @@ int radeon_driver_unload(struct drm_device *dev)
2171 2170
2172 drm_rmmap(dev, dev_priv->mmio); 2171 drm_rmmap(dev, dev_priv->mmio);
2173 2172
2174 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 2173 kfree(dev_priv);
2175 2174
2176 dev->dev_private = NULL; 2175 dev->dev_private = NULL;
2177 return 0; 2176 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5fd2b639bf66..f97563db4e59 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,23 @@
35#include "atom.h" 35#include "atom.h"
36 36
37/* 37/*
38 * Clear GPU surface registers.
39 */
40static void radeon_surface_init(struct radeon_device *rdev)
41{
42 /* FIXME: check this out */
43 if (rdev->family < CHIP_R600) {
44 int i;
45
46 for (i = 0; i < 8; i++) {
47 WREG32(RADEON_SURFACE0_INFO +
48 i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
49 0);
50 }
51 }
52}
53
54/*
 38 * GPU scratch register helper functions. 55 * GPU scratch register helper functions.
39 */ 56 */
40static void radeon_scratch_init(struct radeon_device *rdev) 57static void radeon_scratch_init(struct radeon_device *rdev)
@@ -470,6 +487,10 @@ int radeon_device_init(struct radeon_device *rdev,
470 if (r) { 487 if (r) {
471 return r; 488 return r;
472 } 489 }
490 r = radeon_init(rdev);
491 if (r) {
492 return r;
493 }
473 494
474 /* Report DMA addressing limitation */ 495 /* Report DMA addressing limitation */
475 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 496 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
@@ -492,6 +513,8 @@ int radeon_device_init(struct radeon_device *rdev,
492 radeon_errata(rdev); 513 radeon_errata(rdev);
493 /* Initialize scratch registers */ 514 /* Initialize scratch registers */
494 radeon_scratch_init(rdev); 515 radeon_scratch_init(rdev);
516 /* Initialize surface registers */
517 radeon_surface_init(rdev);
495 518
496 /* TODO: disable VGA need to use VGA request */ 519 /* TODO: disable VGA need to use VGA request */
497 /* BIOS*/ 520 /* BIOS*/
@@ -600,9 +623,6 @@ int radeon_device_init(struct radeon_device *rdev,
600 if (r) { 623 if (r) {
601 return r; 624 return r;
602 } 625 }
603 if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
604 rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
605 }
606 if (!ret) { 626 if (!ret) {
607 DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); 627 DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
608 } 628 }
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5452bb9d925e..3efcf1a526be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -351,7 +351,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
351 radeon_i2c_do_lock(radeon_connector, 0); 351 radeon_i2c_do_lock(radeon_connector, 0);
352 if (edid) { 352 if (edid) {
353 /* update digital bits here */ 353 /* update digital bits here */
354 if (edid->digital) 354 if (edid->input & DRM_EDID_INPUT_DIGITAL)
355 radeon_connector->use_digital = 1; 355 radeon_connector->use_digital = 1;
356 else 356 else
357 radeon_connector->use_digital = 0; 357 radeon_connector->use_digital = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c815a2cbf7b3..84ba69f48784 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -313,7 +313,7 @@ static int __init radeon_init(void)
313{ 313{
314 driver = &driver_old; 314 driver = &driver_old;
315 driver->num_ioctls = radeon_max_ioctl; 315 driver->num_ioctls = radeon_max_ioctl;
316#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86) 316#if defined(CONFIG_DRM_RADEON_KMS)
317 /* if enabled by default */ 317 /* if enabled by default */
318 if (radeon_modeset == -1) { 318 if (radeon_modeset == -1) {
319 DRM_INFO("radeon default to kernel modesetting.\n"); 319 DRM_INFO("radeon default to kernel modesetting.\n");
@@ -345,7 +345,7 @@ static void __exit radeon_exit(void)
345 drm_exit(driver); 345 drm_exit(driver);
346} 346}
347 347
348late_initcall(radeon_init); 348module_init(radeon_init);
349module_exit(radeon_exit); 349module_exit(radeon_exit);
350 350
351MODULE_AUTHOR(DRIVER_AUTHOR); 351MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fa86d398945e..9e8f191eb64a 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -478,14 +478,16 @@ int radeonfb_create(struct radeon_device *rdev,
478{ 478{
479 struct fb_info *info; 479 struct fb_info *info;
480 struct radeon_fb_device *rfbdev; 480 struct radeon_fb_device *rfbdev;
481 struct drm_framebuffer *fb; 481 struct drm_framebuffer *fb = NULL;
482 struct radeon_framebuffer *rfb; 482 struct radeon_framebuffer *rfb;
483 struct drm_mode_fb_cmd mode_cmd; 483 struct drm_mode_fb_cmd mode_cmd;
484 struct drm_gem_object *gobj = NULL; 484 struct drm_gem_object *gobj = NULL;
485 struct radeon_object *robj = NULL; 485 struct radeon_object *robj = NULL;
486 struct device *device = &rdev->pdev->dev; 486 struct device *device = &rdev->pdev->dev;
487 int size, aligned_size, ret; 487 int size, aligned_size, ret;
488 u64 fb_gpuaddr;
488 void *fbptr = NULL; 489 void *fbptr = NULL;
490 unsigned long tmp;
489 491
490 mode_cmd.width = surface_width; 492 mode_cmd.width = surface_width;
491 mode_cmd.height = surface_height; 493 mode_cmd.height = surface_height;
@@ -498,11 +500,12 @@ int radeonfb_create(struct radeon_device *rdev,
498 aligned_size = ALIGN(size, PAGE_SIZE); 500 aligned_size = ALIGN(size, PAGE_SIZE);
499 501
500 ret = radeon_gem_object_create(rdev, aligned_size, 0, 502 ret = radeon_gem_object_create(rdev, aligned_size, 0,
501 RADEON_GEM_DOMAIN_VRAM, 503 RADEON_GEM_DOMAIN_VRAM,
502 false, ttm_bo_type_kernel, 504 false, ttm_bo_type_kernel,
503 false, &gobj); 505 false, &gobj);
504 if (ret) { 506 if (ret) {
505 printk(KERN_ERR "failed to allocate framebuffer\n"); 507 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
508 surface_width, surface_height);
506 ret = -ENOMEM; 509 ret = -ENOMEM;
507 goto out; 510 goto out;
508 } 511 }
@@ -515,12 +518,19 @@ int radeonfb_create(struct radeon_device *rdev,
515 ret = -ENOMEM; 518 ret = -ENOMEM;
516 goto out_unref; 519 goto out_unref;
517 } 520 }
521 ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
522 if (ret) {
523 printk(KERN_ERR "failed to pin framebuffer\n");
524 ret = -ENOMEM;
525 goto out_unref;
526 }
518 527
519 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); 528 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
520 529
521 rfb = to_radeon_framebuffer(fb); 530 rfb = to_radeon_framebuffer(fb);
522 *rfb_p = rfb; 531 *rfb_p = rfb;
523 rdev->fbdev_rfb = rfb; 532 rdev->fbdev_rfb = rfb;
533 rdev->fbdev_robj = robj;
524 534
525 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); 535 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
526 if (info == NULL) { 536 if (info == NULL) {
@@ -541,13 +551,13 @@ int radeonfb_create(struct radeon_device *rdev,
541 info->fix.xpanstep = 1; /* doing it in hw */ 551 info->fix.xpanstep = 1; /* doing it in hw */
542 info->fix.ypanstep = 1; /* doing it in hw */ 552 info->fix.ypanstep = 1; /* doing it in hw */
543 info->fix.ywrapstep = 0; 553 info->fix.ywrapstep = 0;
544 info->fix.accel = FB_ACCEL_I830; 554 info->fix.accel = FB_ACCEL_NONE;
545 info->fix.type_aux = 0; 555 info->fix.type_aux = 0;
546 info->flags = FBINFO_DEFAULT; 556 info->flags = FBINFO_DEFAULT;
547 info->fbops = &radeonfb_ops; 557 info->fbops = &radeonfb_ops;
548 info->fix.line_length = fb->pitch; 558 info->fix.line_length = fb->pitch;
549 info->screen_base = fbptr; 559 tmp = fb_gpuaddr - rdev->mc.vram_location;
550 info->fix.smem_start = (unsigned long)fbptr; 560 info->fix.smem_start = rdev->mc.aper_base + tmp;
551 info->fix.smem_len = size; 561 info->fix.smem_len = size;
552 info->screen_base = fbptr; 562 info->screen_base = fbptr;
553 info->screen_size = size; 563 info->screen_size = size;
@@ -562,8 +572,8 @@ int radeonfb_create(struct radeon_device *rdev,
562 info->var.width = -1; 572 info->var.width = -1;
563 info->var.xres = fb_width; 573 info->var.xres = fb_width;
564 info->var.yres = fb_height; 574 info->var.yres = fb_height;
565 info->fix.mmio_start = pci_resource_start(rdev->pdev, 2); 575 info->fix.mmio_start = 0;
566 info->fix.mmio_len = pci_resource_len(rdev->pdev, 2); 576 info->fix.mmio_len = 0;
567 info->pixmap.size = 64*1024; 577 info->pixmap.size = 64*1024;
568 info->pixmap.buf_align = 8; 578 info->pixmap.buf_align = 8;
569 info->pixmap.access_align = 32; 579 info->pixmap.access_align = 32;
@@ -644,7 +654,7 @@ out_unref:
644 if (robj) { 654 if (robj) {
645 radeon_object_kunmap(robj); 655 radeon_object_kunmap(robj);
646 } 656 }
647 if (ret) { 657 if (fb && ret) {
648 list_del(&fb->filp_head); 658 list_del(&fb->filp_head);
649 drm_gem_object_unreference(gobj); 659 drm_gem_object_unreference(gobj);
650 drm_framebuffer_cleanup(fb); 660 drm_framebuffer_cleanup(fb);
@@ -813,6 +823,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
813 robj = rfb->obj->driver_private; 823 robj = rfb->obj->driver_private;
814 unregister_framebuffer(info); 824 unregister_framebuffer(info);
815 radeon_object_kunmap(robj); 825 radeon_object_kunmap(robj);
826 radeon_object_unpin(robj);
816 framebuffer_release(info); 827 framebuffer_release(info);
817 } 828 }
818 829
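
The framebuffer hunks above pin the object in VRAM and report a real physical address to fbdev instead of the kernel mapping: smem_start = aper_base + (fb_gpuaddr - vram_location), i.e. the object's offset inside VRAM translated into the PCI aperture. With illustrative numbers (not from the diff): VRAM at MC address 0x0, the object pinned at MC address 0x00100000 and an aperture at bus address 0xD0000000 give smem_start = 0xD0100000, while screen_base stays the CPU virtual mapping fbptr.
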
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 71465ed2688a..dd438d32e5c0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -162,7 +162,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
162 struct radeon_i2c_chan *i2c; 162 struct radeon_i2c_chan *i2c;
163 int ret; 163 int ret;
164 164
165 i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); 165 i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
166 if (i2c == NULL) 166 if (i2c == NULL)
167 return NULL; 167 return NULL;
168 168
@@ -189,7 +189,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
189 189
190 return i2c; 190 return i2c;
191out_free: 191out_free:
192 drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); 192 kfree(i2c);
193 return NULL; 193 return NULL;
194 194
195} 195}
@@ -200,7 +200,7 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
200 return; 200 return;
201 201
202 i2c_del_adapter(&i2c->adapter); 202 i2c_del_adapter(&i2c->adapter);
203 drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); 203 kfree(i2c);
204} 204}
205 205
206struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) 206struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 64f42b19cbfa..4612a7c146d1 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -169,7 +169,7 @@ int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
169 unsigned long sareapage; 169 unsigned long sareapage;
170 int ret; 170 int ret;
171 171
172 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 172 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
173 if (master_priv == NULL) { 173 if (master_priv == NULL) {
174 return -ENOMEM; 174 return -ENOMEM;
175 } 175 }
@@ -199,7 +199,7 @@ void radeon_master_destroy_kms(struct drm_device *dev,
 	if (master_priv->sarea) {
 		drm_rmmap_locked(dev, master_priv->sarea);
 	}
-	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+	kfree(master_priv);
 	master->driver_priv = NULL;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index 4af5286a36fb..ed95155c4b1d 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -43,8 +43,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 {
 	/* Maybe cut off the start of an existing block */
 	if (start > p->start) {
-		struct mem_block *newblock =
-		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
 		if (!newblock)
 			goto out;
 		newblock->start = start;
@@ -60,8 +60,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 
 	/* Maybe cut off the end of an existing block */
 	if (size < p->size) {
-		struct mem_block *newblock =
-		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
 		if (!newblock)
 			goto out;
 		newblock->start = start + size;
@@ -118,7 +118,7 @@ static void free_block(struct mem_block *p)
 		p->size += q->size;
 		p->next = q->next;
 		p->next->prev = p;
-		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+		kfree(q);
 	}
 
 	if (p->prev->file_priv == NULL) {
@@ -126,7 +126,7 @@ static void free_block(struct mem_block *p)
 		q->size += p->size;
 		q->next = p->next;
 		q->next->prev = q;
-		drm_free(p, sizeof(*q), DRM_MEM_BUFS);
+		kfree(p);
 	}
 }
 
@@ -134,14 +134,14 @@ static void free_block(struct mem_block *p)
  */
 static int init_heap(struct mem_block **heap, int start, int size)
 {
-	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
+	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
 
 	if (!blocks)
 		return -ENOMEM;
 
-	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
+	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
 	if (!*heap) {
-		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
+		kfree(blocks);
 		return -ENOMEM;
 	}
 
@@ -179,7 +179,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
 			p->size += q->size;
 			p->next = q->next;
 			p->next->prev = p;
-			drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+			kfree(q);
 		}
 	}
 }
@@ -196,10 +196,10 @@ void radeon_mem_takedown(struct mem_block **heap)
 	for (p = (*heap)->next; p != *heap;) {
 		struct mem_block *q = p;
 		p = p->next;
-		drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+		kfree(q);
 	}
 
-	drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
+	kfree(*heap);
 	*heap = NULL;
 }
 
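
Editor's note: radeon_mem.c manages a circular, doubly linked list of blocks and carves an allocation out of a free block by splitting it at most twice, once to trim the space before the requested start and once to trim the space after it; only the allocation calls change in this merge, the list surgery is untouched. A userspace sketch of the same idea, with plain malloc instead of kmalloc and only the "cut off the start" half shown:

#include <stdlib.h>

struct mem_block {
	int start;
	int size;
	struct mem_block *next, *prev;
};

/* Split 'p' so a block beginning at 'start' of length 'size' can be
 * handed out; returns the block that now begins at 'start'. */
static struct mem_block *split_block(struct mem_block *p, int start, int size)
{
	if (start > p->start) {
		struct mem_block *newblock = malloc(sizeof(*newblock));
		if (!newblock)
			return NULL;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		p->size -= newblock->size;	/* p keeps only the leading part */

		/* link the new block right after p */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p = newblock;
	}
	/* the "cut off the end" case is symmetric and omitted here */
	return p;
}
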
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df5e000..bac0d06c52ac 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -223,7 +223,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
 {
 	uint32_t flags;
 	uint32_t tmp;
-	void *fbptr;
 	int r;
 
 	flags = radeon_object_flags_from_domain(domain);
@@ -242,10 +241,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
 		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
 		return r;
 	}
-	if (robj->rdev->fbdev_robj == robj) {
-		mutex_lock(&robj->rdev->fbdev_info->lock);
-		radeon_object_kunmap(robj);
-	}
 	tmp = robj->tobj.mem.placement;
 	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
 	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
@@ -261,23 +256,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
 		DRM_ERROR("radeon: failed to pin object.\n");
 	}
 	radeon_object_unreserve(robj);
-	if (robj->rdev->fbdev_robj == robj) {
-		if (!r) {
-			r = radeon_object_kmap(robj, &fbptr);
-		}
-		if (!r) {
-			robj->rdev->fbdev_info->screen_base = fbptr;
-			robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
-		}
-		mutex_unlock(&robj->rdev->fbdev_info->lock);
-	}
 	return r;
 }
 
 void radeon_object_unpin(struct radeon_object *robj)
 {
 	uint32_t flags;
-	void *fbptr;
 	int r;
 
 	spin_lock(&robj->tobj.lock);
@@ -297,10 +281,6 @@ void radeon_object_unpin(struct radeon_object *robj)
 		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
 		return;
 	}
-	if (robj->rdev->fbdev_robj == robj) {
-		mutex_lock(&robj->rdev->fbdev_info->lock);
-		radeon_object_kunmap(robj);
-	}
 	flags = robj->tobj.mem.placement;
 	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
 	r = ttm_buffer_object_validate(&robj->tobj,
@@ -310,16 +290,6 @@ void radeon_object_unpin(struct radeon_object *robj)
 		DRM_ERROR("radeon: failed to unpin buffer.\n");
 	}
 	radeon_object_unreserve(robj);
-	if (robj->rdev->fbdev_robj == robj) {
-		if (!r) {
-			r = radeon_object_kmap(robj, &fbptr);
-		}
-		if (!r) {
-			robj->rdev->fbdev_info->screen_base = fbptr;
-			robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
-		}
-		mutex_unlock(&robj->rdev->fbdev_info->lock);
-	}
 }
 
 int radeon_object_wait(struct radeon_object *robj)
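
Editor's note: with the fbdev special-casing gone, radeon_object_pin() and radeon_object_unpin() are reduced to the plain reserve / change placement / validate / unreserve sequence, and a caller that wants a CPU mapping is expected to kmap and kunmap the object itself, which is exactly what radeonfb_remove() now does before unpinning. A hedged fragment of the expected caller-side pairing, assuming (from how the function is used elsewhere in the driver) that radeon_object_pin() takes a target domain and returns the GPU address through its third argument:

	/* setup (sketch): pin the buffer, then map it for CPU access */
	r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r == 0)
		r = radeon_object_kmap(robj, &fbptr);

	/* teardown (sketch): unmap before dropping the pin */
	radeon_object_kunmap(robj);
	radeon_object_unpin(robj);
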
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d3d90406a24..e1b618574461 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3184,6 +3184,7 @@
 #	define RADEON_RB_BUFSZ_MASK		(0x3f << 0)
 #	define RADEON_RB_BLKSZ_SHIFT		8
 #	define RADEON_RB_BLKSZ_MASK		(0x3f << 8)
+#	define RADEON_BUF_SWAP_32BIT		(1 << 17)
 #	define RADEON_MAX_FETCH_SHIFT		18
 #	define RADEON_MAX_FETCH_MASK		(0x3 << 18)
 #	define RADEON_RB_NO_UPDATE		(1 << 27)
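
Editor's note: the new RADEON_BUF_SWAP_32BIT bit lives in the same register as the ring-buffer size, block-size and fetch fields, so it is simply OR'd into that control value on big-endian hosts. An illustrative composition (the field values are made up, and the register name in the trailing comment is an assumption):

	u32 rb_bufsz = 10;	/* made-up log2 ring size */
	u32 tmp;

	tmp = (rb_bufsz << 0) |			/* RADEON_RB_BUFSZ field */
	      (3 << RADEON_RB_BLKSZ_SHIFT) |
	      (3 << RADEON_MAX_FETCH_SHIFT);
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;		/* byte-swap CP ring reads on big-endian hosts */
#endif
	/* tmp would then be written to the CP ring-buffer control register */
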
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index fa728ec6ed34..46645f3e0328 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -2866,12 +2866,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 	 */
 	orig_bufsz = cmdbuf->bufsz;
 	if (orig_bufsz != 0) {
-		kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
+		kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
 		if (kbuf == NULL)
 			return -ENOMEM;
 		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
 				       cmdbuf->bufsz)) {
-			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+			kfree(kbuf);
 			return -EFAULT;
 		}
 		cmdbuf->buf = kbuf;
@@ -2884,7 +2884,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
 
 		if (orig_bufsz != 0)
-			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+			kfree(kbuf);
 
 		return temp;
 	}
@@ -2991,7 +2991,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 	}
 
 	if (orig_bufsz != 0)
-		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+		kfree(kbuf);
 
 	DRM_DEBUG("DONE\n");
 	COMMIT_RING();
@@ -2999,7 +2999,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 
       err:
 	if (orig_bufsz != 0)
-		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+		kfree(kbuf);
 	return -EINVAL;
 }
 
@@ -3175,9 +3175,7 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	struct drm_radeon_driver_file_fields *radeon_priv;
 
 	DRM_DEBUG("\n");
-	radeon_priv =
-	    (struct drm_radeon_driver_file_fields *)
-	    drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
+	radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL);
 
 	if (!radeon_priv)
 		return -ENOMEM;
@@ -3196,7 +3194,7 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 	struct drm_radeon_driver_file_fields *radeon_priv =
 	    file_priv->driver_priv;
 
-	drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
+	kfree(radeon_priv);
 }
 
 struct drm_ioctl_desc radeon_ioctls[] = {
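
Editor's note: the cmdbuf ioctl path above is a bounce-buffer pattern: the user's command stream is copied into a kernel allocation once, parsed and executed from there, and freed on every exit path; only the allocator changes in this merge. A stripped-down sketch of the shape of that pattern, with hypothetical names and no command parsing:

#include <linux/slab.h>
#include <linux/uaccess.h>

static int foo_cmdbuf(void __user *ubuf, size_t bufsz)
{
	void *kbuf = NULL;
	int ret = 0;

	if (bufsz) {
		kbuf = kmalloc(bufsz, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		if (copy_from_user(kbuf, ubuf, bufsz)) {
			kfree(kbuf);
			return -EFAULT;
		}
	}

	/* ... validate and emit the commands from kbuf ... */

	kfree(kbuf);	/* kfree(NULL) is a no-op, so this also covers bufsz == 0 */
	return ret;
}
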
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 4c087c1510d7..1227a97f5169 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -133,6 +133,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->gpu_offset = 0;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
 #if __OS_HAS_AGP
 		if (rdev->flags & RADEON_IS_AGP) {
 			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
@@ -143,8 +144,9 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			man->io_offset = rdev->mc.agp_base;
 			man->io_size = rdev->mc.gtt_size;
 			man->io_addr = NULL;
-			man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-				     TTM_MEMTYPE_FLAG_MAPPABLE;
+			if (!rdev->ddev->agp->cant_use_aperture)
+				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+					     TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 						 TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
@@ -154,8 +156,6 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			man->io_offset = 0;
 			man->io_size = 0;
 			man->io_addr = NULL;
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_CMA;
 		}
 		break;
 	case TTM_PL_VRAM:
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7eab95db58ac..ffea37b1b3e2 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -225,6 +225,8 @@ void rv515_ring_start(struct radeon_device *rdev)
 	radeon_ring_write(rdev,
 			  R300_GEOMETRY_ROUND_NEAREST |
 			  R300_COLOR_ROUND_NEAREST);
+	radeon_ring_write(rdev, PACKET0(0x20C8, 0));
+	radeon_ring_write(rdev, 0);
 	radeon_ring_unlock_commit(rdev);
 }
 
@@ -502,3 +504,59 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
+
+
+/*
+ * Asic initialization
+ */
+static const unsigned r500_reg_safe_bm[159] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+	0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
+	0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
+	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+	0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003FC01, 0x3FFFFCF8, 0xFE800B19,
+};
+
+
+
+int rv515_init(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
+	return 0;
+}
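
Editor's note: r500_reg_safe_bm is a bitmap with one bit per 32-bit register in the checked range (159 words x 32 bits, about 5088 registers); rv515_init() simply points the shared r300 command-stream checker at this table instead of the r300 one. A purely hypothetical lookup helper, assuming a set bit marks a register as writable from a user command stream (the real checker in r300.c also special-cases some registers, so this is only an illustration of the indexing):

/* hypothetical helper: one bit per dword-aligned register offset */
static bool r500_reg_is_safe(uint32_t reg)
{
	unsigned idx = reg >> 2;	/* register offsets are dword aligned */

	if (idx >= ARRAY_SIZE(r500_reg_safe_bm) * 32)
		return false;
	return (r500_reg_safe_bm[idx / 32] >> (idx % 32)) & 1;
}
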
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 456cd040f31a..bff6fc2524c8 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -298,8 +298,8 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
 
 	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
 	    (SAVAGE_DMA_PAGE_SIZE * 4);
-	dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
-					dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
+	dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
+				      dev_priv->nr_dma_pages, GFP_KERNEL);
 	if (dev_priv->dma_pages == NULL)
 		return -ENOMEM;
 
@@ -539,7 +539,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	drm_savage_private_t *dev_priv;
 
-	dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+	dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -671,7 +671,7 @@ int savage_driver_unload(struct drm_device *dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
-	drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	return 0;
 }
@@ -804,8 +804,8 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
 		dev_priv->fake_dma.offset = 0;
 		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
 		dev_priv->fake_dma.type = _DRM_SHM;
-		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
-						      DRM_MEM_DRIVER);
+		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
+						    GFP_KERNEL);
 		if (!dev_priv->fake_dma.handle) {
 			DRM_ERROR("could not allocate faked DMA buffer!\n");
 			savage_do_cleanup_bci(dev);
@@ -903,9 +903,7 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
 	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
-		if (dev_priv->fake_dma.handle)
-			drm_free(dev_priv->fake_dma.handle,
-				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
+		kfree(dev_priv->fake_dma.handle);
 	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
 		   dev_priv->cmd_dma->type == _DRM_AGP &&
 		   dev_priv->dma_type == SAVAGE_DMA_AGP)
@@ -920,10 +918,7 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
 		dev->agp_buffer_map = NULL;
 	}
 
-	if (dev_priv->dma_pages)
-		drm_free(dev_priv->dma_pages,
-			 sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
-			 DRM_MEM_DRIVER);
+	kfree(dev_priv->dma_pages);
 
 	return 0;
 }
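
Editor's note: the savage cleanups also drop the NULL checks that used to guard drm_free(): kfree() is defined to be a no-op on a NULL pointer, so the guard adds nothing. A minimal before/after illustration (the "before" lines are taken from the hunk above; clearing the pointer afterwards is optional and not part of this patch):

	/* before */
	if (dev_priv->dma_pages)
		drm_free(dev_priv->dma_pages,
			 sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
			 DRM_MEM_DRIVER);

	/* after: kfree(NULL) is safe, so no guard is needed */
	kfree(dev_priv->dma_pages);
	dev_priv->dma_pages = NULL;	/* optional, avoids a dangling pointer */
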
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 5f6238fdf1fa..8a3e31599c94 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -988,20 +988,20 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 	 * for locking on FreeBSD.
 	 */
 	if (cmdbuf->size) {
-		kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
+		kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
 		if (kcmd_addr == NULL)
 			return -ENOMEM;
 
 		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
 				       cmdbuf->size * 8))
 		{
-			drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
+			kfree(kcmd_addr);
 			return -EFAULT;
 		}
 		cmdbuf->cmd_addr = kcmd_addr;
 	}
 	if (cmdbuf->vb_size) {
-		kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
+		kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
 		if (kvb_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
@@ -1015,8 +1015,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 		cmdbuf->vb_addr = kvb_addr;
 	}
 	if (cmdbuf->nbox) {
-		kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
-				      DRM_MEM_DRIVER);
+		kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
+				    GFP_KERNEL);
 		if (kbox_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
@@ -1154,10 +1154,9 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 
 done:
 	/* If we didn't need to allocate them, these'll be NULL */
-	drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
-	drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
-	drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
-		 DRM_MEM_DRIVER);
+	kfree(kcmd_addr);
+	kfree(kvb_addr);
+	kfree(kbox_addr);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 7dacc64e9b56..e725cc0b1155 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -40,7 +40,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 	drm_sis_private_t *dev_priv;
 	int ret;
 
-	dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
+	dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -48,7 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->chipset = chipset;
 	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
 	if (ret) {
-		drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
+		kfree(dev_priv);
 	}
 
 	return ret;
@@ -59,7 +59,7 @@ static int sis_driver_unload(struct drm_device *dev)
 	drm_sis_private_t *dev_priv = dev->dev_private;
 
 	drm_sman_takedown(&dev_priv->sman);
-	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index e8f6d2229d8c..4648ed2f0143 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -63,8 +63,7 @@ static int ttm_agp_populate(struct ttm_backend *backend,
 		if (!page)
 			page = dummy_read_page;
 
-		mem->memory[mem->page_count++] =
-		    phys_to_gart(page_to_phys(page));
+		mem->pages[mem->page_count++] = page;
 	}
 	agp_be->mem = mem;
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1587aeca7bea..c1c407f7cca3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -282,7 +282,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 	if (ret)
-		return ret;
+		goto out_err;
 
 	if (mem->mem_type != TTM_PL_SYSTEM) {
 		ret = ttm_tt_bind(bo->ttm, mem);
@@ -527,9 +527,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
 
-	if (ret && ret != -ERESTART) {
-		printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
-		       "buffer eviction.\n");
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTART) {
+			printk(KERN_ERR TTM_PFX
+			       "Failed to expire sync object before "
+			       "buffer eviction.\n");
+		}
 		goto out;
 	}
 
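
Editor's note: both ttm_bo.c hunks are about error unwinding rather than new behaviour. Returning directly after ttm_tt_set_placement_caching() fails would skip the cleanup that the function's out_err label performs, so the failure is now routed through the same exit path as the later errors, and the eviction path now bails out on any wait error while only printing for the non-restart case. The usual shape of the goto-unwind idiom, as a sketch with placeholder names (step_one, step_two, undo_partial_work are not real TTM functions):

static int handle_move(void *obj)
{
	int ret;

	ret = step_one(obj);		/* e.g. set placement caching */
	if (ret)
		goto out_err;		/* not 'return ret': the unwind below must still run */

	ret = step_two(obj);		/* e.g. bind the TTM */
	if (ret)
		goto out_err;

	return 0;

out_err:
	undo_partial_work(obj);		/* one exit path undoes whatever already succeeded */
	return ret;
}
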
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 517c84559633..bdec583901eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -34,7 +34,6 @@
 #include <linux/highmem.h>
 #include <linux/wait.h>
 #include <linux/vmalloc.h>
-#include <linux/version.h>
 #include <linux/module.h>
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 27b146c54fbc..40b75032ea47 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -32,7 +32,6 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <linux/mm.h>
-#include <linux/version.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index c27ab3a877ad..75dc8bd24592 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/version.h>
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/highmem.h>
@@ -68,7 +67,7 @@ static void ttm_tt_cache_flush_clflush(struct page *pages[],
 		ttm_tt_clflush_page(*pages++);
 	mb();
 }
-#else
+#elif !defined(__powerpc__)
 static void ttm_tt_ipi_handler(void *null)
 {
 	;
@@ -83,6 +82,15 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
 		ttm_tt_cache_flush_clflush(pages, num_pages);
 		return;
 	}
+#elif defined(__powerpc__)
+	unsigned long i;
+
+	for (i = 0; i < num_pages; ++i) {
+		if (pages[i]) {
+			unsigned long start = (unsigned long)page_address(pages[i]);
+			flush_dcache_range(start, start + PAGE_SIZE);
+		}
+	}
 #else
 	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
 		printk(KERN_ERR TTM_PFX
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 2c4f0b485792..6e6f91591639 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -96,7 +96,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 	drm_via_private_t *dev_priv;
 	int ret = 0;
 
-	dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+	dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -106,14 +106,14 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
 	if (ret) {
-		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+		kfree(dev_priv);
 		return ret;
 	}
 
 	ret = drm_vblank_init(dev, 1);
 	if (ret) {
 		drm_sman_takedown(&dev_priv->sman);
-		drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+		kfree(dev_priv);
 		return ret;
 	}
 
@@ -126,7 +126,7 @@ int via_driver_unload(struct drm_device *dev)
 
 	drm_sman_takedown(&dev_priv->sman);
 
-	drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	return 0;
 }