Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Makefile                |   3
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c           | 235
-rw-r--r--  drivers/gpu/drm/drm_drv.c               |  12
-rw-r--r--  drivers/gpu/drm/drm_info.c              | 328
-rw-r--r--  drivers/gpu/drm/drm_proc.c              | 721
-rw-r--r--  drivers/gpu/drm/drm_stub.c              |  15
-rw-r--r--  drivers/gpu/drm/i915/Makefile           |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 898
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c | 257
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c    | 334
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |  31
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h       |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c        |  66
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 406
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         | 148
20 files changed, 2335 insertions(+), 1300 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 30022c4a5c12..4ec5061fa584 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
-		drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
+		drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
+		drm_info.o drm_debugfs.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 000000000000..c77c6c6d9d2c
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,235 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+static struct drm_info_list drm_debugfs_list[] = {
+	{"name", drm_name_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"queues", drm_queues_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+	{"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct drm_info_node *node = inode->i_private;
+
+	return single_open(file, node->info_ent->show, node);
+}
+
+
+static const struct file_operations drm_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * gdm_debugfs_lists in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+			     struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct drm_info_node *tmp;
+	char name[64];
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		u32 features = files[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
+		tmp = drm_alloc(sizeof(struct drm_info_node),
+				_DRM_DRIVER);
+		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+					  root, tmp, &drm_debugfs_fops);
+		if (!ent) {
+			DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
+				  name, files[i].name);
+			drm_free(tmp, sizeof(struct drm_info_node),
+				 _DRM_DRIVER);
+			ret = -1;
+			goto fail;
+		}
+
+		tmp->minor = minor;
+		tmp->dent = ent;
+		tmp->info_ent = &files[i];
+		list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+	}
+	return 0;
+
+fail:
+	drm_debugfs_remove_files(files, count, minor);
+	return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root
+ * entry "/debugfs/dri/%minor%/", and each entry in debugfs_list as
+ * "/debugfs/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+		     struct dentry *root)
+{
+	struct drm_device *dev = minor->dev;
+	char name[64];
+	int ret;
+
+	INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+	sprintf(name, "%d", minor_id);
+	minor->debugfs_root = debugfs_create_dir(name, root);
+	if (!minor->debugfs_root) {
+		DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+		return -1;
+	}
+
+	ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret) {
+		debugfs_remove(minor->debugfs_root);
+		minor->debugfs_root = NULL;
+		DRM_ERROR("Failed to create core drm debugfs files\n");
+		return ret;
+	}
+
+	if (dev->driver->debugfs_init) {
+		ret = dev->driver->debugfs_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/debugfs/dri.\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+			     struct drm_minor *minor)
+{
+	struct list_head *pos, *q;
+	struct drm_info_node *tmp;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+			tmp = list_entry(pos, struct drm_info_node, list);
+			if (tmp->info_ent == &files[i]) {
+				debugfs_remove(tmp->dent);
+				list_del(pos);
+				drm_free(tmp, sizeof(struct drm_info_node),
+					 _DRM_DRIVER);
+			}
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor device minor number.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+
+	if (!minor->debugfs_root)
+		return 0;
+
+	if (dev->driver->debugfs_cleanup)
+		dev->driver->debugfs_cleanup(minor);
+
+	drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+	debugfs_remove(minor->debugfs_root);
+	minor->debugfs_root = NULL;
+
+	return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
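For reference, a minimal sketch of how a driver can hook into the debugfs support added above, via the new drm_driver::debugfs_init/debugfs_cleanup callbacks (the i915 changes further down do exactly this). The "foo" names and the single ring-info entry are hypothetical, for illustration only:

	/* Hypothetical driver-side usage of drm_debugfs_create_files(). */
	static int foo_ring_info(struct seq_file *m, void *data)
	{
		/* single_open() stashed the drm_info_node in m->private. */
		struct drm_info_node *node = m->private;
		struct drm_device *dev = node->minor->dev;

		seq_printf(m, "ring state for %s\n", pci_name(dev->pdev));
		return 0;
	}

	static struct drm_info_list foo_debugfs_list[] = {
		{"foo_ring", foo_ring_info, 0},
	};

	static int foo_debugfs_init(struct drm_minor *minor)
	{
		/* Creates /debugfs/dri/<minor>/foo_ring */
		return drm_debugfs_create_files(foo_debugfs_list,
						ARRAY_SIZE(foo_debugfs_list),
						minor->debugfs_root, minor);
	}

	static void foo_debugfs_cleanup(struct drm_minor *minor)
	{
		drm_debugfs_remove_files(foo_debugfs_list,
					 ARRAY_SIZE(foo_debugfs_list), minor);
	}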
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 14c7a23dc157..ed32edb17166 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,9 +46,11 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm_core.h"
 
+
 static int drm_version(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
 
 	/* Clear AGP information */
 	if (drm_core_has_AGP(dev) && dev->agp &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+			!drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_agp_mem *entry, *tempe;
 
 		/* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@ static int __init drm_core_init(void)
 		goto err_p3;
 	}
 
+	drm_debugfs_root = debugfs_create_dir("dri", NULL);
+	if (!drm_debugfs_root) {
+		DRM_ERROR("Cannot create /debugfs/dri\n");
+		ret = -1;
+		goto err_p3;
+	}
+
 	drm_mem_init();
 
 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@ err_p1:
 static void __exit drm_core_exit(void)
 {
 	remove_proc_entry("dri", NULL);
+	debugfs_remove(drm_debugfs_root);
 	drm_sysfs_destroy();
 
 	unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644
index 000000000000..fc98952b9033
--- /dev/null
+++ b/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,328 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *dev = minor->dev;
+	struct drm_master *master = minor->master;
+
+	if (!master)
+		return 0;
+
+	if (master->unique) {
+		seq_printf(m, "%s %s %s\n",
+			   dev->driver->pci_driver.name,
+			   pci_name(dev->pdev), master->unique);
+	} else {
+		seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+			   pci_name(dev->pdev));
+	}
+
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_map *map;
+	struct drm_map_list *r_list;
+
+	/* Hardcoded from _DRM_FRAME_BUFFER,
+	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+	const char *type;
+	int i;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "slot	 offset	      size type flags	 address mtrr\n\n");
+	i = 0;
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+
+		seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+			   i,
+			   map->offset,
+			   map->size, type, map->flags,
+			   (unsigned long) r_list->user_token);
+		if (map->mtrr < 0)
+			seq_printf(m, "none\n");
+		else
+			seq_printf(m, "%4d\n", map->mtrr);
+		i++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+	struct drm_queue *q;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "  ctx/flags   use   fin"
+		   "   blk/rw/rwf  wait    flushed	   queued"
+		   "      locks\n\n");
+	for (i = 0; i < dev->queue_count; i++) {
+		q = dev->queuelist[i];
+		atomic_inc(&q->use_count);
+		seq_printf(m, "%5d/0x%03x %5d %5d"
+			   " %5d/%c%c/%c%c%c %5Zd\n",
+			   i,
+			   q->flags,
+			   atomic_read(&q->use_count),
+			   atomic_read(&q->finalization),
+			   atomic_read(&q->block_count),
+			   atomic_read(&q->block_read) ? 'r' : '-',
+			   atomic_read(&q->block_write) ? 'w' : '-',
+			   waitqueue_active(&q->read_queue) ? 'r' : '-',
+			   waitqueue_active(&q->write_queue) ? 'w' : '-',
+			   waitqueue_active(&q->flush_queue) ? 'f' : '-',
+			   DRM_BUFCOUNT(&q->waitlist));
+		atomic_dec(&q->use_count);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_device_dma *dma;
+	int i, seg_pages;
+
+	mutex_lock(&dev->struct_mutex);
+	dma = dev->dma;
+	if (!dma) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	seq_printf(m, " o     size count  free	 segs pages    kB\n\n");
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].buf_count) {
+			seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+			seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+				   i,
+				   dma->bufs[i].buf_size,
+				   dma->bufs[i].buf_count,
+				   atomic_read(&dma->bufs[i].freelist.count),
+				   dma->bufs[i].seg_count,
+				   seg_pages,
+				   seg_pages * PAGE_SIZE / 1024);
+		}
+	}
+	seq_printf(m, "\n");
+	for (i = 0; i < dma->buf_count; i++) {
+		if (i && !(i % 32))
+			seq_printf(m, "\n");
+		seq_printf(m, " %d", dma->buflist[i]->list);
+	}
+	seq_printf(m, "\n");
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int crtc;
+
+	mutex_lock(&dev->struct_mutex);
+	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+		seq_printf(m, "CRTC %d enable:     %d\n",
+			   crtc, atomic_read(&dev->vblank_refcount[crtc]));
+		seq_printf(m, "CRTC %d counter:    %d\n",
+			   crtc, drm_vblank_count(dev, crtc));
+		seq_printf(m, "CRTC %d last wait:  %d\n",
+			   crtc, dev->last_vblank_wait[crtc]);
+		seq_printf(m, "CRTC %d in modeset: %d\n",
+			   crtc, dev->vblank_inmodeset[crtc]);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_file *priv;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "a dev	pid    uid	magic	  ioctls\n\n");
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+			   priv->authenticated ? 'y' : 'n',
+			   priv->minor->index,
+			   priv->pid,
+			   priv->uid, priv->magic, priv->ioctl_count);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+
+int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct seq_file *m = data;
+
+	seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
+
+	seq_printf(m, "%6d %8zd %7d %8d\n",
+		   obj->name, obj->size,
+		   atomic_read(&obj->handlecount.refcount),
+		   atomic_read(&obj->refcount.refcount));
+	return 0;
+}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "  name     size handles refcount\n");
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+	return 0;
+}
+
+int drm_gem_object_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+	seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
+	seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+	seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+	seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+	seq_printf(m, "%d gtt total\n", dev->gtt_total);
+	return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_vma_entry *pt;
+	struct vm_area_struct *vma;
+#if defined(__i386__)
+	unsigned int pgprot;
+#endif
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
+		   atomic_read(&dev->vma_count),
+		   high_memory, virt_to_phys(high_memory));
+
+	list_for_each_entry(pt, &dev->vmalist, head) {
+		vma = pt->vma;
+		if (!vma)
+			continue;
+		seq_printf(m,
+			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid, vma->vm_start, vma->vm_end,
+			   vma->vm_flags & VM_READ ? 'r' : '-',
+			   vma->vm_flags & VM_WRITE ? 'w' : '-',
+			   vma->vm_flags & VM_EXEC ? 'x' : '-',
+			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
+			   vma->vm_flags & VM_IO ? 'i' : '-',
+			   vma->vm_pgoff);
+
+#if defined(__i386__)
+		pgprot = pgprot_val(vma->vm_page_prot);
+		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+			   pgprot & _PAGE_PRESENT ? 'p' : '-',
+			   pgprot & _PAGE_RW ? 'w' : 'r',
+			   pgprot & _PAGE_USER ? 'u' : 's',
+			   pgprot & _PAGE_PWT ? 't' : 'b',
+			   pgprot & _PAGE_PCD ? 'u' : 'c',
+			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
+			   pgprot & _PAGE_DIRTY ? 'd' : '-',
+			   pgprot & _PAGE_PSE ? 'm' : 'k',
+			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+		seq_printf(m, "\n");
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+#endif
+
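A note on the pattern used by drm_gem_name_info() above: the GEM object-name table is an IDR, walked with the kernel's idr_for_each(), which invokes the supplied callback once per allocated ID and aborts the walk on a non-zero return. A minimal sketch of that callback shape, with hypothetical foo_* names that are not part of this patch:

	/* idr_for_each() visits every (id, pointer) pair in the IDR. */
	static int foo_print_one(int id, void *ptr, void *data)
	{
		struct seq_file *m = data;

		seq_printf(m, "id %d -> object %p\n", id, ptr);
		return 0;	/* a non-zero return would stop the walk */
	}

	static int foo_dump(struct seq_file *m, struct idr *idr)
	{
		return idr_for_each(idr, foo_print_one, m);
	}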
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f66830..9b3c5af61e98 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -37,697 +37,196 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/seq_file.h>
 #include "drmP.h"
 
-static int drm_name_info(char *buf, char **start, off_t offset,
-			 int request, int *eof, void *data);
-static int drm_vm_info(char *buf, char **start, off_t offset,
-		       int request, int *eof, void *data);
-static int drm_clients_info(char *buf, char **start, off_t offset,
-			    int request, int *eof, void *data);
-static int drm_queues_info(char *buf, char **start, off_t offset,
-			   int request, int *eof, void *data);
-static int drm_bufs_info(char *buf, char **start, off_t offset,
-			 int request, int *eof, void *data);
-static int drm_vblank_info(char *buf, char **start, off_t offset,
-			   int request, int *eof, void *data);
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
-			     int request, int *eof, void *data);
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
-			       int request, int *eof, void *data);
-#if DRM_DEBUG_CODE
-static int drm_vma_info(char *buf, char **start, off_t offset,
-			int request, int *eof, void *data);
-#endif
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
 
 /**
  * Proc file list.
  */
-static struct drm_proc_list {
-	const char *name;	/**< file name */
-	int (*f) (char *, char **, off_t, int, int *, void *);	/**< proc callback*/
-	u32 driver_features; /**< Required driver features for this entry */
-} drm_proc_list[] = {
+static struct drm_info_list drm_proc_list[] = {
 	{"name", drm_name_info, 0},
-	{"mem", drm_mem_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
 	{"queues", drm_queues_info, 0},
 	{"bufs", drm_bufs_info, 0},
-	{"vblank", drm_vblank_info, 0},
 	{"gem_names", drm_gem_name_info, DRIVER_GEM},
 	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
-	{"vma", drm_vma_info},
+	{"vma", drm_vma_info, 0},
 #endif
 };
-
 #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
 
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+	struct drm_info_node* node = PDE(inode)->data;
+
+	return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
 /**
- * Initialize the DRI proc filesystem for a device.
+ * Initialize a given set of proc files for a device
  *
- * \param dev DRM device.
- * \param minor device minor number.
+ * \param files The array of files to create
+ * \param count The number of files given
  * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
  *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
+ * Create a given set of proc files represented by an array of
+ * gdm_proc_lists in the given root directory.
  */
-int drm_proc_init(struct drm_minor *minor, int minor_id,
-		  struct proc_dir_entry *root)
+int drm_proc_create_files(struct drm_info_list *files, int count,
+			  struct proc_dir_entry *root, struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
 	struct proc_dir_entry *ent;
-	int i, j, ret;
+	struct drm_info_node *tmp;
 	char name[64];
+	int i, ret;
 
-	sprintf(name, "%d", minor_id);
-	minor->dev_root = proc_mkdir(name, root);
-	if (!minor->dev_root) {
-		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
-		return -1;
-	}
-
-	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
-		u32 features = drm_proc_list[i].driver_features;
+	for (i = 0; i < count; i++) {
+		u32 features = files[i].driver_features;
 
 		if (features != 0 &&
 		    (dev->driver->driver_features & features) != features)
 			continue;
 
-		ent = create_proc_entry(drm_proc_list[i].name,
-					S_IFREG | S_IRUGO, minor->dev_root);
+		tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
+		ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
-				  name, drm_proc_list[i].name);
+				  name, files[i].name);
+			drm_free(tmp, sizeof(struct drm_info_node),
+				 _DRM_DRIVER);
 			ret = -1;
 			goto fail;
 		}
-		ent->read_proc = drm_proc_list[i].f;
-		ent->data = minor;
-	}
 
-	if (dev->driver->proc_init) {
-		ret = dev->driver->proc_init(minor);
-		if (ret) {
-			DRM_ERROR("DRM: Driver failed to initialize "
-				  "/proc/dri.\n");
-			goto fail;
-		}
+		ent->proc_fops = &drm_proc_fops;
+		ent->data = tmp;
+		tmp->minor = minor;
+		tmp->info_ent = &files[i];
+		list_add(&(tmp->list), &(minor->proc_nodes.list));
 	}
-
 	return 0;
- fail:
 
-	for (j = 0; j < i; j++)
-		remove_proc_entry(drm_proc_list[i].name,
-				  minor->dev_root);
-	remove_proc_entry(name, root);
-	minor->dev_root = NULL;
+fail:
+	for (i = 0; i < count; i++)
+		remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
 	return ret;
 }
 
 /**
- * Cleanup the proc filesystem resources.
+ * Initialize the DRI proc filesystem for a device
  *
- * \param minor device minor number.
+ * \param dev DRM device
+ * \param minor device minor number
  * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return root entry pointer on success, or NULL on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
  */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+		  struct proc_dir_entry *root)
 {
 	struct drm_device *dev = minor->dev;
-	int i;
 	char name[64];
+	int ret;
 
-	if (!root || !minor->dev_root)
-		return 0;
-
-	if (dev->driver->proc_cleanup)
-		dev->driver->proc_cleanup(minor);
-
-	for (i = 0; i < DRM_PROC_ENTRIES; i++)
-		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
-	sprintf(name, "%d", minor->index);
-	remove_proc_entry(name, root);
-
-	return 0;
-}
+	INIT_LIST_HEAD(&minor->proc_nodes.list);
+	sprintf(name, "%d", minor_id);
+	minor->proc_root = proc_mkdir(name, root);
+	if (!minor->proc_root) {
+		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+		return -1;
+	}
+
+	ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+				    minor->proc_root, minor);
+	if (ret) {
+		remove_proc_entry(name, root);
+		minor->proc_root = NULL;
+		DRM_ERROR("Failed to create core drm proc files\n");
+		return ret;
+	}
+
+	if (dev->driver->proc_init) {
+		ret = dev->driver->proc_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/proc/dri.\n");
+			return ret;
+		}
+	}
+	return 0;
+}
 
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints the device name together with the bus id if available.
- */
-static int drm_name_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_master *master = minor->master;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	if (!master)
-		return 0;
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	if (master->unique) {
-		DRM_PROC_PRINT("%s %s %s\n",
-			       dev->driver->pci_driver.name,
-			       pci_name(dev->pdev), master->unique);
-	} else {
-		DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
-			       pci_name(dev->pdev));
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-static int drm__vm_info(char *buf, char **start, off_t offset, int request,
-			int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_map *map;
-	struct drm_map_list *r_list;
-
-	/* Hardcoded from _DRM_FRAME_BUFFER,
-	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
-	const char *type;
-	int i;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("slot	 offset	      size type flags	 "
-		       "address mtrr\n\n");
-	i = 0;
-	list_for_each_entry(r_list, &dev->maplist, head) {
-		map = r_list->map;
-		if (!map)
-			continue;
-		if (map->type < 0 || map->type > 5)
-			type = "??";
-		else
-			type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
-			       i,
-			       map->offset,
-			       map->size, type, map->flags,
-			       (unsigned long) r_list->user_token);
-		if (map->mtrr < 0) {
-			DRM_PROC_PRINT("none\n");
-		} else {
-			DRM_PROC_PRINT("%4d\n", map->mtrr);
-		}
-		i++;
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vm_info(char *buf, char **start, off_t offset, int request,
-		       int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__vm_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__queues_info(char *buf, char **start, off_t offset,
-			    int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	int i;
-	struct drm_queue *q;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("  ctx/flags   use   fin"
-		       "   blk/rw/rwf  wait    flushed	   queued"
-		       "      locks\n\n");
-	for (i = 0; i < dev->queue_count; i++) {
-		q = dev->queuelist[i];
-		atomic_inc(&q->use_count);
-		DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
-				   "%5d/0x%03x %5d %5d"
-				   " %5d/%c%c/%c%c%c %5Zd\n",
-				   i,
-				   q->flags,
-				   atomic_read(&q->use_count),
-				   atomic_read(&q->finalization),
-				   atomic_read(&q->block_count),
-				   atomic_read(&q->block_read) ? 'r' : '-',
-				   atomic_read(&q->block_write) ? 'w' : '-',
-				   waitqueue_active(&q->read_queue) ? 'r' : '-',
-				   waitqueue_active(&q->
-						    write_queue) ? 'w' : '-',
-				   waitqueue_active(&q->
-						    flush_queue) ? 'f' : '-',
-				   DRM_BUFCOUNT(&q->waitlist));
-		atomic_dec(&q->use_count);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_queues_info(char *buf, char **start, off_t offset, int request,
-			   int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__queues_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../bufs" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
-			  int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_device_dma *dma = dev->dma;
-	int i;
-
-	if (!dma || offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT(" o     size count  free	 segs pages    kB\n\n");
-	for (i = 0; i <= DRM_MAX_ORDER; i++) {
-		if (dma->bufs[i].buf_count)
-			DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
-				       i,
-				       dma->bufs[i].buf_size,
-				       dma->bufs[i].buf_count,
-				       atomic_read(&dma->bufs[i]
-						   .freelist.count),
-				       dma->bufs[i].seg_count,
-				       dma->bufs[i].seg_count
-				       * (1 << dma->bufs[i].page_order),
-				       (dma->bufs[i].seg_count
-					* (1 << dma->bufs[i].page_order))
-				       * PAGE_SIZE / 1024);
-	}
-	DRM_PROC_PRINT("\n");
-	for (i = 0; i < dma->buf_count; i++) {
-		if (i && !(i % 32))
-			DRM_PROC_PRINT("\n");
-		DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
-	}
-	DRM_PROC_PRINT("\n");
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__bufs_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../vblank" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
-			    int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	int crtc;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
-		DRM_PROC_PRINT("CRTC %d enable:     %d\n",
-			       crtc, atomic_read(&dev->vblank_refcount[crtc]));
-		DRM_PROC_PRINT("CRTC %d counter:    %d\n",
-			       crtc, drm_vblank_count(dev, crtc));
-		DRM_PROC_PRINT("CRTC %d last wait:  %d\n",
-			       crtc, dev->last_vblank_wait[crtc]);
-		DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
-			       crtc, dev->vblank_inmodeset[crtc]);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
-			   int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__vblank_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__clients_info(char *buf, char **start, off_t offset,
-			     int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_file *priv;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("a dev	pid    uid	magic	  ioctls\n\n");
-	list_for_each_entry(priv, &dev->filelist, lhead) {
-		DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
-			       priv->authenticated ? 'y' : 'n',
-			       priv->minor->index,
-			       priv->pid,
-			       priv->uid, priv->magic, priv->ioctl_count);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_clients_info(char *buf, char **start, off_t offset,
-			    int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__clients_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-struct drm_gem_name_info_data {
-	int len;
-	char *buf;
-	int eof;
-};
-
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
-	struct drm_gem_object *obj = ptr;
-	struct drm_gem_name_info_data *nid = data;
-
-	DRM_INFO("name %d size %zd\n", obj->name, obj->size);
-	if (nid->eof)
-		return 0;
-
-	nid->len += sprintf(&nid->buf[nid->len],
-			    "%6d %8zd %7d %8d\n",
-			    obj->name, obj->size,
-			    atomic_read(&obj->handlecount.refcount),
-			    atomic_read(&obj->refcount.refcount));
-	if (nid->len > DRM_PROC_LIMIT) {
-		nid->eof = 1;
-		return 0;
-	}
-	return 0;
-}
-
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
-			     int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	struct drm_gem_name_info_data nid;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	nid.len = sprintf(buf, "  name     size handles refcount\n");
-	nid.buf = buf;
-	nid.eof = 0;
-	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
-
-	*start = &buf[offset];
-	*eof = 0;
-	if (nid.len > request + offset)
-		return request;
-	*eof = 1;
-	return nid.len - offset;
-}
-
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
-			       int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
-	DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
-	DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
-	DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
-	DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
-	DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-#if DRM_DEBUG_CODE
-
-static int drm__vma_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_vma_entry *pt;
-	struct vm_area_struct *vma;
-#if defined(__i386__)
-	unsigned int pgprot;
-#endif
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
-		       atomic_read(&dev->vma_count),
-		       high_memory, virt_to_phys(high_memory));
-	list_for_each_entry(pt, &dev->vmalist, head) {
-		if (!(vma = pt->vma))
-			continue;
-		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			       pt->pid,
-			       vma->vm_start,
-			       vma->vm_end,
-			       vma->vm_flags & VM_READ ? 'r' : '-',
-			       vma->vm_flags & VM_WRITE ? 'w' : '-',
-			       vma->vm_flags & VM_EXEC ? 'x' : '-',
-			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
-			       vma->vm_flags & VM_LOCKED ? 'l' : '-',
-			       vma->vm_flags & VM_IO ? 'i' : '-',
-			       vma->vm_pgoff);
-
-#if defined(__i386__)
-		pgprot = pgprot_val(vma->vm_page_prot);
-		DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
-			       pgprot & _PAGE_PRESENT ? 'p' : '-',
-			       pgprot & _PAGE_RW ? 'w' : 'r',
-			       pgprot & _PAGE_USER ? 'u' : 's',
-			       pgprot & _PAGE_PWT ? 't' : 'b',
-			       pgprot & _PAGE_PCD ? 'u' : 'c',
-			       pgprot & _PAGE_ACCESSED ? 'a' : '-',
-			       pgprot & _PAGE_DIRTY ? 'd' : '-',
-			       pgprot & _PAGE_PSE ? 'm' : 'k',
-			       pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
-		DRM_PROC_PRINT("\n");
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int drm_vma_info(char *buf, char **start, off_t offset, int request,
-			int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__vma_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-#endif
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+			  struct drm_minor *minor)
+{
+	struct list_head *pos, *q;
+	struct drm_info_node *tmp;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+			tmp = list_entry(pos, struct drm_info_node, list);
+			if (tmp->info_ent == &files[i]) {
+				remove_proc_entry(files[i].name,
+						  minor->proc_root);
+				list_del(pos);
+				drm_free(tmp, sizeof(struct drm_info_node),
+					 _DRM_DRIVER);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor device minor number.
+ * \param root DRI proc dir entry.
+ * \param dev_root DRI device proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().
+ */
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+	struct drm_device *dev = minor->dev;
+	char name[64];
+
+	if (!root || !minor->proc_root)
+		return 0;
+
+	if (dev->driver->proc_cleanup)
+		dev->driver->proc_cleanup(minor);
+
+	drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
+
+	sprintf(name, "%d", minor->index);
+	remove_proc_entry(name, root);
+
+	return 0;
+}
+
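The conversion above hinges on one piece of plumbing: drm_proc_create_files() stores each per-file drm_info_node in ent->data, and drm_proc_open() recovers it through PDE(inode)->data before handing the node's show callback to single_open(). A stripped-down sketch of that same pattern with the 2.6-era procfs API (the foo_* names are hypothetical, not from this patch):

	static int foo_show(struct seq_file *m, void *unused)
	{
		/* m->private is whatever single_open() was given. */
		seq_printf(m, "private = %p\n", m->private);
		return 0;
	}

	static int foo_open(struct inode *inode, struct file *file)
	{
		/* PDE(inode)->data carries the pointer stored in ent->data. */
		return single_open(file, foo_show, PDE(inode)->data);
	}

	static const struct file_operations foo_fops = {
		.owner   = THIS_MODULE,
		.open    = foo_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	/* Registration, as in drm_proc_create_files():
	 *	ent = create_proc_entry("foo", S_IFREG | S_IRUGO, parent);
	 *	ent->proc_fops = &foo_fops;
	 *	ent->data = my_private;
	 */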
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7c8b15b22bf2..48f33be8fd0f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -50,6 +50,7 @@ struct idr drm_minors_idr;
 
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
 
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
 			goto err_mem;
 		}
 	} else
-		new_minor->dev_root = NULL;
+		new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+	if (ret) {
+		DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+		goto err_g2;
+	}
+#endif
 
 	ret = drm_sysfs_device_add(new_minor);
 	if (ret) {
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
 
 	if (minor->type == DRM_MINOR_LEGACY)
 		drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_cleanup(minor);
+#endif
+
 	drm_sysfs_device_remove(minor);
 
 	idr_remove(&drm_minors_idr, minor->index);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 793cba39d832..51c5a050aa73 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
-	  i915_gem_proc.o \
+	  i915_gem_debugfs.o \
 	  i915_gem_tiling.o \
 	  intel_display.o \
 	  intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d21b9e48b89..a818b377e1f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
41int i915_wait_ring(struct drm_device * dev, int n, const char *caller) 41int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
42{ 42{
43 drm_i915_private_t *dev_priv = dev->dev_private; 43 drm_i915_private_t *dev_priv = dev->dev_private;
44 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
45 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 44 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
46 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 45 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
47 u32 last_acthd = I915_READ(acthd_reg); 46 u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
58 if (ring->space >= n) 57 if (ring->space >= n)
59 return 0; 58 return 0;
60 59
61 if (master_priv->sarea_priv) 60 if (dev->primary->master) {
62 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 61 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
62 if (master_priv->sarea_priv)
63 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
64 }
65
63 66
64 if (ring->head != last_head) 67 if (ring->head != last_head)
65 i = 0; 68 i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
356 return ret; 359 return ret;
357} 360}
358 361
359static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) 362static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
360{ 363{
361 drm_i915_private_t *dev_priv = dev->dev_private; 364 drm_i915_private_t *dev_priv = dev->dev_private;
362 int i; 365 int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
370 for (i = 0; i < dwords;) { 373 for (i = 0; i < dwords;) {
371 int cmd, sz; 374 int cmd, sz;
372 375
373 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 376 cmd = buffer[i];
374 return -EINVAL;
375 377
376 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 378 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
377 return -EINVAL; 379 return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
379 OUT_RING(cmd); 381 OUT_RING(cmd);
380 382
381 while (++i, --sz) { 383 while (++i, --sz) {
382 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 384 OUT_RING(buffer[i]);
383 sizeof(cmd))) {
384 return -EINVAL;
385 }
386 OUT_RING(cmd);
387 } 385 }
388 } 386 }
389 387
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
397 395
398int 396int
399i915_emit_box(struct drm_device *dev, 397i915_emit_box(struct drm_device *dev,
400 struct drm_clip_rect __user *boxes, 398 struct drm_clip_rect *boxes,
401 int i, int DR1, int DR4) 399 int i, int DR1, int DR4)
402{ 400{
403 drm_i915_private_t *dev_priv = dev->dev_private; 401 drm_i915_private_t *dev_priv = dev->dev_private;
404 struct drm_clip_rect box; 402 struct drm_clip_rect box = boxes[i];
405 RING_LOCALS; 403 RING_LOCALS;
406 404
407 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
408 return -EFAULT;
409 }
410
411 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 405 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
412 DRM_ERROR("Bad box %d,%d..%d,%d\n", 406 DRM_ERROR("Bad box %d,%d..%d,%d\n",
413 box.x1, box.y1, box.x2, box.y2); 407 box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
460} 454}
461 455
462static int i915_dispatch_cmdbuffer(struct drm_device * dev, 456static int i915_dispatch_cmdbuffer(struct drm_device * dev,
463 drm_i915_cmdbuffer_t * cmd) 457 drm_i915_cmdbuffer_t *cmd,
458 struct drm_clip_rect *cliprects,
459 void *cmdbuf)
464{ 460{
465 int nbox = cmd->num_cliprects; 461 int nbox = cmd->num_cliprects;
466 int i = 0, count, ret; 462 int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
476 472
477 for (i = 0; i < count; i++) { 473 for (i = 0; i < count; i++) {
478 if (i < nbox) { 474 if (i < nbox) {
479 ret = i915_emit_box(dev, cmd->cliprects, i, 475 ret = i915_emit_box(dev, cliprects, i,
480 cmd->DR1, cmd->DR4); 476 cmd->DR1, cmd->DR4);
481 if (ret) 477 if (ret)
482 return ret; 478 return ret;
483 } 479 }
484 480
485 ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); 481 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
486 if (ret) 482 if (ret)
487 return ret; 483 return ret;
488 } 484 }
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
492} 488}
493 489
494static int i915_dispatch_batchbuffer(struct drm_device * dev, 490static int i915_dispatch_batchbuffer(struct drm_device * dev,
495 drm_i915_batchbuffer_t * batch) 491 drm_i915_batchbuffer_t * batch,
492 struct drm_clip_rect *cliprects)
496{ 493{
497 drm_i915_private_t *dev_priv = dev->dev_private; 494 drm_i915_private_t *dev_priv = dev->dev_private;
498 struct drm_clip_rect __user *boxes = batch->cliprects;
499 int nbox = batch->num_cliprects; 495 int nbox = batch->num_cliprects;
500 int i = 0, count; 496 int i = 0, count;
501 RING_LOCALS; 497 RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
511 507
512 for (i = 0; i < count; i++) { 508 for (i = 0; i < count; i++) {
513 if (i < nbox) { 509 if (i < nbox) {
514 int ret = i915_emit_box(dev, boxes, i, 510 int ret = i915_emit_box(dev, cliprects, i,
515 batch->DR1, batch->DR4); 511 batch->DR1, batch->DR4);
516 if (ret) 512 if (ret)
517 return ret; 513 return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
626 master_priv->sarea_priv; 622 master_priv->sarea_priv;
627 drm_i915_batchbuffer_t *batch = data; 623 drm_i915_batchbuffer_t *batch = data;
628 int ret; 624 int ret;
625 struct drm_clip_rect *cliprects = NULL;
629 626
630 if (!dev_priv->allow_batchbuffer) { 627 if (!dev_priv->allow_batchbuffer) {
631 DRM_ERROR("Batchbuffer ioctl disabled\n"); 628 DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
637 634
638 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
639 636
640 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 637 if (batch->num_cliprects < 0)
641 batch->num_cliprects * 638 return -EINVAL;
642 sizeof(struct drm_clip_rect))) 639
643 return -EFAULT; 640 if (batch->num_cliprects) {
641 cliprects = drm_calloc(batch->num_cliprects,
642 sizeof(struct drm_clip_rect),
643 DRM_MEM_DRIVER);
644 if (cliprects == NULL)
645 return -ENOMEM;
646
647 ret = copy_from_user(cliprects, batch->cliprects,
648 batch->num_cliprects *
649 sizeof(struct drm_clip_rect));
650 if (ret != 0)
651 goto fail_free;
652 }
644 653
645 mutex_lock(&dev->struct_mutex); 654 mutex_lock(&dev->struct_mutex);
646 ret = i915_dispatch_batchbuffer(dev, batch); 655 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
647 mutex_unlock(&dev->struct_mutex); 656 mutex_unlock(&dev->struct_mutex);
648 657
649 if (sarea_priv) 658 if (sarea_priv)
650 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 659 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
660
661fail_free:
662 drm_free(cliprects,
663 batch->num_cliprects * sizeof(struct drm_clip_rect),
664 DRM_MEM_DRIVER);
665
651 return ret; 666 return ret;
652} 667}
653 668
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
659 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 674 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
660 master_priv->sarea_priv; 675 master_priv->sarea_priv;
661 drm_i915_cmdbuffer_t *cmdbuf = data; 676 drm_i915_cmdbuffer_t *cmdbuf = data;
677 struct drm_clip_rect *cliprects = NULL;
678 void *batch_data;
662 int ret; 679 int ret;
663 680
664 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 681 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
666 683
667 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
668 685
669 if (cmdbuf->num_cliprects && 686 if (cmdbuf->num_cliprects < 0)
670 DRM_VERIFYAREA_READ(cmdbuf->cliprects, 687 return -EINVAL;
671 cmdbuf->num_cliprects * 688
672 sizeof(struct drm_clip_rect))) { 689 batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
673 DRM_ERROR("Fault accessing cliprects\n"); 690 if (batch_data == NULL)
674 return -EFAULT; 691 return -ENOMEM;
692
693 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
694 if (ret != 0)
695 goto fail_batch_free;
696
697 if (cmdbuf->num_cliprects) {
698 cliprects = drm_calloc(cmdbuf->num_cliprects,
699 sizeof(struct drm_clip_rect),
700 DRM_MEM_DRIVER);
 701 if (cliprects == NULL) {
 702 ret = -ENOMEM;
 703 goto fail_batch_free;
 704 }
 705
704 ret = copy_from_user(cliprects, cmdbuf->cliprects,
705 cmdbuf->num_cliprects *
706 sizeof(struct drm_clip_rect));
 707 if (ret != 0) {
 708 ret = -EFAULT;
 709 goto fail_clip_free;
 710 }
675 } 709 }
676 710
677 mutex_lock(&dev->struct_mutex); 711 mutex_lock(&dev->struct_mutex);
678 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 712 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
679 mutex_unlock(&dev->struct_mutex); 713 mutex_unlock(&dev->struct_mutex);
680 if (ret) { 714 if (ret) {
681 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 715 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
 682 return ret; 716 goto fail_clip_free;
683 } 717 }
684 718
685 if (sarea_priv) 719 if (sarea_priv)
686 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 720 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
687 return 0; 721
 722fail_clip_free:
 723 drm_free(cliprects,
 724 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
 725 DRM_MEM_DRIVER);
 726fail_batch_free:
 727 drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
728
729 return ret;
688} 730}
689 731
690static int i915_flip_bufs(struct drm_device *dev, void *data, 732static int i915_flip_bufs(struct drm_device *dev, void *data,
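Note the ordering of the two cleanup labels above: fail_clip_free releases the cliprect array and then falls through into fail_batch_free, so each goto target frees exactly what has been allocated at the point the jump can happen, and the success path falls through both frees on its way out. The idiom in isolation, as a sketch (example_two_step() and fill() are hypothetical stand-ins for the allocate/copy/dispatch steps above):

#include <linux/slab.h>

/* Hypothetical work step standing in for copy_from_user()/dispatch. */
static int fill(void *buf)
{
	return 0;
}

static int example_two_step(size_t a_sz, size_t b_sz)
{
	void *a, *b;
	int ret;

	a = kmalloc(a_sz, GFP_KERNEL);
	if (a == NULL)
		return -ENOMEM;

	b = kmalloc(b_sz, GFP_KERNEL);
	if (b == NULL) {
		ret = -ENOMEM;
		goto free_a;	/* 'b' was never allocated */
	}

	ret = fill(b);
	if (ret)
		goto free_b;	/* unwind in reverse allocation order */

	/* ... success-only work goes here ... */

free_b:
	kfree(b);
free_a:
	kfree(a);
	return ret;
}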
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b293ef0bae71..dcb91f5df6e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,8 +150,10 @@ static struct drm_driver driver = {
150 .get_reg_ofs = drm_core_get_reg_ofs, 150 .get_reg_ofs = drm_core_get_reg_ofs,
151 .master_create = i915_master_create, 151 .master_create = i915_master_create,
152 .master_destroy = i915_master_destroy, 152 .master_destroy = i915_master_destroy,
153 .proc_init = i915_gem_proc_init, 153#if defined(CONFIG_DEBUG_FS)
154 .proc_cleanup = i915_gem_proc_cleanup, 154 .debugfs_init = i915_gem_debugfs_init,
155 .debugfs_cleanup = i915_gem_debugfs_cleanup,
156#endif
155 .gem_init_object = i915_gem_init_object, 157 .gem_init_object = i915_gem_init_object,
156 .gem_free_object = i915_gem_free_object, 158 .gem_free_object = i915_gem_free_object,
157 .gem_vm_ops = &i915_gem_vm_ops, 159 .gem_vm_ops = &i915_gem_vm_ops,
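The driver now registers its instrumentation through debugfs hooks, compiled only under CONFIG_DEBUG_FS, instead of the unconditional proc hooks. A minimal sketch of what such a driver-side registration looks like against the drm_debugfs helpers added earlier in this series (the example_* names are hypothetical; i915_gem_debugfs.c below is the real user):

#if defined(CONFIG_DEBUG_FS)
#include <linux/seq_file.h>
#include "drmP.h"

static int example_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	seq_printf(m, "dev_private %p\n", dev->dev_private);
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{"example_info", example_info, 0},
};
#define EXAMPLE_DEBUGFS_ENTRIES ARRAY_SIZE(example_debugfs_list)

int example_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(example_debugfs_list,
					EXAMPLE_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void example_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(example_debugfs_list,
				 EXAMPLE_DEBUGFS_ENTRIES, minor);
}
#endif /* CONFIG_DEBUG_FS */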
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc9861e0a1..c1685d0c704f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
404 /** AGP memory structure for our GTT binding. */ 404 /** AGP memory structure for our GTT binding. */
405 DRM_AGP_MEM *agp_mem; 405 DRM_AGP_MEM *agp_mem;
406 406
407 struct page **page_list; 407 struct page **pages;
408 int pages_refcount;
408 409
409 /** 410 /**
410 * Current offset of the object in GTT space. 411 * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
519extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 520extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
520 unsigned long arg); 521 unsigned long arg);
521extern int i915_emit_box(struct drm_device *dev, 522extern int i915_emit_box(struct drm_device *dev,
522 struct drm_clip_rect __user *boxes, 523 struct drm_clip_rect *boxes,
523 int i, int DR1, int DR4); 524 int i, int DR1, int DR4);
524 525
525/* i915_irq.c */ 526/* i915_irq.c */
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
604int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 605int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
605 struct drm_file *file_priv); 606 struct drm_file *file_priv);
606void i915_gem_load(struct drm_device *dev); 607void i915_gem_load(struct drm_device *dev);
607int i915_gem_proc_init(struct drm_minor *minor);
608void i915_gem_proc_cleanup(struct drm_minor *minor);
609int i915_gem_init_object(struct drm_gem_object *obj); 608int i915_gem_init_object(struct drm_gem_object *obj);
610void i915_gem_free_object(struct drm_gem_object *obj); 609void i915_gem_free_object(struct drm_gem_object *obj);
611int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 610int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
649 const char *where, uint32_t mark); 648 const char *where, uint32_t mark);
650void i915_dump_lru(struct drm_device *dev, const char *where); 649void i915_dump_lru(struct drm_device *dev, const char *where);
651 650
651/* i915_debugfs.c */
652int i915_gem_debugfs_init(struct drm_minor *minor);
653void i915_gem_debugfs_cleanup(struct drm_minor *minor);
654
652/* i915_suspend.c */ 655/* i915_suspend.c */
653extern int i915_save_state(struct drm_device *dev); 656extern int i915_save_state(struct drm_device *dev);
654extern int i915_restore_state(struct drm_device *dev); 657extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 (dev)->pci_device == 0x2E22 || \ 787 (dev)->pci_device == 0x2E22 || \
785 IS_GM45(dev)) 788 IS_GM45(dev))
786 789
790#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
791#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
792#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
793
787#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 794#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
788 (dev)->pci_device == 0x29B2 || \ 795 (dev)->pci_device == 0x29B2 || \
789 (dev)->pci_device == 0x29D2) 796 (dev)->pci_device == 0x29D2 || \
797 (IS_IGD(dev)))
790 798
791#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 799#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
792 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 800 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
793 801
794#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 802#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
795 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 803 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
804 IS_IGD(dev))
796 805
797#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 806#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
798/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 807/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
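The new IS_IGDG/IS_IGDGM IDs fold into IS_IGD(), which in turn extends IS_G33() and IS_MOBILE(), so feature code keeps testing family predicates rather than raw PCI IDs. A sketch of how the composition propagates, assuming the i915_drv.h definitions above (example_check() is hypothetical):

/* Adding 0xa011 to IS_IGDGM() makes every derived predicate true: */
static void example_check(struct drm_device *dev)
{
	if (IS_IGDGM(dev)) {
		BUG_ON(!IS_IGD(dev));		/* by definition */
		BUG_ON(!IS_G33(dev));		/* IGD joins the G33 family */
		BUG_ON(!IS_I9XX(dev));		/* via IS_G33() */
		BUG_ON(!IS_MOBILE(dev));	/* the GM variant is mobile */
	}
}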
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37427e4016cb..b52cba0f16d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset, 43 uint64_t offset,
44 uint64_t size); 44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 46static int i915_gem_object_get_pages(struct drm_gem_object *obj);
47static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 47static void i915_gem_object_put_pages(struct drm_gem_object *obj);
48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 unsigned alignment); 50 unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return 0; 136 return 0;
137} 137}
138 138
139static inline int
140fast_shmem_read(struct page **pages,
141 loff_t page_base, int page_offset,
142 char __user *data,
143 int length)
144{
145 char __iomem *vaddr;
146 int ret;
147
148 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 if (vaddr == NULL)
150 return -ENOMEM;
151 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
152 kunmap_atomic(vaddr, KM_USER0);
153
154 return ret;
155}
156
157static inline int
158slow_shmem_copy(struct page *dst_page,
159 int dst_offset,
160 struct page *src_page,
161 int src_offset,
162 int length)
163{
164 char *dst_vaddr, *src_vaddr;
165
166 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
167 if (dst_vaddr == NULL)
168 return -ENOMEM;
169
170 src_vaddr = kmap_atomic(src_page, KM_USER1);
171 if (src_vaddr == NULL) {
172 kunmap_atomic(dst_vaddr, KM_USER0);
173 return -ENOMEM;
174 }
175
176 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
177
178 kunmap_atomic(src_vaddr, KM_USER1);
179 kunmap_atomic(dst_vaddr, KM_USER0);
180
181 return 0;
182}
183
184/**
 185 * This is the fast shmem pread path, which attempts to copy_to_user directly
 186 * from the backing pages of the object to the user's address space. On a
 187 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
188 */
189static int
190i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
191 struct drm_i915_gem_pread *args,
192 struct drm_file *file_priv)
193{
194 struct drm_i915_gem_object *obj_priv = obj->driver_private;
195 ssize_t remain;
196 loff_t offset, page_base;
197 char __user *user_data;
198 int page_offset, page_length;
199 int ret;
200
201 user_data = (char __user *) (uintptr_t) args->data_ptr;
202 remain = args->size;
203
204 mutex_lock(&dev->struct_mutex);
205
206 ret = i915_gem_object_get_pages(obj);
207 if (ret != 0)
208 goto fail_unlock;
209
210 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
211 args->size);
212 if (ret != 0)
213 goto fail_put_pages;
214
215 obj_priv = obj->driver_private;
216 offset = args->offset;
217
218 while (remain > 0) {
219 /* Operation in this page
220 *
 221 * page_base = page offset within the object
222 * page_offset = offset within page
223 * page_length = bytes to copy for this page
224 */
225 page_base = (offset & ~(PAGE_SIZE-1));
226 page_offset = offset & (PAGE_SIZE-1);
227 page_length = remain;
228 if ((page_offset + remain) > PAGE_SIZE)
229 page_length = PAGE_SIZE - page_offset;
230
231 ret = fast_shmem_read(obj_priv->pages,
232 page_base, page_offset,
233 user_data, page_length);
234 if (ret)
235 goto fail_put_pages;
236
237 remain -= page_length;
238 user_data += page_length;
239 offset += page_length;
240 }
241
242fail_put_pages:
243 i915_gem_object_put_pages(obj);
244fail_unlock:
245 mutex_unlock(&dev->struct_mutex);
246
247 return ret;
248}
249
250/**
 251 * This is the fallback shmem pread path, which pins the user pages with
 252 * get_user_pages outside of the struct_mutex, so that the copy out of the
 253 * object's backing pages can then run while holding the struct mutex
 254 * without taking page faults.
255 */
256static int
257i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
258 struct drm_i915_gem_pread *args,
259 struct drm_file *file_priv)
260{
261 struct drm_i915_gem_object *obj_priv = obj->driver_private;
262 struct mm_struct *mm = current->mm;
263 struct page **user_pages;
264 ssize_t remain;
265 loff_t offset, pinned_pages, i;
266 loff_t first_data_page, last_data_page, num_pages;
267 int shmem_page_index, shmem_page_offset;
268 int data_page_index, data_page_offset;
269 int page_length;
270 int ret;
271 uint64_t data_ptr = args->data_ptr;
272
273 remain = args->size;
274
275 /* Pin the user pages containing the data. We can't fault while
276 * holding the struct mutex, yet we want to hold it while
277 * dereferencing the user data.
278 */
279 first_data_page = data_ptr / PAGE_SIZE;
280 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
281 num_pages = last_data_page - first_data_page + 1;
282
283 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
284 if (user_pages == NULL)
285 return -ENOMEM;
286
287 down_read(&mm->mmap_sem);
288 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
289 num_pages, 0, 0, user_pages, NULL);
290 up_read(&mm->mmap_sem);
291 if (pinned_pages < num_pages) {
292 ret = -EFAULT;
293 goto fail_put_user_pages;
294 }
295
296 mutex_lock(&dev->struct_mutex);
297
298 ret = i915_gem_object_get_pages(obj);
299 if (ret != 0)
300 goto fail_unlock;
301
302 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
303 args->size);
304 if (ret != 0)
305 goto fail_put_pages;
306
307 obj_priv = obj->driver_private;
308 offset = args->offset;
309
310 while (remain > 0) {
311 /* Operation in this page
312 *
313 * shmem_page_index = page number within shmem file
314 * shmem_page_offset = offset within page in shmem file
315 * data_page_index = page number in get_user_pages return
 316 * data_page_offset = offset within data_page_index page.
317 * page_length = bytes to copy for this page
318 */
319 shmem_page_index = offset / PAGE_SIZE;
320 shmem_page_offset = offset & ~PAGE_MASK;
321 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
322 data_page_offset = data_ptr & ~PAGE_MASK;
323
324 page_length = remain;
325 if ((shmem_page_offset + page_length) > PAGE_SIZE)
326 page_length = PAGE_SIZE - shmem_page_offset;
327 if ((data_page_offset + page_length) > PAGE_SIZE)
328 page_length = PAGE_SIZE - data_page_offset;
329
330 ret = slow_shmem_copy(user_pages[data_page_index],
331 data_page_offset,
332 obj_priv->pages[shmem_page_index],
333 shmem_page_offset,
334 page_length);
335 if (ret)
336 goto fail_put_pages;
337
338 remain -= page_length;
339 data_ptr += page_length;
340 offset += page_length;
341 }
342
343fail_put_pages:
344 i915_gem_object_put_pages(obj);
345fail_unlock:
346 mutex_unlock(&dev->struct_mutex);
347fail_put_user_pages:
348 for (i = 0; i < pinned_pages; i++) {
349 SetPageDirty(user_pages[i]);
350 page_cache_release(user_pages[i]);
351 }
352 kfree(user_pages);
353
354 return ret;
355}
356
139/** 357/**
140 * Reads data from the object referenced by handle. 358 * Reads data from the object referenced by handle.
141 * 359 *
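fast_shmem_read() depends on __copy_to_user_inatomic() failing, instead of faulting, when the destination page is not resident: kmap_atomic() forbids sleeping, so an ordinary copy_to_user(), which may fault and block, would be illegal there. The primitive in isolation, as a sketch using this kernel's KM_USER0 slot (try_atomic_read() is a hypothetical name):

#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Copy out of a kmapped page without ever taking a page fault.  A
 * nonzero return means "fall back to a path that is allowed to sleep". */
static int try_atomic_read(struct page *page, int offset,
			   char __user *to, int len)
{
	char *vaddr;
	unsigned long unread;

	vaddr = kmap_atomic(page, KM_USER0);	/* no sleeping from here on */
	unread = __copy_to_user_inatomic(to, vaddr + offset, len);
	kunmap_atomic(vaddr, KM_USER0);

	return unread ? -EFAULT : 0;
}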
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
148 struct drm_i915_gem_pread *args = data; 366 struct drm_i915_gem_pread *args = data;
149 struct drm_gem_object *obj; 367 struct drm_gem_object *obj;
150 struct drm_i915_gem_object *obj_priv; 368 struct drm_i915_gem_object *obj_priv;
151 ssize_t read;
152 loff_t offset;
153 int ret; 369 int ret;
154 370
155 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 371 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
167 return -EINVAL; 383 return -EINVAL;
168 } 384 }
169 385
170 mutex_lock(&dev->struct_mutex); 386 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
171 387 if (ret != 0)
172 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 388 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
173 args->size);
174 if (ret != 0) {
175 drm_gem_object_unreference(obj);
176 mutex_unlock(&dev->struct_mutex);
177 return ret;
178 }
179
180 offset = args->offset;
181
182 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
183 args->size, &offset);
184 if (read != args->size) {
185 drm_gem_object_unreference(obj);
186 mutex_unlock(&dev->struct_mutex);
187 if (read < 0)
188 return read;
189 else
190 return -EINVAL;
191 }
192 389
193 drm_gem_object_unreference(obj); 390 drm_gem_object_unreference(obj);
194 mutex_unlock(&dev->struct_mutex);
195 391
196 return 0; 392 return ret;
197} 393}
198 394
199/* This is the fast write path which cannot handle 395/* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
223 */ 419 */
224 420
225static inline int 421static inline int
226slow_user_write(struct io_mapping *mapping, 422slow_kernel_write(struct io_mapping *mapping,
227 loff_t page_base, int page_offset, 423 loff_t gtt_base, int gtt_offset,
228 char __user *user_data, 424 struct page *user_page, int user_offset,
229 int length) 425 int length)
230{ 426{
231 char __iomem *vaddr; 427 char *src_vaddr, *dst_vaddr;
232 unsigned long unwritten; 428 unsigned long unwritten;
233 429
234 vaddr = io_mapping_map_wc(mapping, page_base); 430 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
235 if (vaddr == NULL) 431 src_vaddr = kmap_atomic(user_page, KM_USER1);
236 return -EFAULT; 432 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
237 unwritten = __copy_from_user(vaddr + page_offset, 433 src_vaddr + user_offset,
238 user_data, length); 434 length);
239 io_mapping_unmap(vaddr); 435 kunmap_atomic(src_vaddr, KM_USER1);
436 io_mapping_unmap_atomic(dst_vaddr);
240 if (unwritten) 437 if (unwritten)
241 return -EFAULT; 438 return -EFAULT;
242 return 0; 439 return 0;
243} 440}
244 441
442static inline int
443fast_shmem_write(struct page **pages,
444 loff_t page_base, int page_offset,
445 char __user *data,
446 int length)
447{
 448 char __iomem *vaddr;
 449 unsigned long unwritten;
 450
 451 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 452 if (vaddr == NULL)
 453 return -ENOMEM;
 454 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
 455 kunmap_atomic(vaddr, KM_USER0);
 456 if (unwritten)
 457 return -EFAULT;
 458
 459 return 0;
457}
458
459/**
460 * This is the fast pwrite path, where we copy the data directly from the
461 * user into the GTT, uncached.
462 */
245static int 463static int
246i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 464i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
247 struct drm_i915_gem_pwrite *args, 465 struct drm_i915_gem_pwrite *args,
248 struct drm_file *file_priv) 466 struct drm_file *file_priv)
249{ 467{
250 struct drm_i915_gem_object *obj_priv = obj->driver_private; 468 struct drm_i915_gem_object *obj_priv = obj->driver_private;
251 drm_i915_private_t *dev_priv = dev->dev_private; 469 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
273 491
274 obj_priv = obj->driver_private; 492 obj_priv = obj->driver_private;
275 offset = obj_priv->gtt_offset + args->offset; 493 offset = obj_priv->gtt_offset + args->offset;
276 obj_priv->dirty = 1;
277 494
278 while (remain > 0) { 495 while (remain > 0) {
279 /* Operation in this page 496 /* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
292 page_offset, user_data, page_length); 509 page_offset, user_data, page_length);
293 510
294 /* If we get a fault while copying data, then (presumably) our 511 /* If we get a fault while copying data, then (presumably) our
295 * source page isn't available. In this case, use the 512 * source page isn't available. Return the error and we'll
296 * non-atomic function 513 * retry in the slow path.
297 */ 514 */
298 if (ret) { 515 if (ret)
299 ret = slow_user_write (dev_priv->mm.gtt_mapping, 516 goto fail;
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail;
304 }
305 517
306 remain -= page_length; 518 remain -= page_length;
307 user_data += page_length; 519 user_data += page_length;
@@ -315,39 +527,284 @@ fail:
315 return ret; 527 return ret;
316} 528}
317 529
530/**
531 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
532 * the memory and maps it using kmap_atomic for copying.
533 *
534 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
535 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
536 */
318static int 537static int
319i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 538i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
320 struct drm_i915_gem_pwrite *args, 539 struct drm_i915_gem_pwrite *args,
321 struct drm_file *file_priv) 540 struct drm_file *file_priv)
322{ 541{
542 struct drm_i915_gem_object *obj_priv = obj->driver_private;
543 drm_i915_private_t *dev_priv = dev->dev_private;
544 ssize_t remain;
545 loff_t gtt_page_base, offset;
546 loff_t first_data_page, last_data_page, num_pages;
547 loff_t pinned_pages, i;
548 struct page **user_pages;
549 struct mm_struct *mm = current->mm;
550 int gtt_page_offset, data_page_offset, data_page_index, page_length;
323 int ret; 551 int ret;
324 loff_t offset; 552 uint64_t data_ptr = args->data_ptr;
325 ssize_t written; 553
554 remain = args->size;
555
556 /* Pin the user pages containing the data. We can't fault while
557 * holding the struct mutex, and all of the pwrite implementations
558 * want to hold it while dereferencing the user data.
559 */
560 first_data_page = data_ptr / PAGE_SIZE;
561 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
562 num_pages = last_data_page - first_data_page + 1;
563
564 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
565 if (user_pages == NULL)
566 return -ENOMEM;
567
568 down_read(&mm->mmap_sem);
569 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
570 num_pages, 0, 0, user_pages, NULL);
571 up_read(&mm->mmap_sem);
572 if (pinned_pages < num_pages) {
573 ret = -EFAULT;
574 goto out_unpin_pages;
575 }
326 576
327 mutex_lock(&dev->struct_mutex); 577 mutex_lock(&dev->struct_mutex);
578 ret = i915_gem_object_pin(obj, 0);
579 if (ret)
580 goto out_unlock;
581
582 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
583 if (ret)
584 goto out_unpin_object;
585
586 obj_priv = obj->driver_private;
587 offset = obj_priv->gtt_offset + args->offset;
588
589 while (remain > 0) {
590 /* Operation in this page
591 *
592 * gtt_page_base = page offset within aperture
593 * gtt_page_offset = offset within page in aperture
594 * data_page_index = page number in get_user_pages return
 595 * data_page_offset = offset within data_page_index page.
596 * page_length = bytes to copy for this page
597 */
598 gtt_page_base = offset & PAGE_MASK;
599 gtt_page_offset = offset & ~PAGE_MASK;
600 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
601 data_page_offset = data_ptr & ~PAGE_MASK;
602
603 page_length = remain;
604 if ((gtt_page_offset + page_length) > PAGE_SIZE)
605 page_length = PAGE_SIZE - gtt_page_offset;
606 if ((data_page_offset + page_length) > PAGE_SIZE)
607 page_length = PAGE_SIZE - data_page_offset;
608
609 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
610 gtt_page_base, gtt_page_offset,
611 user_pages[data_page_index],
612 data_page_offset,
613 page_length);
614
 615 /* A fault should not be possible here, since both the source
 616 * and destination are pinned kernel mappings; if the copy fails
 617 * anyway, report the error rather than retrying.
 618 */
619 if (ret)
620 goto out_unpin_object;
621
622 remain -= page_length;
623 offset += page_length;
624 data_ptr += page_length;
625 }
626
627out_unpin_object:
628 i915_gem_object_unpin(obj);
629out_unlock:
630 mutex_unlock(&dev->struct_mutex);
631out_unpin_pages:
632 for (i = 0; i < pinned_pages; i++)
633 page_cache_release(user_pages[i]);
634 kfree(user_pages);
635
636 return ret;
637}
638
639/**
640 * This is the fast shmem pwrite path, which attempts to directly
641 * copy_from_user into the kmapped pages backing the object.
642 */
643static int
644i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
645 struct drm_i915_gem_pwrite *args,
646 struct drm_file *file_priv)
647{
648 struct drm_i915_gem_object *obj_priv = obj->driver_private;
649 ssize_t remain;
650 loff_t offset, page_base;
651 char __user *user_data;
652 int page_offset, page_length;
653 int ret;
654
655 user_data = (char __user *) (uintptr_t) args->data_ptr;
656 remain = args->size;
657
658 mutex_lock(&dev->struct_mutex);
659
660 ret = i915_gem_object_get_pages(obj);
661 if (ret != 0)
662 goto fail_unlock;
328 663
329 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 664 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
330 if (ret) { 665 if (ret != 0)
331 mutex_unlock(&dev->struct_mutex); 666 goto fail_put_pages;
332 return ret; 667
668 obj_priv = obj->driver_private;
669 offset = args->offset;
670 obj_priv->dirty = 1;
671
672 while (remain > 0) {
673 /* Operation in this page
674 *
 675 * page_base = page offset within the object
676 * page_offset = offset within page
677 * page_length = bytes to copy for this page
678 */
679 page_base = (offset & ~(PAGE_SIZE-1));
680 page_offset = offset & (PAGE_SIZE-1);
681 page_length = remain;
682 if ((page_offset + remain) > PAGE_SIZE)
683 page_length = PAGE_SIZE - page_offset;
684
685 ret = fast_shmem_write(obj_priv->pages,
686 page_base, page_offset,
687 user_data, page_length);
688 if (ret)
689 goto fail_put_pages;
690
691 remain -= page_length;
692 user_data += page_length;
693 offset += page_length;
333 } 694 }
334 695
696fail_put_pages:
697 i915_gem_object_put_pages(obj);
698fail_unlock:
699 mutex_unlock(&dev->struct_mutex);
700
701 return ret;
702}
703
704/**
705 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
706 * the memory and maps it using kmap_atomic for copying.
707 *
708 * This avoids taking mmap_sem for faulting on the user's address while the
709 * struct_mutex is held.
710 */
711static int
712i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
713 struct drm_i915_gem_pwrite *args,
714 struct drm_file *file_priv)
715{
716 struct drm_i915_gem_object *obj_priv = obj->driver_private;
717 struct mm_struct *mm = current->mm;
718 struct page **user_pages;
719 ssize_t remain;
720 loff_t offset, pinned_pages, i;
721 loff_t first_data_page, last_data_page, num_pages;
722 int shmem_page_index, shmem_page_offset;
723 int data_page_index, data_page_offset;
724 int page_length;
725 int ret;
726 uint64_t data_ptr = args->data_ptr;
727
728 remain = args->size;
729
730 /* Pin the user pages containing the data. We can't fault while
731 * holding the struct mutex, and all of the pwrite implementations
732 * want to hold it while dereferencing the user data.
733 */
734 first_data_page = data_ptr / PAGE_SIZE;
735 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
736 num_pages = last_data_page - first_data_page + 1;
737
738 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
739 if (user_pages == NULL)
740 return -ENOMEM;
741
742 down_read(&mm->mmap_sem);
743 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
744 num_pages, 0, 0, user_pages, NULL);
745 up_read(&mm->mmap_sem);
746 if (pinned_pages < num_pages) {
747 ret = -EFAULT;
748 goto fail_put_user_pages;
749 }
750
751 mutex_lock(&dev->struct_mutex);
752
753 ret = i915_gem_object_get_pages(obj);
754 if (ret != 0)
755 goto fail_unlock;
756
757 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
758 if (ret != 0)
759 goto fail_put_pages;
760
761 obj_priv = obj->driver_private;
335 offset = args->offset; 762 offset = args->offset;
763 obj_priv->dirty = 1;
336 764
337 written = vfs_write(obj->filp, 765 while (remain > 0) {
338 (char __user *)(uintptr_t) args->data_ptr, 766 /* Operation in this page
339 args->size, &offset); 767 *
340 if (written != args->size) { 768 * shmem_page_index = page number within shmem file
341 mutex_unlock(&dev->struct_mutex); 769 * shmem_page_offset = offset within page in shmem file
342 if (written < 0) 770 * data_page_index = page number in get_user_pages return
 343 return written; 771 * data_page_offset = offset within data_page_index page.
344 else 772 * page_length = bytes to copy for this page
345 return -EINVAL; 773 */
774 shmem_page_index = offset / PAGE_SIZE;
775 shmem_page_offset = offset & ~PAGE_MASK;
776 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
777 data_page_offset = data_ptr & ~PAGE_MASK;
778
779 page_length = remain;
780 if ((shmem_page_offset + page_length) > PAGE_SIZE)
781 page_length = PAGE_SIZE - shmem_page_offset;
782 if ((data_page_offset + page_length) > PAGE_SIZE)
783 page_length = PAGE_SIZE - data_page_offset;
784
785 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
786 shmem_page_offset,
787 user_pages[data_page_index],
788 data_page_offset,
789 page_length);
790 if (ret)
791 goto fail_put_pages;
792
793 remain -= page_length;
794 data_ptr += page_length;
795 offset += page_length;
346 } 796 }
347 797
798fail_put_pages:
799 i915_gem_object_put_pages(obj);
800fail_unlock:
348 mutex_unlock(&dev->struct_mutex); 801 mutex_unlock(&dev->struct_mutex);
802fail_put_user_pages:
803 for (i = 0; i < pinned_pages; i++)
804 page_cache_release(user_pages[i]);
805 kfree(user_pages);
349 806
350 return 0; 807 return ret;
351} 808}
352 809
353/** 810/**
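The slow paths above all share one prologue, and it is the point of this series: pin the user pages with get_user_pages() before taking struct_mutex, then copy through kmap_atomic(), so nothing can fault while struct_mutex is held. That respects the established lock order (mmap_sem, taken by the fault handler, nests outside struct_mutex) and removes the reversal. The shared prologue as a hypothetical helper, using the get_user_pages() signature of this era (pin_user_buffer() is not part of the patch):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

static int pin_user_buffer(unsigned long uaddr, size_t size, int write,
			   struct page ***pages_out, int *count_out)
{
	struct mm_struct *mm = current->mm;
	unsigned long first = uaddr / PAGE_SIZE;
	unsigned long last = (uaddr + size - 1) / PAGE_SIZE;
	int count = last - first + 1;
	struct page **pages;
	int pinned;

	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	/* Fault the pages in now, while no other lock is held. */
	down_read(&mm->mmap_sem);
	pinned = get_user_pages(current, mm, uaddr, count,
				write, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	if (pinned < count) {
		while (pinned > 0)
			page_cache_release(pages[--pinned]);
		kfree(pages);
		return -EFAULT;
	}

	*pages_out = pages;
	*count_out = count;
	return 0;
}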
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
388 if (obj_priv->phys_obj) 845 if (obj_priv->phys_obj)
389 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 846 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
390 else if (obj_priv->tiling_mode == I915_TILING_NONE && 847 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
391 dev->gtt_total != 0) 848 dev->gtt_total != 0) {
392 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 849 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
393 else 850 if (ret == -EFAULT) {
394 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 851 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
852 file_priv);
853 }
854 } else {
855 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
856 if (ret == -EFAULT) {
857 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
858 file_priv);
859 }
860 }
395 861
396#if WATCH_PWRITE 862#if WATCH_PWRITE
397 if (ret) 863 if (ret)
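Only -EFAULT makes i915_gem_pwrite_ioctl() fall back: it is the one failure the fast path can report that pinning the user pages will cure; every other error (a failed domain change, -ENOMEM) is final and propagates as-is. The dispatch reduced to its skeleton, as a sketch (example_gtt_pwrite() is hypothetical):

static int example_gtt_pwrite(struct drm_device *dev,
			      struct drm_gem_object *obj,
			      struct drm_i915_gem_pwrite *args,
			      struct drm_file *file_priv)
{
	int ret;

	ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
	if (ret == -EFAULT)	/* user pages not resident: pin and retry */
		ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file_priv);
	return ret;
}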
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
816} 1282}
817 1283
818static void 1284static void
819i915_gem_object_free_page_list(struct drm_gem_object *obj) 1285i915_gem_object_put_pages(struct drm_gem_object *obj)
820{ 1286{
821 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
822 int page_count = obj->size / PAGE_SIZE; 1288 int page_count = obj->size / PAGE_SIZE;
823 int i; 1289 int i;
824 1290
825 if (obj_priv->page_list == NULL) 1291 BUG_ON(obj_priv->pages_refcount == 0);
826 return;
827 1292
1293 if (--obj_priv->pages_refcount != 0)
1294 return;
828 1295
829 for (i = 0; i < page_count; i++) 1296 for (i = 0; i < page_count; i++)
830 if (obj_priv->page_list[i] != NULL) { 1297 if (obj_priv->pages[i] != NULL) {
831 if (obj_priv->dirty) 1298 if (obj_priv->dirty)
832 set_page_dirty(obj_priv->page_list[i]); 1299 set_page_dirty(obj_priv->pages[i]);
833 mark_page_accessed(obj_priv->page_list[i]); 1300 mark_page_accessed(obj_priv->pages[i]);
834 page_cache_release(obj_priv->page_list[i]); 1301 page_cache_release(obj_priv->pages[i]);
835 } 1302 }
836 obj_priv->dirty = 0; 1303 obj_priv->dirty = 0;
837 1304
838 drm_free(obj_priv->page_list, 1305 drm_free(obj_priv->pages,
839 page_count * sizeof(struct page *), 1306 page_count * sizeof(struct page *),
840 DRM_MEM_DRIVER); 1307 DRM_MEM_DRIVER);
841 obj_priv->page_list = NULL; 1308 obj_priv->pages = NULL;
842} 1309}
843 1310
844static void 1311static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1290 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1757 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1291 i915_gem_clear_fence_reg(obj); 1758 i915_gem_clear_fence_reg(obj);
1292 1759
1293 i915_gem_object_free_page_list(obj); 1760 i915_gem_object_put_pages(obj);
1294 1761
1295 if (obj_priv->gtt_space) { 1762 if (obj_priv->gtt_space) {
1296 atomic_dec(&dev->gtt_count); 1763 atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
1409} 1876}
1410 1877
1411static int 1878static int
1412i915_gem_object_get_page_list(struct drm_gem_object *obj) 1879i915_gem_object_get_pages(struct drm_gem_object *obj)
1413{ 1880{
1414 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1881 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1415 int page_count, i; 1882 int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1418 struct page *page; 1885 struct page *page;
1419 int ret; 1886 int ret;
1420 1887
1421 if (obj_priv->page_list) 1888 if (obj_priv->pages_refcount++ != 0)
1422 return 0; 1889 return 0;
1423 1890
1424 /* Get the list of pages out of our struct file. They'll be pinned 1891 /* Get the list of pages out of our struct file. They'll be pinned
1425 * at this point until we release them. 1892 * at this point until we release them.
1426 */ 1893 */
1427 page_count = obj->size / PAGE_SIZE; 1894 page_count = obj->size / PAGE_SIZE;
1428 BUG_ON(obj_priv->page_list != NULL); 1895 BUG_ON(obj_priv->pages != NULL);
1429 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), 1896 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
1430 DRM_MEM_DRIVER); 1897 DRM_MEM_DRIVER);
1431 if (obj_priv->page_list == NULL) { 1898 if (obj_priv->pages == NULL) {
1432 DRM_ERROR("Faled to allocate page list\n"); 1899 DRM_ERROR("Faled to allocate page list\n");
1900 obj_priv->pages_refcount--;
1433 return -ENOMEM; 1901 return -ENOMEM;
1434 } 1902 }
1435 1903
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1440 if (IS_ERR(page)) { 1908 if (IS_ERR(page)) {
1441 ret = PTR_ERR(page); 1909 ret = PTR_ERR(page);
1442 DRM_ERROR("read_mapping_page failed: %d\n", ret); 1910 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1443 i915_gem_object_free_page_list(obj); 1911 i915_gem_object_put_pages(obj);
1444 return ret; 1912 return ret;
1445 } 1913 }
1446 obj_priv->page_list[i] = page; 1914 obj_priv->pages[i] = page;
1447 } 1915 }
1448 return 0; 1916 return 0;
1449} 1917}
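The get_page_list/free_page_list pair becomes get_pages/put_pages because the page array is now reference counted: the pread/pwrite paths need to hold the pages while the object may simultaneously be bound to the GTT. The shape of the pairing as a sketch (struct example_obj and the example_* functions are hypothetical):

#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

struct example_obj {
	struct page **pages;
	int pages_refcount;
	int page_count;
};

static int example_get_pages(struct example_obj *obj)
{
	if (obj->pages_refcount++ != 0)
		return 0;			/* already populated */

	obj->pages = kcalloc(obj->page_count, sizeof(struct page *),
			     GFP_KERNEL);
	if (obj->pages == NULL) {
		obj->pages_refcount--;		/* undo the speculative get */
		return -ENOMEM;
	}
	/* ... read_mapping_page() each backing page into obj->pages ... */
	return 0;
}

static void example_put_pages(struct example_obj *obj)
{
	int i;

	BUG_ON(obj->pages_refcount == 0);
	if (--obj->pages_refcount != 0)
		return;				/* other holders remain */

	for (i = 0; i < obj->page_count; i++)
		if (obj->pages[i] != NULL)
			page_cache_release(obj->pages[i]);
	kfree(obj->pages);
	obj->pages = NULL;
}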
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1766 DRM_INFO("Binding object of size %d at 0x%08x\n", 2234 DRM_INFO("Binding object of size %d at 0x%08x\n",
1767 obj->size, obj_priv->gtt_offset); 2235 obj->size, obj_priv->gtt_offset);
1768#endif 2236#endif
1769 ret = i915_gem_object_get_page_list(obj); 2237 ret = i915_gem_object_get_pages(obj);
1770 if (ret) { 2238 if (ret) {
1771 drm_mm_put_block(obj_priv->gtt_space); 2239 drm_mm_put_block(obj_priv->gtt_space);
1772 obj_priv->gtt_space = NULL; 2240 obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1778 * into the GTT. 2246 * into the GTT.
1779 */ 2247 */
1780 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2248 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1781 obj_priv->page_list, 2249 obj_priv->pages,
1782 page_count, 2250 page_count,
1783 obj_priv->gtt_offset, 2251 obj_priv->gtt_offset,
1784 obj_priv->agp_type); 2252 obj_priv->agp_type);
1785 if (obj_priv->agp_mem == NULL) { 2253 if (obj_priv->agp_mem == NULL) {
1786 i915_gem_object_free_page_list(obj); 2254 i915_gem_object_put_pages(obj);
1787 drm_mm_put_block(obj_priv->gtt_space); 2255 drm_mm_put_block(obj_priv->gtt_space);
1788 obj_priv->gtt_space = NULL; 2256 obj_priv->gtt_space = NULL;
1789 return -ENOMEM; 2257 return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1810 * to GPU, and we can ignore the cache flush because it'll happen 2278 * to GPU, and we can ignore the cache flush because it'll happen
1811 * again at bind time. 2279 * again at bind time.
1812 */ 2280 */
1813 if (obj_priv->page_list == NULL) 2281 if (obj_priv->pages == NULL)
1814 return; 2282 return;
1815 2283
1816 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 2284 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
1817} 2285}
1818 2286
1819/** Flushes any GPU write domain for the object if it's dirty. */ 2287/** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1913static int 2381static int
1914i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2382i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1915{ 2383{
1916 struct drm_device *dev = obj->dev;
1917 int ret; 2384 int ret;
1918 2385
1919 i915_gem_object_flush_gpu_write_domain(obj); 2386 i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1932 /* Flush the CPU cache if it's still invalid. */ 2399 /* Flush the CPU cache if it's still invalid. */
1933 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2400 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1934 i915_gem_clflush_object(obj); 2401 i915_gem_clflush_object(obj);
1935 drm_agp_chipset_flush(dev);
1936 2402
1937 obj->read_domains |= I915_GEM_DOMAIN_CPU; 2403 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1938 } 2404 }
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2144static void 2610static void
2145i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 2611i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2146{ 2612{
2147 struct drm_device *dev = obj->dev;
2148 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2613 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2149 2614
2150 if (!obj_priv->page_cpu_valid) 2615 if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2158 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { 2623 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2159 if (obj_priv->page_cpu_valid[i]) 2624 if (obj_priv->page_cpu_valid[i])
2160 continue; 2625 continue;
2161 drm_clflush_pages(obj_priv->page_list + i, 1); 2626 drm_clflush_pages(obj_priv->pages + i, 1);
2162 } 2627 }
2163 drm_agp_chipset_flush(dev);
2164 } 2628 }
2165 2629
2166 /* Free the page_cpu_valid mappings which are now stale, whether 2630 /* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2224 if (obj_priv->page_cpu_valid[i]) 2688 if (obj_priv->page_cpu_valid[i])
2225 continue; 2689 continue;
2226 2690
2227 drm_clflush_pages(obj_priv->page_list + i, 1); 2691 drm_clflush_pages(obj_priv->pages + i, 1);
2228 2692
2229 obj_priv->page_cpu_valid[i] = 1; 2693 obj_priv->page_cpu_valid[i] = 1;
2230 } 2694 }
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2245static int 2709static int
2246i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 2710i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2247 struct drm_file *file_priv, 2711 struct drm_file *file_priv,
2248 struct drm_i915_gem_exec_object *entry) 2712 struct drm_i915_gem_exec_object *entry,
2713 struct drm_i915_gem_relocation_entry *relocs)
2249{ 2714{
2250 struct drm_device *dev = obj->dev; 2715 struct drm_device *dev = obj->dev;
2251 drm_i915_private_t *dev_priv = dev->dev_private; 2716 drm_i915_private_t *dev_priv = dev->dev_private;
2252 struct drm_i915_gem_relocation_entry reloc;
2253 struct drm_i915_gem_relocation_entry __user *relocs;
2254 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2717 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2255 int i, ret; 2718 int i, ret;
2256 void __iomem *reloc_page; 2719 void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2262 2725
2263 entry->offset = obj_priv->gtt_offset; 2726 entry->offset = obj_priv->gtt_offset;
2264 2727
2265 relocs = (struct drm_i915_gem_relocation_entry __user *)
2266 (uintptr_t) entry->relocs_ptr;
2267 /* Apply the relocations, using the GTT aperture to avoid cache 2728 /* Apply the relocations, using the GTT aperture to avoid cache
2268 * flushing requirements. 2729 * flushing requirements.
2269 */ 2730 */
2270 for (i = 0; i < entry->relocation_count; i++) { 2731 for (i = 0; i < entry->relocation_count; i++) {
 2732 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
2271 struct drm_gem_object *target_obj; 2733 struct drm_gem_object *target_obj;
2272 struct drm_i915_gem_object *target_obj_priv; 2734 struct drm_i915_gem_object *target_obj_priv;
2273 uint32_t reloc_val, reloc_offset; 2735 uint32_t reloc_val, reloc_offset;
2274 uint32_t __iomem *reloc_entry; 2736 uint32_t __iomem *reloc_entry;
2275 2737
2276 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2277 if (ret != 0) {
2278 i915_gem_object_unpin(obj);
2279 return ret;
2280 }
2281
2282 target_obj = drm_gem_object_lookup(obj->dev, file_priv, 2738 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2283 reloc.target_handle); 2739 reloc->target_handle);
2284 if (target_obj == NULL) { 2740 if (target_obj == NULL) {
2285 i915_gem_object_unpin(obj); 2741 i915_gem_object_unpin(obj);
2286 return -EBADF; 2742 return -EBADF;
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2292 */ 2748 */
2293 if (target_obj_priv->gtt_space == NULL) { 2749 if (target_obj_priv->gtt_space == NULL) {
2294 DRM_ERROR("No GTT space found for object %d\n", 2750 DRM_ERROR("No GTT space found for object %d\n",
2295 reloc.target_handle); 2751 reloc->target_handle);
2296 drm_gem_object_unreference(target_obj); 2752 drm_gem_object_unreference(target_obj);
2297 i915_gem_object_unpin(obj); 2753 i915_gem_object_unpin(obj);
2298 return -EINVAL; 2754 return -EINVAL;
2299 } 2755 }
2300 2756
2301 if (reloc.offset > obj->size - 4) { 2757 if (reloc->offset > obj->size - 4) {
2302 DRM_ERROR("Relocation beyond object bounds: " 2758 DRM_ERROR("Relocation beyond object bounds: "
2303 "obj %p target %d offset %d size %d.\n", 2759 "obj %p target %d offset %d size %d.\n",
2304 obj, reloc.target_handle, 2760 obj, reloc->target_handle,
2305 (int) reloc.offset, (int) obj->size); 2761 (int) reloc->offset, (int) obj->size);
2306 drm_gem_object_unreference(target_obj); 2762 drm_gem_object_unreference(target_obj);
2307 i915_gem_object_unpin(obj); 2763 i915_gem_object_unpin(obj);
2308 return -EINVAL; 2764 return -EINVAL;
2309 } 2765 }
2310 if (reloc.offset & 3) { 2766 if (reloc->offset & 3) {
2311 DRM_ERROR("Relocation not 4-byte aligned: " 2767 DRM_ERROR("Relocation not 4-byte aligned: "
2312 "obj %p target %d offset %d.\n", 2768 "obj %p target %d offset %d.\n",
2313 obj, reloc.target_handle, 2769 obj, reloc->target_handle,
2314 (int) reloc.offset); 2770 (int) reloc->offset);
2315 drm_gem_object_unreference(target_obj); 2771 drm_gem_object_unreference(target_obj);
2316 i915_gem_object_unpin(obj); 2772 i915_gem_object_unpin(obj);
2317 return -EINVAL; 2773 return -EINVAL;
2318 } 2774 }
2319 2775
2320 if (reloc.write_domain & I915_GEM_DOMAIN_CPU || 2776 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2321 reloc.read_domains & I915_GEM_DOMAIN_CPU) { 2777 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
2322 DRM_ERROR("reloc with read/write CPU domains: " 2778 DRM_ERROR("reloc with read/write CPU domains: "
2323 "obj %p target %d offset %d " 2779 "obj %p target %d offset %d "
2324 "read %08x write %08x", 2780 "read %08x write %08x",
2325 obj, reloc.target_handle, 2781 obj, reloc->target_handle,
2326 (int) reloc.offset, 2782 (int) reloc->offset,
2327 reloc.read_domains, 2783 reloc->read_domains,
2328 reloc.write_domain); 2784 reloc->write_domain);
2329 drm_gem_object_unreference(target_obj); 2785 drm_gem_object_unreference(target_obj);
2330 i915_gem_object_unpin(obj); 2786 i915_gem_object_unpin(obj);
2331 return -EINVAL; 2787 return -EINVAL;
2332 } 2788 }
2333 2789
2334 if (reloc.write_domain && target_obj->pending_write_domain && 2790 if (reloc->write_domain && target_obj->pending_write_domain &&
2335 reloc.write_domain != target_obj->pending_write_domain) { 2791 reloc->write_domain != target_obj->pending_write_domain) {
2336 DRM_ERROR("Write domain conflict: " 2792 DRM_ERROR("Write domain conflict: "
2337 "obj %p target %d offset %d " 2793 "obj %p target %d offset %d "
2338 "new %08x old %08x\n", 2794 "new %08x old %08x\n",
2339 obj, reloc.target_handle, 2795 obj, reloc->target_handle,
2340 (int) reloc.offset, 2796 (int) reloc->offset,
2341 reloc.write_domain, 2797 reloc->write_domain,
2342 target_obj->pending_write_domain); 2798 target_obj->pending_write_domain);
2343 drm_gem_object_unreference(target_obj); 2799 drm_gem_object_unreference(target_obj);
2344 i915_gem_object_unpin(obj); 2800 i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2351 "presumed %08x delta %08x\n", 2807 "presumed %08x delta %08x\n",
2352 __func__, 2808 __func__,
2353 obj, 2809 obj,
2354 (int) reloc.offset, 2810 (int) reloc->offset,
2355 (int) reloc.target_handle, 2811 (int) reloc->target_handle,
2356 (int) reloc.read_domains, 2812 (int) reloc->read_domains,
2357 (int) reloc.write_domain, 2813 (int) reloc->write_domain,
2358 (int) target_obj_priv->gtt_offset, 2814 (int) target_obj_priv->gtt_offset,
2359 (int) reloc.presumed_offset, 2815 (int) reloc->presumed_offset,
2360 reloc.delta); 2816 reloc->delta);
2361#endif 2817#endif
2362 2818
2363 target_obj->pending_read_domains |= reloc.read_domains; 2819 target_obj->pending_read_domains |= reloc->read_domains;
2364 target_obj->pending_write_domain |= reloc.write_domain; 2820 target_obj->pending_write_domain |= reloc->write_domain;
2365 2821
2366 /* If the relocation already has the right value in it, no 2822 /* If the relocation already has the right value in it, no
2367 * more work needs to be done. 2823 * more work needs to be done.
2368 */ 2824 */
2369 if (target_obj_priv->gtt_offset == reloc.presumed_offset) { 2825 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
2370 drm_gem_object_unreference(target_obj); 2826 drm_gem_object_unreference(target_obj);
2371 continue; 2827 continue;
2372 } 2828 }
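Even though the relocations now arrive pre-copied in kernel memory, each entry is still validated against the object it patches before anything is written: the target must be bound, the offset must leave room for a whole aligned dword, and CPU-domain relocations are rejected outright. Those checks collected into a single predicate, as a sketch (validate_reloc() is hypothetical):

/* A relocation must patch a whole, aligned dword inside the object and
 * may not ask for CPU-domain access; GEM patches through the GTT. */
static int validate_reloc(const struct drm_i915_gem_relocation_entry *r,
			  size_t obj_size)
{
	if (r->offset > obj_size - 4)
		return -EINVAL;		/* write would overrun the object */
	if (r->offset & 3)
		return -EINVAL;		/* must be 4-byte aligned */
	if ((r->read_domains | r->write_domain) & I915_GEM_DOMAIN_CPU)
		return -EINVAL;		/* CPU domain is not relocatable */
	return 0;
}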
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2381 /* Map the page containing the relocation we're going to 2837 /* Map the page containing the relocation we're going to
2382 * perform. 2838 * perform.
2383 */ 2839 */
2384 reloc_offset = obj_priv->gtt_offset + reloc.offset; 2840 reloc_offset = obj_priv->gtt_offset + reloc->offset;
2385 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 2841 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2386 (reloc_offset & 2842 (reloc_offset &
2387 ~(PAGE_SIZE - 1))); 2843 ~(PAGE_SIZE - 1)));
2388 reloc_entry = (uint32_t __iomem *)(reloc_page + 2844 reloc_entry = (uint32_t __iomem *)(reloc_page +
2389 (reloc_offset & (PAGE_SIZE - 1))); 2845 (reloc_offset & (PAGE_SIZE - 1)));
2390 reloc_val = target_obj_priv->gtt_offset + reloc.delta; 2846 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
2391 2847
2392#if WATCH_BUF 2848#if WATCH_BUF
2393 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", 2849 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2394 obj, (unsigned int) reloc.offset, 2850 obj, (unsigned int) reloc->offset,
2395 readl(reloc_entry), reloc_val); 2851 readl(reloc_entry), reloc_val);
2396#endif 2852#endif
2397 writel(reloc_val, reloc_entry); 2853 writel(reloc_val, reloc_entry);
2398 io_mapping_unmap_atomic(reloc_page); 2854 io_mapping_unmap_atomic(reloc_page);
2399 2855
2400 /* Write the updated presumed offset for this entry back out 2856 /* The updated presumed offset for this entry will be
2401 * to the user. 2857 * copied back out to the user.
2402 */ 2858 */
2403 reloc.presumed_offset = target_obj_priv->gtt_offset; 2859 reloc->presumed_offset = target_obj_priv->gtt_offset;
2404 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2405 if (ret != 0) {
2406 drm_gem_object_unreference(target_obj);
2407 i915_gem_object_unpin(obj);
2408 return ret;
2409 }
2410 2860
2411 drm_gem_object_unreference(target_obj); 2861 drm_gem_object_unreference(target_obj);
2412 } 2862 }
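Each relocation is applied through a transient write-combining mapping of just the aperture page that contains it; io_mapping_map_atomic_wc() keeps the write out of the CPU cache (so no clflush is needed) without the cost of a persistent mapping. The write reduced to a helper, as a sketch (write_gtt_dword() is hypothetical):

#include <linux/io-mapping.h>

/* Patch one dword in the aperture through a short-lived WC mapping. */
static void write_gtt_dword(struct io_mapping *gtt, uint32_t gtt_offset,
			    uint32_t value)
{
	char __iomem *page;
	uint32_t __iomem *slot;

	page = io_mapping_map_atomic_wc(gtt, gtt_offset & PAGE_MASK);
	slot = (uint32_t __iomem *)(page + (gtt_offset & ~PAGE_MASK));
	writel(value, slot);
	io_mapping_unmap_atomic(page);
}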
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2423static int 2873static int
2424i915_dispatch_gem_execbuffer(struct drm_device *dev, 2874i915_dispatch_gem_execbuffer(struct drm_device *dev,
2425 struct drm_i915_gem_execbuffer *exec, 2875 struct drm_i915_gem_execbuffer *exec,
2876 struct drm_clip_rect *cliprects,
2426 uint64_t exec_offset) 2877 uint64_t exec_offset)
2427{ 2878{
2428 drm_i915_private_t *dev_priv = dev->dev_private; 2879 drm_i915_private_t *dev_priv = dev->dev_private;
2429 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2430 (uintptr_t) exec->cliprects_ptr;
2431 int nbox = exec->num_cliprects; 2880 int nbox = exec->num_cliprects;
2432 int i = 0, count; 2881 int i = 0, count;
2433 uint32_t exec_start, exec_len; 2882 uint32_t exec_start, exec_len;
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
2448 2897
2449 for (i = 0; i < count; i++) { 2898 for (i = 0; i < count; i++) {
2450 if (i < nbox) { 2899 if (i < nbox) {
2451 int ret = i915_emit_box(dev, boxes, i, 2900 int ret = i915_emit_box(dev, cliprects, i,
2452 exec->DR1, exec->DR4); 2901 exec->DR1, exec->DR4);
2453 if (ret) 2902 if (ret)
2454 return ret; 2903 return ret;
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2504 return ret; 2953 return ret;
2505} 2954}
2506 2955
2956static int
2957i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
2958 uint32_t buffer_count,
2959 struct drm_i915_gem_relocation_entry **relocs)
2960{
2961 uint32_t reloc_count = 0, reloc_index = 0, i;
 2962 int ret = 0;
2963
2964 *relocs = NULL;
2965 for (i = 0; i < buffer_count; i++) {
2966 if (reloc_count + exec_list[i].relocation_count < reloc_count)
2967 return -EINVAL;
2968 reloc_count += exec_list[i].relocation_count;
2969 }
2970
2971 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
2972 if (*relocs == NULL)
2973 return -ENOMEM;
2974
2975 for (i = 0; i < buffer_count; i++) {
2976 struct drm_i915_gem_relocation_entry __user *user_relocs;
2977
2978 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
2979
2980 ret = copy_from_user(&(*relocs)[reloc_index],
2981 user_relocs,
2982 exec_list[i].relocation_count *
2983 sizeof(**relocs));
2984 if (ret != 0) {
2985 drm_free(*relocs, reloc_count * sizeof(**relocs),
2986 DRM_MEM_DRIVER);
2987 *relocs = NULL;
 2988 return -EFAULT;
2989 }
2990
2991 reloc_index += exec_list[i].relocation_count;
2992 }
2993
2994 return ret;
2995}
2996
2997static int
2998i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
2999 uint32_t buffer_count,
3000 struct drm_i915_gem_relocation_entry *relocs)
3001{
3002 uint32_t reloc_count = 0, i;
 3003 int ret = 0;
3004
3005 for (i = 0; i < buffer_count; i++) {
3006 struct drm_i915_gem_relocation_entry __user *user_relocs;
3007
3008 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3009
3010 if (ret == 0) {
3011 ret = copy_to_user(user_relocs,
3012 &relocs[reloc_count],
3013 exec_list[i].relocation_count *
3014 sizeof(*relocs));
3015 }
3016
3017 reloc_count += exec_list[i].relocation_count;
3018 }
3019
3020 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3021
3022 return ret;
3023}
3024
2507int 3025int
2508i915_gem_execbuffer(struct drm_device *dev, void *data, 3026i915_gem_execbuffer(struct drm_device *dev, void *data,
2509 struct drm_file *file_priv) 3027 struct drm_file *file_priv)
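i915_gem_get_relocs_from_user() flattens every buffer's relocation list into one kernel allocation (with an overflow check on the summed counts); the pin loop then indexes into it with reloc_index, a running sum of relocation_count, and i915_gem_put_relocs_to_user() scatters the updated entries back with the same arithmetic. The indexing in isolation, as a sketch (reloc_base() is hypothetical):

/* Index of buffer 'buf's first relocation within the flattened array. */
static uint32_t reloc_base(const struct drm_i915_gem_exec_object *list,
			   uint32_t buf)
{
	uint32_t base = 0, i;

	for (i = 0; i < buf; i++)
		base += list[i].relocation_count;
	return base;
}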
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2515 struct drm_gem_object **object_list = NULL; 3033 struct drm_gem_object **object_list = NULL;
2516 struct drm_gem_object *batch_obj; 3034 struct drm_gem_object *batch_obj;
2517 struct drm_i915_gem_object *obj_priv; 3035 struct drm_i915_gem_object *obj_priv;
2518 int ret, i, pinned = 0; 3036 struct drm_clip_rect *cliprects = NULL;
3037 struct drm_i915_gem_relocation_entry *relocs;
3038 int ret, ret2, i, pinned = 0;
2519 uint64_t exec_offset; 3039 uint64_t exec_offset;
2520 uint32_t seqno, flush_domains; 3040 uint32_t seqno, flush_domains, reloc_index;
2521 int pin_tries; 3041 int pin_tries;
2522 3042
2523#if WATCH_EXEC 3043#if WATCH_EXEC
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2551 goto pre_mutex_err; 3071 goto pre_mutex_err;
2552 } 3072 }
2553 3073
3074 if (args->num_cliprects != 0) {
3075 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3076 DRM_MEM_DRIVER);
3077 if (cliprects == NULL)
3078 goto pre_mutex_err;
3079
3080 ret = copy_from_user(cliprects,
3081 (struct drm_clip_rect __user *)
3082 (uintptr_t) args->cliprects_ptr,
3083 sizeof(*cliprects) * args->num_cliprects);
3084 if (ret != 0) {
3085 DRM_ERROR("copy %d cliprects failed: %d\n",
3086 args->num_cliprects, ret);
3087 goto pre_mutex_err;
3088 }
3089 }
3090
3091 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3092 &relocs);
3093 if (ret != 0)
3094 goto pre_mutex_err;
3095
2554 mutex_lock(&dev->struct_mutex); 3096 mutex_lock(&dev->struct_mutex);
2555 3097
2556 i915_verify_inactive(dev, __FILE__, __LINE__); 3098 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2593 /* Pin and relocate */ 3135 /* Pin and relocate */
2594 for (pin_tries = 0; ; pin_tries++) { 3136 for (pin_tries = 0; ; pin_tries++) {
2595 ret = 0; 3137 ret = 0;
3138 reloc_index = 0;
3139
2596 for (i = 0; i < args->buffer_count; i++) { 3140 for (i = 0; i < args->buffer_count; i++) {
2597 object_list[i]->pending_read_domains = 0; 3141 object_list[i]->pending_read_domains = 0;
2598 object_list[i]->pending_write_domain = 0; 3142 object_list[i]->pending_write_domain = 0;
2599 ret = i915_gem_object_pin_and_relocate(object_list[i], 3143 ret = i915_gem_object_pin_and_relocate(object_list[i],
2600 file_priv, 3144 file_priv,
2601 &exec_list[i]); 3145 &exec_list[i],
3146 &relocs[reloc_index]);
2602 if (ret) 3147 if (ret)
2603 break; 3148 break;
2604 pinned = i + 1; 3149 pinned = i + 1;
3150 reloc_index += exec_list[i].relocation_count;
2605 } 3151 }
2606 /* success */ 3152 /* success */
2607 if (ret == 0) 3153 if (ret == 0)
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2687#endif 3233#endif
2688 3234
2689 /* Exec the batchbuffer */ 3235 /* Exec the batchbuffer */
2690 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 3236 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
2691 if (ret) { 3237 if (ret) {
2692 DRM_ERROR("dispatch failed %d\n", ret); 3238 DRM_ERROR("dispatch failed %d\n", ret);
2693 goto err; 3239 goto err;
@@ -2751,11 +3297,27 @@ err:
2751 args->buffer_count, ret); 3297 args->buffer_count, ret);
2752 } 3298 }
2753 3299
3300 /* Copy the updated relocations out regardless of current error
3301 * state. Failure to update the relocs would mean that the next
3302 * time userland calls execbuf, it would do so with presumed offset
3303 * state that didn't match the actual object state.
3304 */
3305 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3306 relocs);
3307 if (ret2 != 0) {
3308 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3309
3310 if (ret == 0)
3311 ret = ret2;
3312 }
3313
2754pre_mutex_err: 3314pre_mutex_err:
2755 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 3315 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2756 DRM_MEM_DRIVER); 3316 DRM_MEM_DRIVER);
2757 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, 3317 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2758 DRM_MEM_DRIVER); 3318 DRM_MEM_DRIVER);
3319 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3320 DRM_MEM_DRIVER);
2759 3321
2760 return ret; 3322 return ret;
2761} 3323}
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
3192 3754
3193 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 3755 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3194 3756
3195 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); 3757 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3196 if (dev_priv->hw_status_page == NULL) { 3758 if (dev_priv->hw_status_page == NULL) {
3197 DRM_ERROR("Failed to map status page.\n"); 3759 DRM_ERROR("Failed to map status page.\n");
3198 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 3760 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
3222 obj = dev_priv->hws_obj; 3784 obj = dev_priv->hws_obj;
3223 obj_priv = obj->driver_private; 3785 obj_priv = obj->driver_private;
3224 3786
3225 kunmap(obj_priv->page_list[0]); 3787 kunmap(obj_priv->pages[0]);
3226 i915_gem_object_unpin(obj); 3788 i915_gem_object_unpin(obj);
3227 drm_gem_object_unreference(obj); 3789 drm_gem_object_unreference(obj);
3228 dev_priv->hws_obj = NULL; 3790 dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
3525 if (!obj_priv->phys_obj) 4087 if (!obj_priv->phys_obj)
3526 return; 4088 return;
3527 4089
3528 ret = i915_gem_object_get_page_list(obj); 4090 ret = i915_gem_object_get_pages(obj);
3529 if (ret) 4091 if (ret)
3530 goto out; 4092 goto out;
3531 4093
3532 page_count = obj->size / PAGE_SIZE; 4094 page_count = obj->size / PAGE_SIZE;
3533 4095
3534 for (i = 0; i < page_count; i++) { 4096 for (i = 0; i < page_count; i++) {
3535 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4097 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
3536 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4098 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3537 4099
3538 memcpy(dst, src, PAGE_SIZE); 4100 memcpy(dst, src, PAGE_SIZE);
3539 kunmap_atomic(dst, KM_USER0); 4101 kunmap_atomic(dst, KM_USER0);
3540 } 4102 }
3541 drm_clflush_pages(obj_priv->page_list, page_count); 4103 drm_clflush_pages(obj_priv->pages, page_count);
3542 drm_agp_chipset_flush(dev); 4104 drm_agp_chipset_flush(dev);
3543out: 4105out:
3544 obj_priv->phys_obj->cur_obj = NULL; 4106 obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3581 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 4143 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3582 obj_priv->phys_obj->cur_obj = obj; 4144 obj_priv->phys_obj->cur_obj = obj;
3583 4145
3584 ret = i915_gem_object_get_page_list(obj); 4146 ret = i915_gem_object_get_pages(obj);
3585 if (ret) { 4147 if (ret) {
3586 DRM_ERROR("failed to get page list\n"); 4148 DRM_ERROR("failed to get page list\n");
3587 goto out; 4149 goto out;
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3590 page_count = obj->size / PAGE_SIZE; 4152 page_count = obj->size / PAGE_SIZE;
3591 4153
3592 for (i = 0; i < page_count; i++) { 4154 for (i = 0; i < page_count; i++) {
3593 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4155 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
3594 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4156 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3595 4157
3596 memcpy(dst, src, PAGE_SIZE); 4158 memcpy(dst, src, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644
index 000000000000..455ec970b385
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34
35#define DRM_I915_RING_DEBUG 1
36
37
38#if defined(CONFIG_DEBUG_FS)
39
40#define ACTIVE_LIST 1
41#define FLUSHING_LIST 2
42#define INACTIVE_LIST 3
43
44static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
45{
46 if (obj_priv->user_pin_count > 0)
47 return "P";
48 else if (obj_priv->pin_count > 0)
49 return "p";
50 else
51 return " ";
52}
53
54static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
55{
56 switch (obj_priv->tiling_mode) {
57 default:
58 case I915_TILING_NONE: return " ";
59 case I915_TILING_X: return "X";
60 case I915_TILING_Y: return "Y";
61 }
62}
63
64static int i915_gem_object_list_info(struct seq_file *m, void *data)
65{
66 struct drm_info_node *node = (struct drm_info_node *) m->private;
67 uintptr_t list = (uintptr_t) node->info_ent->data;
68 struct list_head *head;
69 struct drm_device *dev = node->minor->dev;
70 drm_i915_private_t *dev_priv = dev->dev_private;
71 struct drm_i915_gem_object *obj_priv;
72
73 switch (list) {
74 case ACTIVE_LIST:
75 seq_printf(m, "Active:\n");
76 head = &dev_priv->mm.active_list;
77 break;
78 case INACTIVE_LIST:
79 seq_printf(m, "Inctive:\n");
80 head = &dev_priv->mm.inactive_list;
81 break;
82 case FLUSHING_LIST:
83 seq_printf(m, "Flushing:\n");
84 head = &dev_priv->mm.flushing_list;
85 break;
86 default:
87 DRM_INFO("Ooops, unexpected list\n");
88 return 0;
89 }
90
91 list_for_each_entry(obj_priv, head, list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94
95 seq_printf(m, " %p: %s %08x %08x %d",
96 obj,
97 get_pin_flag(obj_priv),
98 obj->read_domains, obj->write_domain,
99 obj_priv->last_rendering_seqno);
100
101 if (obj->name)
102 seq_printf(m, " (name: %d)", obj->name);
103 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
104 seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
105 seq_printf(m, "\n");
106 }
107 return 0;
108}
109
110static int i915_gem_request_info(struct seq_file *m, void *data)
111{
112 struct drm_info_node *node = (struct drm_info_node *) m->private;
113 struct drm_device *dev = node->minor->dev;
114 drm_i915_private_t *dev_priv = dev->dev_private;
115 struct drm_i915_gem_request *gem_request;
116
117 seq_printf(m, "Request:\n");
118 list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
119 seq_printf(m, " %d @ %d\n",
120 gem_request->seqno,
121 (int) (jiffies - gem_request->emitted_jiffies));
122 }
123 return 0;
124}
125
126static int i915_gem_seqno_info(struct seq_file *m, void *data)
127{
128 struct drm_info_node *node = (struct drm_info_node *) m->private;
129 struct drm_device *dev = node->minor->dev;
130 drm_i915_private_t *dev_priv = dev->dev_private;
131
132 if (dev_priv->hw_status_page != NULL) {
133 seq_printf(m, "Current sequence: %d\n",
134 i915_get_gem_seqno(dev));
135 } else {
136 seq_printf(m, "Current sequence: hws uninitialized\n");
137 }
138 seq_printf(m, "Waiter sequence: %d\n",
139 dev_priv->mm.waiting_gem_seqno);
140 seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
141 return 0;
142}
143
144
145static int i915_interrupt_info(struct seq_file *m, void *data)
146{
147 struct drm_info_node *node = (struct drm_info_node *) m->private;
148 struct drm_device *dev = node->minor->dev;
149 drm_i915_private_t *dev_priv = dev->dev_private;
150
151 seq_printf(m, "Interrupt enable: %08x\n",
152 I915_READ(IER));
153 seq_printf(m, "Interrupt identity: %08x\n",
154 I915_READ(IIR));
155 seq_printf(m, "Interrupt mask: %08x\n",
156 I915_READ(IMR));
157 seq_printf(m, "Pipe A stat: %08x\n",
158 I915_READ(PIPEASTAT));
159 seq_printf(m, "Pipe B stat: %08x\n",
160 I915_READ(PIPEBSTAT));
161 seq_printf(m, "Interrupts received: %d\n",
162 atomic_read(&dev_priv->irq_received));
163 if (dev_priv->hw_status_page != NULL) {
164 seq_printf(m, "Current sequence: %d\n",
165 i915_get_gem_seqno(dev));
166 } else {
167 seq_printf(m, "Current sequence: hws uninitialized\n");
168 }
169 seq_printf(m, "Waiter sequence: %d\n",
170 dev_priv->mm.waiting_gem_seqno);
171 seq_printf(m, "IRQ sequence: %d\n",
172 dev_priv->mm.irq_gem_seqno);
173 return 0;
174}
175
176static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
177{
178 struct drm_info_node *node = (struct drm_info_node *) m->private;
179 struct drm_device *dev = node->minor->dev;
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int i;
182
183 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
184 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
185 for (i = 0; i < dev_priv->num_fence_regs; i++) {
186 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
187
188 if (obj == NULL) {
189 seq_printf(m, "Fenced object[%2d] = unused\n", i);
190 } else {
191 struct drm_i915_gem_object *obj_priv;
192
193 obj_priv = obj->driver_private;
194 seq_printf(m, "Fenced object[%2d] = %p: %s "
195 "%08x %08zx %08x %s %08x %08x %d",
196 i, obj, get_pin_flag(obj_priv),
197 obj_priv->gtt_offset,
198 obj->size, obj_priv->stride,
199 get_tiling_flag(obj_priv),
200 obj->read_domains, obj->write_domain,
201 obj_priv->last_rendering_seqno);
202 if (obj->name)
203 seq_printf(m, " (name: %d)", obj->name);
204 seq_printf(m, "\n");
205 }
206 }
207
208 return 0;
209}
210
211static int i915_hws_info(struct seq_file *m, void *data)
212{
213 struct drm_info_node *node = (struct drm_info_node *) m->private;
214 struct drm_device *dev = node->minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int i;
217 volatile u32 *hws;
218
219 hws = (volatile u32 *)dev_priv->hw_status_page;
220 if (hws == NULL)
221 return 0;
222
223 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
224 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
225 i * 4,
226 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
227 }
228 return 0;
229}
230
231static struct drm_info_list i915_gem_debugfs_list[] = {
232 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
233 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
234 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
235 {"i915_gem_request", i915_gem_request_info, 0},
236 {"i915_gem_seqno", i915_gem_seqno_info, 0},
237 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
238 {"i915_gem_interrupt", i915_interrupt_info, 0},
239 {"i915_gem_hws", i915_hws_info, 0},
240};
241#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
242
243int i915_gem_debugfs_init(struct drm_minor *minor)
244{
245 return drm_debugfs_create_files(i915_gem_debugfs_list,
246 I915_GEM_DEBUGFS_ENTRIES,
247 minor->debugfs_root, minor);
248}
249
250void i915_gem_debugfs_cleanup(struct drm_minor *minor)
251{
252 drm_debugfs_remove_files(i915_gem_debugfs_list,
253 I915_GEM_DEBUGFS_ENTRIES, minor);
254}
255
256#endif /* CONFIG_DEBUG_FS */
257
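One wrinkle in the new debugfs file: drm_info_list's .data member is a void *, and the list selectors (ACTIVE_LIST and friends) are small integers smuggled through it, which is why i915_gem_object_list_info() reads the field back through uintptr_t. A self-contained sketch of that round trip (the macro name is ours):

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_ACTIVE_LIST 1

    int main(void)
    {
            /* store: small integer -> pointer-sized integer -> pointer */
            void *data = (void *)(uintptr_t)SKETCH_ACTIVE_LIST;
            /* load it back the same way the info callback does */
            uintptr_t list = (uintptr_t)data;

            assert(list == SKETCH_ACTIVE_LIST);
            return 0;
    }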
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644
index 4d1b9de0cd8b..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34static int i915_gem_active_info(char *buf, char **start, off_t offset,
35 int request, int *eof, void *data)
36{
37 struct drm_minor *minor = (struct drm_minor *) data;
38 struct drm_device *dev = minor->dev;
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *obj_priv;
41 int len = 0;
42
43 if (offset > DRM_PROC_LIMIT) {
44 *eof = 1;
45 return 0;
46 }
47
48 *start = &buf[offset];
49 *eof = 0;
50 DRM_PROC_PRINT("Active:\n");
51 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
52 list)
53 {
54 struct drm_gem_object *obj = obj_priv->obj;
55 if (obj->name) {
56 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
57 obj, obj->name,
58 obj->read_domains, obj->write_domain,
59 obj_priv->last_rendering_seqno);
60 } else {
61 DRM_PROC_PRINT(" %p: %08x %08x %d\n",
62 obj,
63 obj->read_domains, obj->write_domain,
64 obj_priv->last_rendering_seqno);
65 }
66 }
67 if (len > request + offset)
68 return request;
69 *eof = 1;
70 return len - offset;
71}
72
73static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
74 int request, int *eof, void *data)
75{
76 struct drm_minor *minor = (struct drm_minor *) data;
77 struct drm_device *dev = minor->dev;
78 drm_i915_private_t *dev_priv = dev->dev_private;
79 struct drm_i915_gem_object *obj_priv;
80 int len = 0;
81
82 if (offset > DRM_PROC_LIMIT) {
83 *eof = 1;
84 return 0;
85 }
86
87 *start = &buf[offset];
88 *eof = 0;
89 DRM_PROC_PRINT("Flushing:\n");
90 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
91 list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94 if (obj->name) {
95 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
96 obj, obj->name,
97 obj->read_domains, obj->write_domain,
98 obj_priv->last_rendering_seqno);
99 } else {
100 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
101 obj->read_domains, obj->write_domain,
102 obj_priv->last_rendering_seqno);
103 }
104 }
105 if (len > request + offset)
106 return request;
107 *eof = 1;
108 return len - offset;
109}
110
111static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
112 int request, int *eof, void *data)
113{
114 struct drm_minor *minor = (struct drm_minor *) data;
115 struct drm_device *dev = minor->dev;
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 struct drm_i915_gem_object *obj_priv;
118 int len = 0;
119
120 if (offset > DRM_PROC_LIMIT) {
121 *eof = 1;
122 return 0;
123 }
124
125 *start = &buf[offset];
126 *eof = 0;
127 DRM_PROC_PRINT("Inactive:\n");
128 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
129 list)
130 {
131 struct drm_gem_object *obj = obj_priv->obj;
132 if (obj->name) {
133 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
134 obj, obj->name,
135 obj->read_domains, obj->write_domain,
136 obj_priv->last_rendering_seqno);
137 } else {
138 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
139 obj->read_domains, obj->write_domain,
140 obj_priv->last_rendering_seqno);
141 }
142 }
143 if (len > request + offset)
144 return request;
145 *eof = 1;
146 return len - offset;
147}
148
149static int i915_gem_request_info(char *buf, char **start, off_t offset,
150 int request, int *eof, void *data)
151{
152 struct drm_minor *minor = (struct drm_minor *) data;
153 struct drm_device *dev = minor->dev;
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 struct drm_i915_gem_request *gem_request;
156 int len = 0;
157
158 if (offset > DRM_PROC_LIMIT) {
159 *eof = 1;
160 return 0;
161 }
162
163 *start = &buf[offset];
164 *eof = 0;
165 DRM_PROC_PRINT("Request:\n");
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list)
168 {
169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies));
172 }
173 if (len > request + offset)
174 return request;
175 *eof = 1;
176 return len - offset;
177}
178
179static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
180 int request, int *eof, void *data)
181{
182 struct drm_minor *minor = (struct drm_minor *) data;
183 struct drm_device *dev = minor->dev;
184 drm_i915_private_t *dev_priv = dev->dev_private;
185 int len = 0;
186
187 if (offset > DRM_PROC_LIMIT) {
188 *eof = 1;
189 return 0;
190 }
191
192 *start = &buf[offset];
193 *eof = 0;
194 if (dev_priv->hw_status_page != NULL) {
195 DRM_PROC_PRINT("Current sequence: %d\n",
196 i915_get_gem_seqno(dev));
197 } else {
198 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
199 }
200 DRM_PROC_PRINT("Waiter sequence: %d\n",
201 dev_priv->mm.waiting_gem_seqno);
202 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
203 if (len > request + offset)
204 return request;
205 *eof = 1;
206 return len - offset;
207}
208
209
210static int i915_interrupt_info(char *buf, char **start, off_t offset,
211 int request, int *eof, void *data)
212{
213 struct drm_minor *minor = (struct drm_minor *) data;
214 struct drm_device *dev = minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int len = 0;
217
218 if (offset > DRM_PROC_LIMIT) {
219 *eof = 1;
220 return 0;
221 }
222
223 *start = &buf[offset];
224 *eof = 0;
225 DRM_PROC_PRINT("Interrupt enable: %08x\n",
226 I915_READ(IER));
227 DRM_PROC_PRINT("Interrupt identity: %08x\n",
228 I915_READ(IIR));
229 DRM_PROC_PRINT("Interrupt mask: %08x\n",
230 I915_READ(IMR));
231 DRM_PROC_PRINT("Pipe A stat: %08x\n",
232 I915_READ(PIPEASTAT));
233 DRM_PROC_PRINT("Pipe B stat: %08x\n",
234 I915_READ(PIPEBSTAT));
235 DRM_PROC_PRINT("Interrupts received: %d\n",
236 atomic_read(&dev_priv->irq_received));
237 if (dev_priv->hw_status_page != NULL) {
238 DRM_PROC_PRINT("Current sequence: %d\n",
239 i915_get_gem_seqno(dev));
240 } else {
241 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
242 }
243 DRM_PROC_PRINT("Waiter sequence: %d\n",
244 dev_priv->mm.waiting_gem_seqno);
245 DRM_PROC_PRINT("IRQ sequence: %d\n",
246 dev_priv->mm.irq_gem_seqno);
247 if (len > request + offset)
248 return request;
249 *eof = 1;
250 return len - offset;
251}
252
253static int i915_hws_info(char *buf, char **start, off_t offset,
254 int request, int *eof, void *data)
255{
256 struct drm_minor *minor = (struct drm_minor *) data;
257 struct drm_device *dev = minor->dev;
258 drm_i915_private_t *dev_priv = dev->dev_private;
259 int len = 0, i;
260 volatile u32 *hws;
261
262 if (offset > DRM_PROC_LIMIT) {
263 *eof = 1;
264 return 0;
265 }
266
267 hws = (volatile u32 *)dev_priv->hw_status_page;
268 if (hws == NULL) {
269 *eof = 1;
270 return 0;
271 }
272
273 *start = &buf[offset];
274 *eof = 0;
275 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
276 DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
277 i * 4,
278 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
279 }
280 if (len > request + offset)
281 return request;
282 *eof = 1;
283 return len - offset;
284}
285
286static struct drm_proc_list {
287 /** file name */
288 const char *name;
289 /** proc callback*/
290 int (*f) (char *, char **, off_t, int, int *, void *);
291} i915_gem_proc_list[] = {
292 {"i915_gem_active", i915_gem_active_info},
293 {"i915_gem_flushing", i915_gem_flushing_info},
294 {"i915_gem_inactive", i915_gem_inactive_info},
295 {"i915_gem_request", i915_gem_request_info},
296 {"i915_gem_seqno", i915_gem_seqno_info},
297 {"i915_gem_interrupt", i915_interrupt_info},
298 {"i915_gem_hws", i915_hws_info},
299};
300
301#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
302
303int i915_gem_proc_init(struct drm_minor *minor)
304{
305 struct proc_dir_entry *ent;
306 int i, j;
307
308 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
309 ent = create_proc_entry(i915_gem_proc_list[i].name,
310 S_IFREG | S_IRUGO, minor->dev_root);
311 if (!ent) {
312 DRM_ERROR("Cannot create /proc/dri/.../%s\n",
313 i915_gem_proc_list[i].name);
314 for (j = 0; j < i; j++)
315 remove_proc_entry(i915_gem_proc_list[i].name,
316 minor->dev_root);
317 return -1;
318 }
319 ent->read_proc = i915_gem_proc_list[i].f;
320 ent->data = minor;
321 }
322 return 0;
323}
324
325void i915_gem_proc_cleanup(struct drm_minor *minor)
326{
327 int i;
328
329 if (!minor->dev_root)
330 return;
331
332 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
333 remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
334}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191ef934..4cce1aef438e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
96 */ 96 */
97 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 97 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
98 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 98 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
99 } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || 99 } else if (IS_MOBILE(dev)) {
100 IS_GM45(dev)) {
101 uint32_t dcc; 100 uint32_t dcc;
102 101
103 /* On 915-945 and GM965, channel interleave by the CPU is 102 /* On mobile 9xx chipsets, channel interleave by the CPU is
104 * determined by DCC. The CPU will alternate based on bit 6 103 * determined by DCC. For single-channel, neither the CPU
105 * in interleaved mode, and the GPU will then also alternate 104 * nor the GPU do swizzling. For dual channel interleaved,
106 * on bit 6, 9, and 10 for X, but the CPU may also optionally 105 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
107 * alternate based on bit 17 (XOR not disabled and XOR 106 * 9 for Y tiled. The CPU's interleave is independent, and
108 * bit == 17). 107 * can be based on either bit 11 (haven't seen this yet) or
108 * bit 17 (common).
109 */ 109 */
110 dcc = I915_READ(DCC); 110 dcc = I915_READ(DCC);
111 switch (dcc & DCC_ADDRESSING_MODE_MASK) { 111 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
115 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 115 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
116 break; 116 break;
117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
118 if (IS_I915G(dev) || IS_I915GM(dev) || 118 if (dcc & DCC_CHANNEL_XOR_DISABLE) {
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 /* This is the base swizzling by the GPU for
120 * tiled buffers.
121 */
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 122 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 123 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) && 124 } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { 125 /* Bit 11 swizzling by the CPU in addition. */
124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
126 */
127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
128 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 127 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
129 } else { 128 } else {
130 /* Bit 17 or perhaps other swizzling */ 129 /* Bit 17 swizzling by the CPU in addition. */
131 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
132 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
133 } 132 }
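For readers tracking the swizzle bookkeeping above: a mode such as I915_BIT_6_SWIZZLE_9_10 means bit 6 of a linear offset is XORed with bits 9 and 10 before the access reaches memory. A minimal sketch under that interpretation; the helper is ours, not part of the patch:

    /* I915_BIT_6_SWIZZLE_9_10: flip bit 6 whenever bits 9 and 10 of
     * the offset differ.  The _9_10_11 and bit-17 variants add
     * (offset >> 5) and (offset >> 11) to the same XOR chain.
     */
    static unsigned long swizzle_9_10(unsigned long offset)
    {
            unsigned long bit6 = ((offset >> 3) ^ (offset >> 4)) & (1UL << 6);

            return offset ^ bit6;
    }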
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d899413..377cc588f5e9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
362#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
362 363
363#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 364#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
364#define I915_CRC_ERROR_ENABLE (1UL<<29) 365#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -435,6 +436,7 @@
435 */ 436 */
436#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 437#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
437#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 438#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
439#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
438/* i830, required in DVO non-gang */ 440/* i830, required in DVO non-gang */
439#define PLL_P2_DIVIDE_BY_4 (1 << 23) 441#define PLL_P2_DIVIDE_BY_4 (1 << 23)
440#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 442#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -501,10 +503,12 @@
501#define FPB0 0x06048 503#define FPB0 0x06048
502#define FPB1 0x0604c 504#define FPB1 0x0604c
503#define FP_N_DIV_MASK 0x003f0000 505#define FP_N_DIV_MASK 0x003f0000
506#define FP_N_IGD_DIV_MASK 0x00ff0000
504#define FP_N_DIV_SHIFT 16 507#define FP_N_DIV_SHIFT 16
505#define FP_M1_DIV_MASK 0x00003f00 508#define FP_M1_DIV_MASK 0x00003f00
506#define FP_M1_DIV_SHIFT 8 509#define FP_M1_DIV_SHIFT 8
507#define FP_M2_DIV_MASK 0x0000003f 510#define FP_M2_DIV_MASK 0x0000003f
511#define FP_M2_IGD_DIV_MASK 0x000000ff
508#define FP_M2_DIV_SHIFT 0 512#define FP_M2_DIV_SHIFT 0
509#define DPLL_TEST 0x606c 513#define DPLL_TEST 0x606c
510#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 514#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -629,6 +633,22 @@
629#define TV_HOTPLUG_INT_EN (1 << 18) 633#define TV_HOTPLUG_INT_EN (1 << 18)
630#define CRT_HOTPLUG_INT_EN (1 << 9) 634#define CRT_HOTPLUG_INT_EN (1 << 9)
631#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 635#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
636#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
637/* must use period 64 on GM45 according to docs */
638#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
639#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
640#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
641#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
642#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
643#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
644#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
645#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
646#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
647#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
648#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
649#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
650#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
651
632 652
633#define PORT_HOTPLUG_STAT 0x61114 653#define PORT_HOTPLUG_STAT 0x61114
634#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 654#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -856,7 +876,7 @@
856 */ 876 */
857# define TV_ENC_C0_FIX (1 << 10) 877# define TV_ENC_C0_FIX (1 << 10)
858/** Bits that must be preserved by software */ 878/** Bits that must be preserved by software */
859# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) 879# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
860# define TV_FUSE_STATE_MASK (3 << 4) 880# define TV_FUSE_STATE_MASK (3 << 4)
861/** Read-only state that reports all features enabled */ 881/** Read-only state that reports all features enabled */
862# define TV_FUSE_STATE_ENABLED (0 << 4) 882# define TV_FUSE_STATE_ENABLED (0 << 4)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715ace3a0..de621aad85b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
162 u8 panel_type; 162 u8 panel_type;
163 u8 rsvd1; 163 u8 rsvd1;
164 /* LVDS capabilities, stored in a dword */ 164 /* LVDS capabilities, stored in a dword */
165 u8 rsvd2:1;
166 u8 lvds_edid:1;
167 u8 pixel_dither:1;
168 u8 pfit_ratio_auto:1;
169 u8 pfit_gfx_mode_enhanced:1;
170 u8 pfit_text_mode_enhanced:1;
171 u8 pfit_mode:2; 165 u8 pfit_mode:2;
166 u8 pfit_text_mode_enhanced:1;
167 u8 pfit_gfx_mode_enhanced:1;
168 u8 pfit_ratio_auto:1;
169 u8 pixel_dither:1;
170 u8 lvds_edid:1;
171 u8 rsvd2:1;
172 u8 rsvd4; 172 u8 rsvd4;
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
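The intel_bios.h hunk above only reverses the declaration order of the bit-fields; the fields themselves are unchanged. The point is allocation order: on the little-endian ABIs this driver targets, GCC assigns bit-fields starting at the least significant bit, so the first declared field must be the one living in bit 0 of the byte read from the VBT. A small sketch under that assumption (struct and values are illustrative):

    #include <assert.h>
    #include <string.h>

    struct sketch_lvds_caps {
            unsigned char pfit_mode:2;      /* expected in bits 1:0 */
            unsigned char rest:6;
    };

    int main(void)
    {
            unsigned char raw = 0x02;       /* bits 1:0 hold the value 2 */
            struct sketch_lvds_caps caps;

            memcpy(&caps, &raw, sizeof(caps));
            assert(caps.pfit_mode == 2);    /* holds for little-endian GCC */
            return 0;
    }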
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed3466e83..2b6d44381c31 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64static int intel_crt_mode_valid(struct drm_connector *connector, 64static int intel_crt_mode_valid(struct drm_connector *connector,
65 struct drm_display_mode *mode) 65 struct drm_display_mode *mode)
66{ 66{
67 struct drm_device *dev = connector->dev;
68
69 int max_clock = 0;
67 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
68 return MODE_NO_DBLESCAN; 71 return MODE_NO_DBLESCAN;
69 72
70 if (mode->clock > 400000 || mode->clock < 25000) 73 if (mode->clock < 25000)
71 return MODE_CLOCK_RANGE; 74 return MODE_CLOCK_LOW;
75
76 if (!IS_I9XX(dev))
77 max_clock = 350000;
78 else
79 max_clock = 400000;
80 if (mode->clock > max_clock)
81 return MODE_CLOCK_HIGH;
72 82
73 return MODE_OK; 83 return MODE_OK;
74} 84}
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
114 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
115 125
116 if (intel_crtc->pipe == 0) 126 if (intel_crtc->pipe == 0) {
117 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
118 else 128 I915_WRITE(BCLRPAT_A, 0);
129 } else {
119 adpa |= ADPA_PIPE_B_SELECT; 130 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0);
132 }
120 133
121 I915_WRITE(ADPA, adpa); 134 I915_WRITE(ADPA, adpa);
122} 135}
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
133{ 146{
134 struct drm_device *dev = connector->dev; 147 struct drm_device *dev = connector->dev;
135 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 temp; 149 u32 hotplug_en;
137 150 int i, tries = 0;
138 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 151 /*
 139 152 * On 4 series desktop, the CRT detect sequence needs to be done twice
140 temp = I915_READ(PORT_HOTPLUG_EN); 153 * to get a reliable result.
141 154 */
142 I915_WRITE(PORT_HOTPLUG_EN,
143 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
144 155
145 do { 156 if (IS_G4X(dev) && !IS_GM45(dev))
146 if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) 157 tries = 2;
147 break; 158 else
148 msleep(1); 159 tries = 1;
149 } while (time_after(timeout, jiffies)); 160 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
161 hotplug_en &= ~(CRT_HOTPLUG_MASK);
162 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
163
164 if (IS_GM45(dev))
165 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
166
167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
168
169 for (i = 0; i < tries ; i++) {
170 unsigned long timeout;
171 /* turn on the FORCE_DETECT */
172 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
173 timeout = jiffies + msecs_to_jiffies(1000);
174 /* wait for FORCE_DETECT to go off */
175 do {
176 if (!(I915_READ(PORT_HOTPLUG_EN) &
177 CRT_HOTPLUG_FORCE_DETECT))
178 break;
179 msleep(1);
180 } while (time_after(timeout, jiffies));
181 }
150 182
151 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == 183 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
152 CRT_HOTPLUG_MONITOR_COLOR) 184 CRT_HOTPLUG_MONITOR_COLOR)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2834276cb38..d9c50ff94d76 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@ typedef struct {
56} intel_p2_t; 56} intel_p2_t;
57 57
58#define INTEL_P2_NUM 2 58#define INTEL_P2_NUM 2
59 59typedef struct intel_limit intel_limit_t;
60typedef struct { 60struct intel_limit {
61 intel_range_t dot, vco, n, m, m1, m2, p, p1; 61 intel_range_t dot, vco, n, m, m1, m2, p, p1;
62 intel_p2_t p2; 62 intel_p2_t p2;
63} intel_limit_t; 63 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
64 int, int, intel_clock_t *);
65};
64 66
65#define I8XX_DOT_MIN 25000 67#define I8XX_DOT_MIN 25000
66#define I8XX_DOT_MAX 350000 68#define I8XX_DOT_MAX 350000
@@ -90,18 +92,32 @@ typedef struct {
90#define I9XX_DOT_MAX 400000 92#define I9XX_DOT_MAX 400000
91#define I9XX_VCO_MIN 1400000 93#define I9XX_VCO_MIN 1400000
92#define I9XX_VCO_MAX 2800000 94#define I9XX_VCO_MAX 2800000
95#define IGD_VCO_MIN 1700000
96#define IGD_VCO_MAX 3500000
93#define I9XX_N_MIN 1 97#define I9XX_N_MIN 1
94#define I9XX_N_MAX 6 98#define I9XX_N_MAX 6
99/* IGD's Ncounter is a ring counter */
100#define IGD_N_MIN 3
101#define IGD_N_MAX 6
95#define I9XX_M_MIN 70 102#define I9XX_M_MIN 70
96#define I9XX_M_MAX 120 103#define I9XX_M_MAX 120
104#define IGD_M_MIN 2
105#define IGD_M_MAX 256
97#define I9XX_M1_MIN 10 106#define I9XX_M1_MIN 10
98#define I9XX_M1_MAX 22 107#define I9XX_M1_MAX 22
99#define I9XX_M2_MIN 5 108#define I9XX_M2_MIN 5
100#define I9XX_M2_MAX 9 109#define I9XX_M2_MAX 9
110/* IGD M1 is reserved, and must be 0 */
111#define IGD_M1_MIN 0
112#define IGD_M1_MAX 0
113#define IGD_M2_MIN 0
114#define IGD_M2_MAX 254
101#define I9XX_P_SDVO_DAC_MIN 5 115#define I9XX_P_SDVO_DAC_MIN 5
102#define I9XX_P_SDVO_DAC_MAX 80 116#define I9XX_P_SDVO_DAC_MAX 80
103#define I9XX_P_LVDS_MIN 7 117#define I9XX_P_LVDS_MIN 7
104#define I9XX_P_LVDS_MAX 98 118#define I9XX_P_LVDS_MAX 98
119#define IGD_P_LVDS_MIN 7
120#define IGD_P_LVDS_MAX 112
105#define I9XX_P1_MIN 1 121#define I9XX_P1_MIN 1
106#define I9XX_P1_MAX 8 122#define I9XX_P1_MAX 8
107#define I9XX_P2_SDVO_DAC_SLOW 10 123#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -115,6 +131,97 @@ typedef struct {
115#define INTEL_LIMIT_I8XX_LVDS 1 131#define INTEL_LIMIT_I8XX_LVDS 1
116#define INTEL_LIMIT_I9XX_SDVO_DAC 2 132#define INTEL_LIMIT_I9XX_SDVO_DAC 2
117#define INTEL_LIMIT_I9XX_LVDS 3 133#define INTEL_LIMIT_I9XX_LVDS 3
134#define INTEL_LIMIT_G4X_SDVO 4
135#define INTEL_LIMIT_G4X_HDMI_DAC 5
136#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9
140
 141/* The parameters below are for SDVO on the G4X platform */
142#define G4X_DOT_SDVO_MIN 25000
143#define G4X_DOT_SDVO_MAX 270000
144#define G4X_VCO_MIN 1750000
145#define G4X_VCO_MAX 3500000
146#define G4X_N_SDVO_MIN 1
147#define G4X_N_SDVO_MAX 4
148#define G4X_M_SDVO_MIN 104
149#define G4X_M_SDVO_MAX 138
150#define G4X_M1_SDVO_MIN 17
151#define G4X_M1_SDVO_MAX 23
152#define G4X_M2_SDVO_MIN 5
153#define G4X_M2_SDVO_MAX 11
154#define G4X_P_SDVO_MIN 10
155#define G4X_P_SDVO_MAX 30
156#define G4X_P1_SDVO_MIN 1
157#define G4X_P1_SDVO_MAX 3
158#define G4X_P2_SDVO_SLOW 10
159#define G4X_P2_SDVO_FAST 10
160#define G4X_P2_SDVO_LIMIT 270000
161
 162/* The parameters below are for HDMI_DAC on the G4X platform */
163#define G4X_DOT_HDMI_DAC_MIN 22000
164#define G4X_DOT_HDMI_DAC_MAX 400000
165#define G4X_N_HDMI_DAC_MIN 1
166#define G4X_N_HDMI_DAC_MAX 4
167#define G4X_M_HDMI_DAC_MIN 104
168#define G4X_M_HDMI_DAC_MAX 138
169#define G4X_M1_HDMI_DAC_MIN 16
170#define G4X_M1_HDMI_DAC_MAX 23
171#define G4X_M2_HDMI_DAC_MIN 5
172#define G4X_M2_HDMI_DAC_MAX 11
173#define G4X_P_HDMI_DAC_MIN 5
174#define G4X_P_HDMI_DAC_MAX 80
175#define G4X_P1_HDMI_DAC_MIN 1
176#define G4X_P1_HDMI_DAC_MAX 8
177#define G4X_P2_HDMI_DAC_SLOW 10
178#define G4X_P2_HDMI_DAC_FAST 5
179#define G4X_P2_HDMI_DAC_LIMIT 165000
180
 181/* The parameters below are for SINGLE_CHANNEL_LVDS on the G4X platform */
182#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
183#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
184#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
185#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
186#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
187#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
188#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
189#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
190#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
191#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
192#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
193#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
194#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
195#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
196#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
197#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
198#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
199
 200/* The parameters below are for DUAL_CHANNEL_LVDS on the G4X platform */
201#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
202#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
203#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
204#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
205#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
206#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
207#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
208#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
209#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
210#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
211#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
212#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
213#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
214#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
215#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218
219static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock);
222static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock);
118 225
119static const intel_limit_t intel_limits[] = { 226static const intel_limit_t intel_limits[] = {
120 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 227 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
128 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 235 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
129 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 236 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
130 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 237 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
238 .find_pll = intel_find_best_PLL,
131 }, 239 },
132 { /* INTEL_LIMIT_I8XX_LVDS */ 240 { /* INTEL_LIMIT_I8XX_LVDS */
133 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 241 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
140 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 248 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
141 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 249 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
142 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 250 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
251 .find_pll = intel_find_best_PLL,
143 }, 252 },
144 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 253 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
145 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 254 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
152 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 261 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
153 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 262 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
154 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 263 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
264 .find_pll = intel_find_best_PLL,
155 }, 265 },
156 { /* INTEL_LIMIT_I9XX_LVDS */ 266 { /* INTEL_LIMIT_I9XX_LVDS */
157 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 267 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
167 */ 277 */
168 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 278 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
169 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 279 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
280 .find_pll = intel_find_best_PLL,
281 },
 282 /* The parameters and functions below are for the G4X chipset family */
283 { /* INTEL_LIMIT_G4X_SDVO */
284 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
285 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
286 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
287 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
288 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
289 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
290 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
291 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
292 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
293 .p2_slow = G4X_P2_SDVO_SLOW,
294 .p2_fast = G4X_P2_SDVO_FAST
295 },
296 .find_pll = intel_g4x_find_best_PLL,
297 },
298 { /* INTEL_LIMIT_G4X_HDMI_DAC */
299 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
300 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
301 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
302 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
303 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
304 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
305 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
306 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
307 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
308 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
309 .p2_fast = G4X_P2_HDMI_DAC_FAST
310 },
311 .find_pll = intel_g4x_find_best_PLL,
312 },
313 { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
314 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
315 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
316 .vco = { .min = G4X_VCO_MIN,
317 .max = G4X_VCO_MAX },
318 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
319 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
320 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
321 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
322 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
323 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
324 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
325 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
326 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
327 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
328 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
329 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
330 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
331 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
332 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
333 },
334 .find_pll = intel_g4x_find_best_PLL,
335 },
336 { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
337 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
338 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
339 .vco = { .min = G4X_VCO_MIN,
340 .max = G4X_VCO_MAX },
341 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
342 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
343 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
344 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
345 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
346 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
347 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
348 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
349 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
350 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
351 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
352 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
353 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
354 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
355 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
356 },
357 .find_pll = intel_g4x_find_best_PLL,
358 },
 359 { /* INTEL_LIMIT_IGD_SDVO_DAC */
360 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
361 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
362 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
363 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
364 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
365 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
366 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
170 }, 370 },
371 { /* INTEL_LIMIT_IGD_LVDS */
372 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
373 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
374 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
375 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
376 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
377 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
378 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
379 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
380 /* IGD only supports single-channel mode. */
381 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
382 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
383 },
384
171}; 385};
172 386
387static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
388{
389 struct drm_device *dev = crtc->dev;
390 struct drm_i915_private *dev_priv = dev->dev_private;
391 const intel_limit_t *limit;
392
393 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
394 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
395 LVDS_CLKB_POWER_UP)
396 /* LVDS with dual channel */
397 limit = &intel_limits
398 [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
399 else
 400 /* LVDS with single channel */
401 limit = &intel_limits
402 [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
403 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
404 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
405 limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
406 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
407 limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
 408 } else /* default to the I9XX limits for all other output types */
409 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
410
411 return limit;
412}
413
173static const intel_limit_t *intel_limit(struct drm_crtc *crtc) 414static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
174{ 415{
175 struct drm_device *dev = crtc->dev; 416 struct drm_device *dev = crtc->dev;
176 const intel_limit_t *limit; 417 const intel_limit_t *limit;
177 418
178 if (IS_I9XX(dev)) { 419 if (IS_G4X(dev)) {
420 limit = intel_g4x_limit(crtc);
421 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
179 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
180 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; 423 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
181 else 424 else
182 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 425 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
426 } else if (IS_IGD(dev)) {
427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428 limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
429 else
430 limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
183 } else { 431 } else {
184 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 432 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
185 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; 433 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
189 return limit; 437 return limit;
190} 438}
191 439
192static void intel_clock(int refclk, intel_clock_t *clock) 440/* m1 is reserved as 0 in IGD, n is a ring counter */
441static void igd_clock(int refclk, intel_clock_t *clock)
193{ 442{
443 clock->m = clock->m2 + 2;
444 clock->p = clock->p1 * clock->p2;
445 clock->vco = refclk * clock->m / clock->n;
446 clock->dot = clock->vco / clock->p;
447}
448
449static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
450{
451 if (IS_IGD(dev)) {
452 igd_clock(refclk, clock);
453 return;
454 }
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 455 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2; 456 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2); 457 clock->vco = refclk * clock->m / (clock->n + 2);
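Spelled out, the two dot-clock equations intel_clock() now dispatches between are:

    i9xx: dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2)
    IGD:  dot = refclk * (m2 + 2) / n / (p1 * p2)

On IGD the m1 term drops out entirely (the register field is reserved as 0) and n is used as-is rather than offset by 2, which is the ring-counter behaviour the comment above refers to.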
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
226static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) 487static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
227{ 488{
228 const intel_limit_t *limit = intel_limit (crtc); 489 const intel_limit_t *limit = intel_limit (crtc);
490 struct drm_device *dev = crtc->dev;
229 491
230 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 492 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
231 INTELPllInvalid ("p1 out of range\n"); 493 INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
235 INTELPllInvalid ("m2 out of range\n"); 497 INTELPllInvalid ("m2 out of range\n");
236 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 498 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
237 INTELPllInvalid ("m1 out of range\n"); 499 INTELPllInvalid ("m1 out of range\n");
238 if (clock->m1 <= clock->m2) 500 if (clock->m1 <= clock->m2 && !IS_IGD(dev))
239 INTELPllInvalid ("m1 <= m2\n"); 501 INTELPllInvalid ("m1 <= m2\n");
240 if (clock->m < limit->m.min || limit->m.max < clock->m) 502 if (clock->m < limit->m.min || limit->m.max < clock->m)
241 INTELPllInvalid ("m out of range\n"); 503 INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
252 return true; 514 return true;
253} 515}
254 516
255/** 517static bool
256 * Returns a set of divisors for the desired target clock with the given 518intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 * refclk, or FALSE. The returned values represent the clock equation: 519 int target, int refclk, intel_clock_t *best_clock)
258 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 520
259 */
260static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
261 int refclk, intel_clock_t *best_clock)
262{ 521{
263 struct drm_device *dev = crtc->dev; 522 struct drm_device *dev = crtc->dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 523 struct drm_i915_private *dev_priv = dev->dev_private;
265 intel_clock_t clock; 524 intel_clock_t clock;
266 const intel_limit_t *limit = intel_limit(crtc);
267 int err = target; 525 int err = target;
268 526
269 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 527 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
289 memset (best_clock, 0, sizeof (*best_clock)); 547 memset (best_clock, 0, sizeof (*best_clock));
290 548
291 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 549 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
292 for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && 550 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
293 clock.m2 <= limit->m2.max; clock.m2++) { 551 /* m1 is always 0 in IGD */
552 if (clock.m2 >= clock.m1 && !IS_IGD(dev))
553 break;
294 for (clock.n = limit->n.min; clock.n <= limit->n.max; 554 for (clock.n = limit->n.min; clock.n <= limit->n.max;
295 clock.n++) { 555 clock.n++) {
296 for (clock.p1 = limit->p1.min; 556 for (clock.p1 = limit->p1.min;
297 clock.p1 <= limit->p1.max; clock.p1++) { 557 clock.p1 <= limit->p1.max; clock.p1++) {
298 int this_err; 558 int this_err;
299 559
300 intel_clock(refclk, &clock); 560 intel_clock(dev, refclk, &clock);
301 561
302 if (!intel_PLL_is_valid(crtc, &clock)) 562 if (!intel_PLL_is_valid(crtc, &clock))
303 continue; 563 continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
315 return (err != target); 575 return (err != target);
316} 576}
317 577
578static bool
579intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
580 int target, int refclk, intel_clock_t *best_clock)
581{
582 struct drm_device *dev = crtc->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 intel_clock_t clock;
585 int max_n;
586 bool found;
587 /* approximately equals target * 0.00488 */
588 int err_most = (target >> 8) + (target >> 10);
589 found = false;
590
591 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
592 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
593 LVDS_CLKB_POWER_UP)
594 clock.p2 = limit->p2.p2_fast;
595 else
596 clock.p2 = limit->p2.p2_slow;
597 } else {
598 if (target < limit->p2.dot_limit)
599 clock.p2 = limit->p2.p2_slow;
600 else
601 clock.p2 = limit->p2.p2_fast;
602 }
603
604 memset(best_clock, 0, sizeof(*best_clock));
605 max_n = limit->n.max;
 606 /* based on hardware requirement, prefer smaller n for precision */
607 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 608 /* based on hardware requirement, prefer larger m1, m2, p1 */
609 for (clock.m1 = limit->m1.max;
610 clock.m1 >= limit->m1.min; clock.m1--) {
611 for (clock.m2 = limit->m2.max;
612 clock.m2 >= limit->m2.min; clock.m2--) {
613 for (clock.p1 = limit->p1.max;
614 clock.p1 >= limit->p1.min; clock.p1--) {
615 int this_err;
616
617 intel_clock(dev, refclk, &clock);
618 if (!intel_PLL_is_valid(crtc, &clock))
619 continue;
620 this_err = abs(clock.dot - target) ;
621 if (this_err < err_most) {
622 *best_clock = clock;
623 err_most = this_err;
624 max_n = clock.n;
625 found = true;
626 }
627 }
628 }
629 }
630 }
631
632 return found;
633}
634
318void 635void
319intel_wait_for_vblank(struct drm_device *dev) 636intel_wait_for_vblank(struct drm_device *dev)
320{ 637{
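The error bound in intel_g4x_find_best_PLL() checks out: (target >> 8) + (target >> 10) = target/256 + target/1024 = 5*target/1024, roughly 0.00488 * target as the comment says, so candidates are accepted only within about 0.49% of the requested dot clock. Each hit also lowers err_most and caps max_n at the winning n, so later candidates must be strictly more accurate while using no larger an n.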
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
634 return 400000; 951 return 400000;
635 else if (IS_I915G(dev)) 952 else if (IS_I915G(dev))
636 return 333000; 953 return 333000;
637 else if (IS_I945GM(dev) || IS_845G(dev)) 954 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
638 return 200000; 955 return 200000;
639 else if (IS_I915GM(dev)) { 956 else if (IS_I915GM(dev)) {
640 u16 gcfgc = 0; 957 u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
733 bool is_crt = false, is_lvds = false, is_tv = false; 1050 bool is_crt = false, is_lvds = false, is_tv = false;
734 struct drm_mode_config *mode_config = &dev->mode_config; 1051 struct drm_mode_config *mode_config = &dev->mode_config;
735 struct drm_connector *connector; 1052 struct drm_connector *connector;
1053 const intel_limit_t *limit;
736 int ret; 1054 int ret;
737 1055
738 drm_vblank_pre_modeset(dev, pipe); 1056 drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
776 refclk = 48000; 1094 refclk = 48000;
777 } 1095 }
778 1096
779 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); 1097 /*
1098 * Returns a set of divisors for the desired target clock with the given
1099 * refclk, or FALSE. The returned values represent the clock equation:
1100 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
1101 */
1102 limit = intel_limit(crtc);
1103 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
780 if (!ok) { 1104 if (!ok) {
781 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1105 DRM_ERROR("Couldn't find PLL settings for mode!\n");
782 return -EINVAL; 1106 return -EINVAL;
783 } 1107 }
784 1108
785 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1109 if (IS_IGD(dev))
1110 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1111 else
1112 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
786 1113
787 dpll = DPLL_VGA_MODE_DIS; 1114 dpll = DPLL_VGA_MODE_DIS;
788 if (IS_I9XX(dev)) { 1115 if (IS_I9XX(dev)) {
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
799 } 1126 }
800 1127
801 /* compute bitmask from p1 value */ 1128 /* compute bitmask from p1 value */
802 dpll |= (1 << (clock.p1 - 1)) << 16; 1129 if (IS_IGD(dev))
1130 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1131 else
1132 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
803 switch (clock.p2) { 1133 switch (clock.p2) {
804 case 5: 1134 case 5:
805 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1135 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1279 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 1609 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
1280 1610
1281 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 1611 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1282 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 1612 if (IS_IGD(dev)) {
1283 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 1613 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
1614 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
1615 } else {
1616 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1617 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1618 }
1619
1284 if (IS_I9XX(dev)) { 1620 if (IS_I9XX(dev)) {
1285 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 1621 if (IS_IGD(dev))
1622 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
1623 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
1624 else
1625 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
1286 DPLL_FPA01_P1_POST_DIV_SHIFT); 1626 DPLL_FPA01_P1_POST_DIV_SHIFT);
1287 1627
1288 switch (dpll & DPLL_MODE_MASK) { 1628 switch (dpll & DPLL_MODE_MASK) {
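The IGD path packs N one-hot into the FP register, while older parts store it in plain binary; that is why the read-out above needs ffs() - 1 only on IGD. A standalone sketch of both encodings (the 6- and 8-bit field widths are assumptions for illustration):

#include <assert.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int n = 4, m1 = 10, m2 = 5;
	unsigned int fp_i9xx = n << 16 | m1 << 8 | m2;		/* plain binary N */
	unsigned int fp_igd  = (1u << n) << 16 | m1 << 8 | m2;	/* one-hot N */

	assert(((fp_i9xx >> 16) & 0x3f) == n);
	assert(ffs((fp_igd >> 16) & 0xff) - 1 == n);	/* ffs is 1-based */
	return 0;
}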
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1301 } 1641 }
1302 1642
 1303 /* XXX: Handle the 100MHz refclk */ 1643 /* XXX: Handle the 100MHz refclk */
1304 intel_clock(96000, &clock); 1644 intel_clock(dev, 96000, &clock);
1305 } else { 1645 } else {
1306 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 1646 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
1307 1647
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1313 if ((dpll & PLL_REF_INPUT_MASK) == 1653 if ((dpll & PLL_REF_INPUT_MASK) ==
1314 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1654 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1315 /* XXX: might not be 66MHz */ 1655 /* XXX: might not be 66MHz */
1316 intel_clock(66000, &clock); 1656 intel_clock(dev, 66000, &clock);
1317 } else 1657 } else
1318 intel_clock(48000, &clock); 1658 intel_clock(dev, 48000, &clock);
1319 } else { 1659 } else {
1320 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1660 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1321 clock.p1 = 2; 1661 clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1328 else 1668 else
1329 clock.p2 = 2; 1669 clock.p2 = 2;
1330 1670
1331 intel_clock(48000, &clock); 1671 intel_clock(dev, 48000, &clock);
1332 } 1672 }
1333 } 1673 }
1334 1674
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
1474 1814
1475 if (IS_I9XX(dev)) { 1815 if (IS_I9XX(dev)) {
1476 int found; 1816 int found;
1817 u32 reg;
1477 1818
1478 if (I915_READ(SDVOB) & SDVO_DETECTED) { 1819 if (I915_READ(SDVOB) & SDVO_DETECTED) {
1479 found = intel_sdvo_init(dev, SDVOB); 1820 found = intel_sdvo_init(dev, SDVOB);
1480 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1821 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1481 intel_hdmi_init(dev, SDVOB); 1822 intel_hdmi_init(dev, SDVOB);
1482 } 1823 }
1483 if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { 1824
 1825 /* Before G4X, SDVOC doesn't have its own detect register */
1826 if (IS_G4X(dev))
1827 reg = SDVOC;
1828 else
1829 reg = SDVOB;
1830
1831 if (I915_READ(reg) & SDVO_DETECTED) {
1484 found = intel_sdvo_init(dev, SDVOC); 1832 found = intel_sdvo_init(dev, SDVOC);
1485 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1833 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1486 intel_hdmi_init(dev, SDVOC); 1834 intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af98854..6619f26e46a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
265 pfit_control = 0; 265 pfit_control = 0;
266 266
267 if (!IS_I965G(dev)) { 267 if (!IS_I965G(dev)) {
268 if (dev_priv->panel_wants_dither) 268 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
269 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 269 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
270 } 270 }
271 else 271 else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d67369b..ceca9471a75a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
217 */ 217 */
218static const struct color_conversion ntsc_m_csc_composite = { 218static const struct color_conversion ntsc_m_csc_composite = {
219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
222}; 222};
223 223
224static const struct video_levels ntsc_m_levels_composite = { 224static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
226}; 226};
227 227
228static const struct color_conversion ntsc_m_csc_svideo = { 228static const struct color_conversion ntsc_m_csc_svideo = {
229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
232}; 232};
233 233
234static const struct video_levels ntsc_m_levels_svideo = { 234static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
237 237
238static const struct color_conversion ntsc_j_csc_composite = { 238static const struct color_conversion ntsc_j_csc_composite = {
239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, 239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, 240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, 241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
242}; 242};
243 243
244static const struct video_levels ntsc_j_levels_composite = { 244static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
247 247
248static const struct color_conversion ntsc_j_csc_svideo = { 248static const struct color_conversion ntsc_j_csc_svideo = {
249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, 249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, 250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, 251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
252}; 252};
253 253
254static const struct video_levels ntsc_j_levels_svideo = { 254static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
257 257
258static const struct color_conversion pal_csc_composite = { 258static const struct color_conversion pal_csc_composite = {
259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, 259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, 260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, 261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
262}; 262};
263 263
264static const struct video_levels pal_levels_composite = { 264static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
267 267
268static const struct color_conversion pal_csc_svideo = { 268static const struct color_conversion pal_csc_svideo = {
269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, 269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, 270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, 271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
272}; 272};
273 273
274static const struct video_levels pal_levels_svideo = { 274static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
277 277
278static const struct color_conversion pal_m_csc_composite = { 278static const struct color_conversion pal_m_csc_composite = {
279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
282}; 282};
283 283
284static const struct video_levels pal_m_levels_composite = { 284static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
286}; 286};
287 287
288static const struct color_conversion pal_m_csc_svideo = { 288static const struct color_conversion pal_m_csc_svideo = {
289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
292}; 292};
293 293
294static const struct video_levels pal_m_levels_svideo = { 294static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
297 297
298static const struct color_conversion pal_n_csc_composite = { 298static const struct color_conversion pal_n_csc_composite = {
299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
302}; 302};
303 303
304static const struct video_levels pal_n_levels_composite = { 304static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
306}; 306};
307 307
308static const struct color_conversion pal_n_csc_svideo = { 308static const struct color_conversion pal_n_csc_svideo = {
309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
312}; 312};
313 313
314static const struct video_levels pal_n_levels_svideo = { 314static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
319 * Component connections 319 * Component connections
320 */ 320 */
321static const struct color_conversion sdtv_csc_yprpb = { 321static const struct color_conversion sdtv_csc_yprpb = {
322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, 322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, 323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, 324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
325}; 325};
326 326
327static const struct color_conversion sdtv_csc_rgb = { 327static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
331}; 331};
332 332
333static const struct color_conversion hdtv_csc_yprpb = { 333static const struct color_conversion hdtv_csc_yprpb = {
334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, 334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, 335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, 336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
337}; 337};
338 338
339static const struct color_conversion hdtv_csc_rgb = { 339static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
414static const struct tv_mode tv_modes[] = { 414static const struct tv_mode tv_modes[] = {
415 { 415 {
416 .name = "NTSC-M", 416 .name = "NTSC-M",
417 .clock = 107520, 417 .clock = 108000,
418 .refresh = 29970, 418 .refresh = 29970,
419 .oversample = TV_OVERSAMPLE_8X, 419 .oversample = TV_OVERSAMPLE_8X,
420 .component_only = 0, 420 .component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
442 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 442 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
443 443
444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
445 .dda1_inc = 136, 445 .dda1_inc = 135,
446 .dda2_inc = 7624, .dda2_size = 20013, 446 .dda2_inc = 20800, .dda2_size = 27456,
447 .dda3_inc = 0, .dda3_size = 0, 447 .dda3_inc = 0, .dda3_size = 0,
448 .sc_reset = TV_SC_RESET_EVERY_4, 448 .sc_reset = TV_SC_RESET_EVERY_4,
449 .pal_burst = false, 449 .pal_burst = false,
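The retuned NTSC DDA values are consistent with the new 108 MHz pixel clock. A standalone check, assuming the subcarrier DDA synthesizes f_sc = (dda1_inc + dda2_inc / dda2_size) * clock / 4096 (a reading of the numbers, not something this patch states):

#include <stdio.h>

int main(void)
{
	double clock_khz = 108000.0;	/* the new .clock value */

	/* NTSC-M above: dda1_inc = 135, dda2_inc = 20800, dda2_size = 27456 */
	double sc_khz = (135.0 + 20800.0 / 27456.0) * clock_khz / 4096.0;

	printf("f_sc = %.4f kHz\n", sc_khz);	/* 3579.5455 = 315/88 MHz, the NTSC burst */
	return 0;
}

The PAL entry further down checks out the same way: (168 + 4122/27648) * 108000 / 4096 ≈ 4433.62 kHz, the 4.43361875 MHz PAL subcarrier.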
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
457 }, 457 },
458 { 458 {
459 .name = "NTSC-443", 459 .name = "NTSC-443",
460 .clock = 107520, 460 .clock = 108000,
461 .refresh = 29970, 461 .refresh = 29970,
462 .oversample = TV_OVERSAMPLE_8X, 462 .oversample = TV_OVERSAMPLE_8X,
463 .component_only = 0, 463 .component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
485 485
486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
487 .dda1_inc = 168, 487 .dda1_inc = 168,
488 .dda2_inc = 18557, .dda2_size = 20625, 488 .dda2_inc = 4093, .dda2_size = 27456,
489 .dda3_inc = 0, .dda3_size = 0, 489 .dda3_inc = 310, .dda3_size = 525,
490 .sc_reset = TV_SC_RESET_EVERY_8, 490 .sc_reset = TV_SC_RESET_NEVER,
491 .pal_burst = true, 491 .pal_burst = false,
492 492
493 .composite_levels = &ntsc_m_levels_composite, 493 .composite_levels = &ntsc_m_levels_composite,
494 .composite_color = &ntsc_m_csc_composite, 494 .composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
499 }, 499 },
500 { 500 {
501 .name = "NTSC-J", 501 .name = "NTSC-J",
502 .clock = 107520, 502 .clock = 108000,
503 .refresh = 29970, 503 .refresh = 29970,
504 .oversample = TV_OVERSAMPLE_8X, 504 .oversample = TV_OVERSAMPLE_8X,
505 .component_only = 0, 505 .component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
527 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 527 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
528 528
529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
530 .dda1_inc = 136, 530 .dda1_inc = 135,
531 .dda2_inc = 7624, .dda2_size = 20013, 531 .dda2_inc = 20800, .dda2_size = 27456,
532 .dda3_inc = 0, .dda3_size = 0, 532 .dda3_inc = 0, .dda3_size = 0,
533 .sc_reset = TV_SC_RESET_EVERY_4, 533 .sc_reset = TV_SC_RESET_EVERY_4,
534 .pal_burst = false, 534 .pal_burst = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
542 }, 542 },
543 { 543 {
544 .name = "PAL-M", 544 .name = "PAL-M",
545 .clock = 107520, 545 .clock = 108000,
546 .refresh = 29970, 546 .refresh = 29970,
547 .oversample = TV_OVERSAMPLE_8X, 547 .oversample = TV_OVERSAMPLE_8X,
548 .component_only = 0, 548 .component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
570 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 570 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
571 571
572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
573 .dda1_inc = 136, 573 .dda1_inc = 135,
574 .dda2_inc = 7624, .dda2_size = 20013, 574 .dda2_inc = 16704, .dda2_size = 27456,
575 .dda3_inc = 0, .dda3_size = 0, 575 .dda3_inc = 0, .dda3_size = 0,
576 .sc_reset = TV_SC_RESET_EVERY_4, 576 .sc_reset = TV_SC_RESET_EVERY_8,
577 .pal_burst = false, 577 .pal_burst = true,
578 578
579 .composite_levels = &pal_m_levels_composite, 579 .composite_levels = &pal_m_levels_composite,
580 .composite_color = &pal_m_csc_composite, 580 .composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
586 { 586 {
587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
588 .name = "PAL-N", 588 .name = "PAL-N",
589 .clock = 107520, 589 .clock = 108000,
590 .refresh = 25000, 590 .refresh = 25000,
591 .oversample = TV_OVERSAMPLE_8X, 591 .oversample = TV_OVERSAMPLE_8X,
592 .component_only = 0, 592 .component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
615 615
616 616
617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
618 .dda1_inc = 168, 618 .dda1_inc = 135,
619 .dda2_inc = 18557, .dda2_size = 20625, 619 .dda2_inc = 23578, .dda2_size = 27648,
620 .dda3_inc = 0, .dda3_size = 0, 620 .dda3_inc = 134, .dda3_size = 625,
621 .sc_reset = TV_SC_RESET_EVERY_8, 621 .sc_reset = TV_SC_RESET_EVERY_8,
622 .pal_burst = true, 622 .pal_burst = true,
623 623
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
631 { 631 {
632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
633 .name = "PAL", 633 .name = "PAL",
634 .clock = 107520, 634 .clock = 108000,
635 .refresh = 25000, 635 .refresh = 25000,
636 .oversample = TV_OVERSAMPLE_8X, 636 .oversample = TV_OVERSAMPLE_8X,
637 .component_only = 0, 637 .component_only = 0,
638 638
639 .hsync_end = 64, .hblank_end = 128, 639 .hsync_end = 64, .hblank_end = 142,
640 .hblank_start = 844, .htotal = 863, 640 .hblank_start = 844, .htotal = 863,
641 641
642 .progressive = false, .trilevel_sync = false, 642 .progressive = false, .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
659 659
660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
661 .dda1_inc = 168, 661 .dda1_inc = 168,
662 .dda2_inc = 18557, .dda2_size = 20625, 662 .dda2_inc = 4122, .dda2_size = 27648,
663 .dda3_inc = 0, .dda3_size = 0, 663 .dda3_inc = 67, .dda3_size = 625,
664 .sc_reset = TV_SC_RESET_EVERY_8, 664 .sc_reset = TV_SC_RESET_EVERY_8,
665 .pal_burst = true, 665 .pal_burst = true,
666 666
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
689 .veq_ena = false, 689 .veq_ena = false,
690 690
691 .vi_end_f1 = 44, .vi_end_f2 = 44, 691 .vi_end_f1 = 44, .vi_end_f2 = 44,
692 .nbr_end = 496, 692 .nbr_end = 479,
693 693
694 .burst_ena = false, 694 .burst_ena = false,
695 695
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
713 .veq_ena = false, 713 .veq_ena = false,
714 714
715 .vi_end_f1 = 44, .vi_end_f2 = 44, 715 .vi_end_f1 = 44, .vi_end_f2 = 44,
716 .nbr_end = 496, 716 .nbr_end = 479,
717 717
718 .burst_ena = false, 718 .burst_ena = false,
719 719
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
876 .component_only = 1, 876 .component_only = 1,
877 877
878 .hsync_end = 88, .hblank_end = 235, 878 .hsync_end = 88, .hblank_end = 235,
879 .hblank_start = 2155, .htotal = 2200, 879 .hblank_start = 2155, .htotal = 2201,
880 880
881 .progressive = false, .trilevel_sync = true, 881 .progressive = false, .trilevel_sync = true,
882 882
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
1086 return MODE_OK; 1086 return MODE_OK;
1087 return MODE_CLOCK_RANGE; 1087 return MODE_CLOCK_RANGE;
1088} 1088}
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1135 if (!tv_mode) 1135 if (!tv_mode)
1136 return; /* can't happen (mode_prepare prevents this) */ 1136 return; /* can't happen (mode_prepare prevents this) */
1137 1137
1138 tv_ctl = 0; 1138 tv_ctl = I915_READ(TV_CTL);
1139 tv_ctl &= TV_CTL_SAVE;
1139 1140
1140 switch (tv_priv->type) { 1141 switch (tv_priv->type) {
1141 default: 1142 default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1215 /* dda1 implies valid video levels */ 1216 /* dda1 implies valid video levels */
1216 if (tv_mode->dda1_inc) { 1217 if (tv_mode->dda1_inc) {
1217 scctl1 |= TV_SC_DDA1_EN; 1218 scctl1 |= TV_SC_DDA1_EN;
1218 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1219 } 1219 }
1220 1220
1221 if (tv_mode->dda2_inc) 1221 if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1225 scctl1 |= TV_SC_DDA3_EN; 1225 scctl1 |= TV_SC_DDA3_EN;
1226 1226
1227 scctl1 |= tv_mode->sc_reset; 1227 scctl1 |= tv_mode->sc_reset;
1228 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1228 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; 1229 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1229 1230
1230 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | 1231 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1266 color_conversion->av); 1267 color_conversion->av);
1267 } 1268 }
1268 1269
1269 I915_WRITE(TV_CLR_KNOBS, 0x00606000); 1270 if (IS_I965G(dev))
1271 I915_WRITE(TV_CLR_KNOBS, 0x00404000);
1272 else
1273 I915_WRITE(TV_CLR_KNOBS, 0x00606000);
1274
1270 if (video_levels) 1275 if (video_levels)
1271 I915_WRITE(TV_CLR_LEVEL, 1276 I915_WRITE(TV_CLR_LEVEL,
1272 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | 1277 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1401 tv_dac = I915_READ(TV_DAC); 1406 tv_dac = I915_READ(TV_DAC);
1402 I915_WRITE(TV_DAC, save_tv_dac); 1407 I915_WRITE(TV_DAC, save_tv_dac);
1403 I915_WRITE(TV_CTL, save_tv_ctl); 1408 I915_WRITE(TV_CTL, save_tv_ctl);
1409 intel_wait_for_vblank(dev);
1404 } 1410 }
1405 /* 1411 /*
1406 * A B C 1412 * A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
1451 mode = reported_modes[0]; 1457 mode = reported_modes[0];
1452 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1458 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1453 1459
1454 if (encoder->crtc) { 1460 if (encoder->crtc && encoder->crtc->enabled) {
1455 type = intel_tv_detect_type(encoder->crtc, intel_output); 1461 type = intel_tv_detect_type(encoder->crtc, intel_output);
1456 } else { 1462 } else {
1457 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1463 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
1462 type = -1; 1468 type = -1;
1463 } 1469 }
1464 1470
1471 tv_priv->type = type;
1472
1465 if (type < 0) 1473 if (type < 0)
1466 return connector_status_disconnected; 1474 return connector_status_disconnected;
1467 1475
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
1495 struct drm_display_mode *mode_ptr; 1503 struct drm_display_mode *mode_ptr;
1496 struct intel_output *intel_output = to_intel_output(connector); 1504 struct intel_output *intel_output = to_intel_output(connector);
1497 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1505 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1498 int j; 1506 int j, count = 0;
1507 u64 tmp;
1499 1508
1500 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); 1509 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
1501 j++) { 1510 j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
1510 && !tv_mode->component_only)) 1519 && !tv_mode->component_only))
1511 continue; 1520 continue;
1512 1521
1513 mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), 1522 mode_ptr = drm_mode_create(connector->dev);
1514 DRM_MEM_DRIVER); 1523 if (!mode_ptr)
1524 continue;
1515 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); 1525 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
1516 1526
1517 mode_ptr->hdisplay = hactive_s; 1527 mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
1528 mode_ptr->vsync_end = mode_ptr->vsync_start + 1; 1538 mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
1529 mode_ptr->vtotal = vactive_s + 33; 1539 mode_ptr->vtotal = vactive_s + 33;
1530 1540
1531 mode_ptr->clock = (int) (tv_mode->refresh * 1541 tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
1532 mode_ptr->vtotal * 1542 tmp *= mode_ptr->htotal;
1533 mode_ptr->htotal / 1000) / 1000; 1543 tmp = div_u64(tmp, 1000000);
1544 mode_ptr->clock = (int) tmp;
1534 1545
1535 mode_ptr->type = DRM_MODE_TYPE_DRIVER; 1546 mode_ptr->type = DRM_MODE_TYPE_DRIVER;
1536 drm_mode_probed_add(connector, mode_ptr); 1547 drm_mode_probed_add(connector, mode_ptr);
1548 count++;
1537 } 1549 }
1538 1550
1539 return 0; 1551 return count;
1540} 1552}
1541 1553
1542static void 1554static void
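The div_u64() rewrite in intel_tv_get_modes() is an overflow fix as much as a cleanup: .refresh is stored in millihertz (29970 for 29.97 Hz), so refresh * vtotal * htotal reaches about 1.35e10 for SD totals and no longer fits the old 32-bit int expression. Dividing the 64-bit product by 10^6 converts millihertz-pixels to the kHz pixel clock DRM expects. A standalone sketch of the arithmetic (the 858x525 totals are the usual BT.601 values, not taken from this patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t refresh_mhz = 29970;			/* 29.97 Hz in millihertz */
	uint64_t htotal = 858, vtotal = 525;		/* example SD totals */
	uint64_t clock_khz = refresh_mhz * vtotal * htotal / 1000000;

	printf("clock = %llu kHz\n", (unsigned long long)clock_khz);	/* ~13500 */
	return 0;
}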