Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/agp/intel-gtt.c                 19
-rw-r--r--  drivers/gpu/drm/Kconfig                       3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              35
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c               5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h               5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c           4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              37
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h               6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         25
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c         3
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      83
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h       3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c       22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c            3
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c     14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c           2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c       3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c           4
-rw-r--r--  drivers/gpu/vga/vgaarb.c                      2
23 files changed, 202 insertions, 95 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 826ab0939a12..fab3d3265adb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -68,6 +68,7 @@ static struct _intel_private {
         phys_addr_t gma_bus_addr;
         u32 PGETBL_save;
         u32 __iomem *gtt; /* I915G */
+        bool clear_fake_agp; /* on first access via agp, fill with scratch */
         int num_dcache_entries;
         union {
                 void __iomem *i9xx_flush_page;
@@ -869,21 +870,12 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
 
 static int intel_fake_agp_configure(void)
 {
-        int i;
-
         if (!intel_enable_gtt())
                 return -EIO;
 
+        intel_private.clear_fake_agp = true;
         agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-        for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
-                intel_private.driver->write_entry(intel_private.scratch_page_dma,
-                                                  i, 0);
-        }
-        readl(intel_private.gtt+i-1); /* PCI Posting. */
-
-        global_cache_flush();
-
         return 0;
 }
 
@@ -945,6 +937,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
         int ret = -EINVAL;
 
+        if (intel_private.clear_fake_agp) {
+                int start = intel_private.base.stolen_size / PAGE_SIZE;
+                int end = intel_private.base.gtt_mappable_entries;
+                intel_gtt_clear_range(start, end - start);
+                intel_private.clear_fake_agp = false;
+        }
+
         if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
                 return i810_insert_dcache_entries(mem, pg_start, type);
 
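Aside: the intel-gtt.c hunks above drop the eager per-entry scratch fill at configure time in favour of a flag that defers the clear to the first fake-AGP insert. A minimal, compilable userspace sketch of that deferred-clear pattern; the struct and function names here are illustrative stand-ins, not the intel-gtt API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state touched by the hunks above. */
struct fake_gtt {
        bool clear_on_first_use;        /* set at configure time */
        unsigned int stolen_entries;    /* entries reserved for stolen memory */
        unsigned int mappable_entries;  /* end of the mappable range */
};

static void gtt_clear_range(unsigned int first, unsigned int count)
{
        printf("clearing %u entries starting at %u\n", count, first);
}

static void gtt_configure(struct fake_gtt *gtt)
{
        /* Cheap: just remember that a clear is owed. */
        gtt->clear_on_first_use = true;
}

static void gtt_insert_entries(struct fake_gtt *gtt)
{
        /* Pay the cost once, on the first real use. */
        if (gtt->clear_on_first_use) {
                unsigned int start = gtt->stolen_entries;

                gtt_clear_range(start, gtt->mappable_entries - start);
                gtt->clear_on_first_use = false;
        }
        /* ... bind the caller's pages here ... */
}

int main(void)
{
        struct fake_gtt gtt = { .stolen_entries = 8, .mappable_entries = 64 };

        gtt_configure(&gtt);
        gtt_insert_entries(&gtt);       /* clears 56 entries */
        gtt_insert_entries(&gtt);       /* no clear the second time */
        return 0;
}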
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bea966f8ac84..0902d4460039 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -100,7 +100,10 @@ config DRM_I830
 config DRM_I915
         tristate "i915 driver"
         depends on AGP_INTEL
+        # we need shmfs for the swappable backing store, and in particular
+        # the shmem_readpage() which depends upon tmpfs
         select SHMEM
+        select TMPFS
         select DRM_KMS_HELPER
         select FB_CFB_FILLRECT
         select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b04..17bd766f2081 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-        struct intel_ring_buffer *ring = LP_RING(dev_priv);
+        int ret;
 
         master_priv->sarea = drm_getsarea(dev);
         if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
         }
 
         if (init->ring_size != 0) {
-                if (ring->obj != NULL) {
+                if (LP_RING(dev_priv)->obj != NULL) {
                         i915_dma_cleanup(dev);
                         DRM_ERROR("Client tried to initialize ringbuffer in "
                                   "GEM mode\n");
                         return -EINVAL;
                 }
 
-                ring->size = init->ring_size;
-
-                ring->map.offset = init->ring_start;
-                ring->map.size = init->ring_size;
-                ring->map.type = 0;
-                ring->map.flags = 0;
-                ring->map.mtrr = 0;
-
-                drm_core_ioremap_wc(&ring->map, dev);
-
-                if (ring->map.handle == NULL) {
+                ret = intel_render_ring_init_dri(dev,
+                                                 init->ring_start,
+                                                 init->ring_size);
+                if (ret) {
                         i915_dma_cleanup(dev);
-                        DRM_ERROR("can not ioremap virtual address for"
-                                  " ring buffer\n");
-                        return -ENOMEM;
+                        return ret;
                 }
         }
 
-        ring->virtual_start = ring->map.handle;
-
         dev_priv->cpp = init->cpp;
         dev_priv->back_offset = init->back_offset;
         dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
         if (ret)
                 DRM_INFO("failed to find VBIOS tables\n");
 
-        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+        /* If we have > 1 VGA cards, then we need to arbitrate access
+         * to the common VGA resources.
+         *
+         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+         * then we do not take part in VGA arbitration and the
+         * vga_client_register() fails with -ENODEV.
+         */
         ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-        if (ret)
+        if (ret && ret != -ENODEV)
                 goto cleanup_ringbuffer;
 
         intel_register_dsm_handler();
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 72fea2bcfc4f..66796bb82d3e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,7 +60,7 @@ extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {            \
         .class = PCI_CLASS_DISPLAY_VGA << 8,    \
-        .class_mask = 0xffff00,                 \
+        .class_mask = 0xff0000,                 \
         .vendor = 0x8086,                       \
         .device = id,                           \
         .subvendor = PCI_ANY_ID,                \
@@ -752,6 +752,9 @@ static int __init i915_init(void)
                 driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+        if (!(driver.driver_features & DRIVER_MODESET))
+                driver.get_vblank_timestamp = NULL;
+
         return drm_init(&driver);
 }
 
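Aside: the class_mask change above widens the PCI ID match from the VGA subclass alone to the whole display base class. A small standalone check of that mask arithmetic; the match rule modelled here, ((dev_class ^ id_class) & class_mask) == 0, is assumed to mirror the PCI core's comparison and is worth verifying against drivers/pci/pci-driver.c.

#include <stdio.h>
#include <stdint.h>

/* PCI class codes are 24 bits: base class, subclass, prog-if. */
#define CLASS_DISPLAY_VGA       0x030000        /* base 0x03, subclass 0x00 */
#define CLASS_DISPLAY_OTHER     0x038000        /* base 0x03, subclass 0x80 */

/* Assumed match rule: bits covered by the mask must agree. */
static int class_matches(uint32_t dev_class, uint32_t id_class, uint32_t mask)
{
        return ((dev_class ^ id_class) & mask) == 0;
}

int main(void)
{
        /* Old mask: only the VGA subclass matches. */
        printf("0xffff00: non-VGA display controller matches? %d\n",
               class_matches(CLASS_DISPLAY_OTHER, CLASS_DISPLAY_VGA, 0xffff00));
        /* New mask: any display-class device matches. */
        printf("0xff0000: non-VGA display controller matches? %d\n",
               class_matches(CLASS_DISPLAY_OTHER, CLASS_DISPLAY_VGA, 0xff0000));
        return 0;
}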
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5969f46ac2d6..a0149c619cdd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
         /** List of all objects in gtt_space. Used to restore gtt
          *  mappings on resume */
         struct list_head gtt_list;
-        /** End of mappable part of GTT */
+
+        /** Usable portion of the GTT for GEM */
+        unsigned long gtt_start;
         unsigned long gtt_mappable_end;
+        unsigned long gtt_end;
 
         struct io_mapping *gtt_mapping;
         int gtt_mtrr;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff755..cf4f74c7c6fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
 
-        drm_mm_init(&dev_priv->mm.gtt_space, start,
-                    end - start);
+        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+        dev_priv->mm.gtt_start = start;
+        dev_priv->mm.gtt_mappable_end = mappable_end;
+        dev_priv->mm.gtt_end = end;
         dev_priv->mm.gtt_total = end - start;
         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-        dev_priv->mm.gtt_mappable_end = mappable_end;
+
+        /* Take over this portion of the GTT */
+        intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
         seqno = ring->get_seqno(ring);
 
-        for (i = 0; i < I915_NUM_RINGS; i++)
+        for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
                 if (seqno >= ring->sync_seqno[i])
                         ring->sync_seqno[i] = 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf4151b6d..d2f445e825f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 goto err;
 
         seqno = i915_gem_next_request_seqno(dev, ring);
-        for (i = 0; i < I915_NUM_RINGS-1; i++) {
+        for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
                 if (seqno < ring->sync_seqno[i]) {
                         /* The GPU can not handle its semaphore value wrapping,
                          * so every billion or so execbuffers, we need to stall
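Aside: both sync_seqno loops above switch from a hard-coded ring count to ARRAY_SIZE(ring->sync_seqno), so the loop bound always tracks the array being indexed. A trivial standalone illustration of the idiom; the macro below matches the kernel's element-count arithmetic (without its array-type check) and the array size here is made up.

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel macro; evaluates to the element count. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct demo_ring {
        uint32_t sync_seqno[3];         /* illustrative size only */
};

int main(void)
{
        struct demo_ring ring = { .sync_seqno = { 5, 10, 15 } };
        uint32_t seqno = 12;
        size_t i;

        /* The bound follows the array, not a separately maintained constant. */
        for (i = 0; i < ARRAY_SIZE(ring.sync_seqno); i++)
                if (seqno >= ring.sync_seqno[i])
                        ring.sync_seqno[i] = 0;

        for (i = 0; i < ARRAY_SIZE(ring.sync_seqno); i++)
                printf("sync_seqno[%zu] = %u\n", i, ring.sync_seqno[i]);
        return 0;
}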
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac8..b0abdc64aa9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj;
 
+        /* First fill our portion of the GTT with scratch pages */
+        intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+                              (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                 i915_gem_clflush_object(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b8e509ae065e..062f353497e6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
         return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                               int *max_error,
                               struct timeval *vblank_time,
                               unsigned flags)
 {
-        struct drm_crtc *drmcrtc;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_crtc *crtc;
 
-        if (crtc < 0 || crtc >= dev->num_crtcs) {
-                DRM_ERROR("Invalid crtc %d\n", crtc);
+        if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+                DRM_ERROR("Invalid crtc %d\n", pipe);
                 return -EINVAL;
         }
 
         /* Get drm_crtc to timestamp: */
-        drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+        crtc = intel_get_crtc_for_pipe(dev, pipe);
+        if (crtc == NULL) {
+                DRM_ERROR("Invalid crtc %d\n", pipe);
+                return -EINVAL;
+        }
+
+        if (!crtc->enabled) {
+                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+                return -EBUSY;
+        }
 
         /* Helper routine in DRM core does all the work: */
-        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-                                                     vblank_time, flags, drmcrtc);
+        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+                                                     vblank_time, flags,
+                                                     crtc);
 }
 
 /*
@@ -348,8 +359,12 @@ static void notify_ring(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 seqno = ring->get_seqno(ring);
+        u32 seqno;
 
+        if (ring->obj == NULL)
+                return;
+
+        seqno = ring->get_seqno(ring);
         trace_i915_gem_request_complete(dev, seqno);
 
         ring->irq_seqno = seqno;
@@ -831,6 +846,8 @@ static void i915_capture_error_state(struct drm_device *dev)
                         i++;
         error->pinned_bo_count = i - error->active_bo_count;
 
+        error->active_bo = NULL;
+        error->pinned_bo = NULL;
         if (i) {
                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
                                            GFP_ATOMIC);
@@ -1278,12 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
         if (master_priv->sarea_priv)
                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-        ret = -ENODEV;
         if (ring->irq_get(ring)) {
                 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                             READ_BREADCRUMB(dev_priv) >= irq_nr);
                 ring->irq_put(ring);
-        }
+        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+                ret = -EBUSY;
 
         if (ret == -EBUSY) {
                 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
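Aside: the i915_wait_irq() hunk above falls back to polling READ_BREADCRUMB() against a 3000 ms deadline when no interrupt can be requested. A standalone sketch of that poll-until-deadline shape; this only mirrors the shape of wait_for(), not its implementation, and the breadcrumb source is simulated.

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Illustrative stand-in for the hardware breadcrumb being waited on. */
static int read_breadcrumb(void)
{
        static int value;
        return ++value;         /* pretend the GPU advances each poll */
}

/* Poll a condition until it holds or a deadline passes; returns true
 * on timeout, like the else-branch above treats wait_for(). */
static bool poll_timeout(int target, int timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (read_breadcrumb() >= target)
                        return false;   /* condition met */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        return true;    /* timed out */
        }
}

int main(void)
{
        printf("timed out: %d\n", poll_timeout(5, 3000));
        return 0;
}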
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f61..5cfc68940f17 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -513,6 +513,10 @@
 #define GEN6_BLITTER_SYNC_STATUS (1 << 24)
 #define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
 
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT 16
+#define   GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
@@ -2626,6 +2630,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2 0x46014
 
 #define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3b7724..d7f237deaaf0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 blt_ecoskpd;
+
+        /* Make sure blitter notifies FBC of writes */
+        __gen6_force_wake_get(dev_priv);
+        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+                GEN6_BLITTER_LOCK_SHIFT;
+        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+                         GEN6_BLITTER_LOCK_SHIFT);
+        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+        POSTING_READ(GEN6_BLITTER_ECOSKPD);
+        __gen6_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
         struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                 I915_WRITE(SNB_DPFC_CTL_SA,
                            SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+                sandybridge_blit_fbc_update(dev);
         }
 
         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -6286,7 +6307,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
 
         if (IS_GEN5(dev)) {
                 /* Required for FBC */
-                dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+                dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+                        DPFCRUNIT_CLOCK_GATE_DISABLE |
+                        DPFDUNIT_CLOCK_GATE_DISABLE;
                 /* Required for CxSR */
                 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
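Aside: the ECOSKPD sequence above sets GEN6_BLITTER_FBC_NOTIFY by first writing the bit shifted into the upper (lock) half, then the bit itself, then clearing the lock half again. Below is a toy model that reproduces the outcome of that sequence, assuming the common convention that the upper 16 bits act as per-bit write enables for the lower 16; that semantic is an assumption on my part, not something the hunk states.

#include <stdio.h>
#include <stdint.h>

#define FBC_NOTIFY      (1u << 3)
#define LOCK_SHIFT      16

/* Toy register: the upper 16 bits of a write select which of the lower
 * 16 bits actually change (assumed semantics, see note above). */
static uint16_t reg_state;

static void reg_write(uint32_t val)
{
        uint16_t enables = val >> 16;
        uint16_t data = val & 0xffff;

        reg_state = (reg_state & ~enables) | (data & enables);
}

static uint32_t reg_read(void)
{
        return reg_state;
}

int main(void)
{
        uint32_t v = reg_read();

        v |= FBC_NOTIFY << LOCK_SHIFT;          /* arm the write enable */
        reg_write(v);
        v |= FBC_NOTIFY;                        /* now the data bit sticks */
        reg_write(v);
        v &= ~(FBC_NOTIFY << LOCK_SHIFT);       /* disarm the enable again */
        reg_write(v);

        printf("FBC_NOTIFY set: %s\n", (reg_read() & FBC_NOTIFY) ? "yes" : "no");
        return 0;
}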
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7aaadf9..64fd64443ca6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>
 
 #include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
                 return -ENOTSUPP;
         }
 
-        base = ioremap(asls, OPREGION_SIZE);
+        base = acpi_os_ioremap(asls, OPREGION_SIZE);
         if (!base)
                 return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f6b9baa6a63d..6218fa97aa1e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+        if (space < 0)
+                space += ring->size;
+        return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                 i915_kernel_lost_context(ring->dev);
         else {
-                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+                ring->head = I915_READ_HEAD(ring);
                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-                ring->space = ring->head - (ring->tail + 8);
-                if (ring->space < 0)
-                        ring->space += ring->size;
+                ring->space = ring_space(ring);
         }
 
         return 0;
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
         }
 
         ring->tail = 0;
-        ring->space = ring->head - 8;
+        ring->space = ring_space(ring);
 
         return 0;
 }
 
 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
-        int reread = 0;
         struct drm_device *dev = ring->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         unsigned long end;
         u32 head;
 
+        /* If the reported head position has wrapped or hasn't advanced,
+         * fallback to the slow and accurate path.
+         */
+        head = intel_read_status_page(ring, 4);
+        if (head > ring->head) {
+                ring->head = head;
+                ring->space = ring_space(ring);
+                if (ring->space >= n)
+                        return 0;
+        }
+
         trace_i915_ring_wait_begin (dev);
         end = jiffies + 3 * HZ;
         do {
-                /* If the reported head position has wrapped or hasn't advanced,
-                 * fallback to the slow and accurate path.
-                 */
-                head = intel_read_status_page(ring, 4);
-                if (reread)
-                        head = I915_READ_HEAD(ring);
-                ring->head = head & HEAD_ADDR;
-                ring->space = ring->head - (ring->tail + 8);
-                if (ring->space < 0)
-                        ring->space += ring->size;
+                ring->head = I915_READ_HEAD(ring);
+                ring->space = ring_space(ring);
                 if (ring->space >= n) {
                         trace_i915_ring_wait_end(dev);
                         return 0;
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
                 msleep(1);
                 if (atomic_read(&dev_priv->mm.wedged))
                         return -EAGAIN;
-                reread = 1;
         } while (!time_after(jiffies, end));
         trace_i915_ring_wait_end (dev);
         return -EBUSY;
@@ -1292,6 +1299,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+        *ring = render_ring;
+        if (INTEL_INFO(dev)->gen >= 6) {
+                ring->add_request = gen6_add_request;
+                ring->irq_get = gen6_render_ring_get_irq;
+                ring->irq_put = gen6_render_ring_put_irq;
+        } else if (IS_GEN5(dev)) {
+                ring->add_request = pc_render_add_request;
+                ring->get_seqno = pc_render_get_seqno;
+        }
+
+        ring->dev = dev;
+        INIT_LIST_HEAD(&ring->active_list);
+        INIT_LIST_HEAD(&ring->request_list);
+        INIT_LIST_HEAD(&ring->gpu_write_list);
+
+        ring->size = size;
+        ring->effective_size = ring->size;
+        if (IS_I830(ring->dev))
+                ring->effective_size -= 128;
+
+        ring->map.offset = start;
+        ring->map.size = size;
+        ring->map.type = 0;
+        ring->map.flags = 0;
+        ring->map.mtrr = 0;
+
+        drm_core_ioremap_wc(&ring->map, dev);
+        if (ring->map.handle == NULL) {
+                DRM_ERROR("can not ioremap virtual address for"
+                          " ring buffer\n");
+                return -ENOMEM;
+        }
+
+        ring->virtual_start = (void __force __iomem *)ring->map.handle;
+        return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
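Aside: the new ring_space() helper above centralises the circular-buffer free-space computation (head minus tail minus an 8-byte gap, wrapped into the ring size). The same arithmetic reduced to a standalone sketch, with made-up sizes and without the HEAD_ADDR masking:

#include <stdio.h>

/* Free space in a circular buffer where the producer (tail) must never
 * catch up to the consumer (head); the 8-byte gap mirrors the hunk above. */
static int ring_space(int head, int tail, int size)
{
        int space = head - (tail + 8);

        if (space < 0)
                space += size;
        return space;
}

int main(void)
{
        int size = 4096;

        /* Head ahead of tail: space is simply the gap minus the reserve. */
        printf("%d\n", ring_space(1024, 256, size));    /* 760 */
        /* Tail ahead of head: free space wraps around the end of the ring. */
        printf("%d\n", ring_space(256, 1024, size));    /* 3320 */
        return 0;
}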
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5b0abfa881fc..6d6fde85a636 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -166,4 +166,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b0ab185b86f6..d3ca17080df7 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -606,14 +606,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                         args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
                         args.v1.ucTransmitterID = radeon_encoder->encoder_id;
                         args.v1.ucEncodeMode = encoder_mode;
-                        if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                if (ss_enabled)
-                                        args.v1.ucConfig |=
-                                                ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-                        } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+                        if (ss_enabled)
                                 args.v1.ucConfig |=
                                         ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-                        }
 
                         atom_execute_table(rdev->mode_info.atom_context,
                                            index, (uint32_t *)&args);
@@ -624,12 +619,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                         args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
                         args.v3.sInput.ucEncodeMode = encoder_mode;
                         args.v3.sInput.ucDispPllConfig = 0;
+                        if (ss_enabled)
+                                args.v3.sInput.ucDispPllConfig |=
+                                        DISPPLL_CONFIG_SS_ENABLE;
                         if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
                                 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                        if (ss_enabled)
-                                                args.v3.sInput.ucDispPllConfig |=
-                                                        DISPPLL_CONFIG_SS_ENABLE;
                                         args.v3.sInput.ucDispPllConfig |=
                                                 DISPPLL_CONFIG_COHERENT_MODE;
                                         /* 16200 or 27000 */
@@ -649,18 +644,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                 }
                         } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                        if (ss_enabled)
-                                                args.v3.sInput.ucDispPllConfig |=
-                                                        DISPPLL_CONFIG_SS_ENABLE;
                                         args.v3.sInput.ucDispPllConfig |=
                                                 DISPPLL_CONFIG_COHERENT_MODE;
                                         /* 16200 or 27000 */
                                         args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-                                } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
-                                        if (ss_enabled)
-                                                args.v3.sInput.ucDispPllConfig |=
-                                                        DISPPLL_CONFIG_SS_ENABLE;
-                                } else {
+                                } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
                                         if (mode->clock > 165000)
                                                 args.v3.sInput.ucDispPllConfig |=
                                                         DISPPLL_CONFIG_DUAL_LINK;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a8973acb3987..677af91b555c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2201,6 +2201,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
         struct evergreen_mc_save save;
         u32 grbm_reset = 0;
 
+        if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+                return 0;
+
         dev_info(rdev->dev, "GPU softreset \n");
         dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
                 RREG32(GRBM_STATUS));
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 46da5142b131..5968dde243e9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3522,7 +3522,7 @@ int r100_ring_test(struct radeon_device *rdev)
         if (i < rdev->usec_timeout) {
                 DRM_INFO("ring test succeeded in %d usecs\n", i);
         } else {
-                DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+                DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
                           scratch, tmp);
                 r = -EINVAL;
         }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aca2236268fa..1e10e3e2ba2a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1287,6 +1287,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                                 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
         u32 tmp;
 
+        if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+                return 0;
+
         dev_info(rdev->dev, "GPU softreset \n");
         dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
                 RREG32(R_008010_GRBM_STATUS));
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1573202a6418..52777902bbcc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -387,15 +387,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                         *line_mux = 0x90;
         }
 
-        /* mac rv630 */
-        if ((dev->pdev->device == 0x9588) &&
-            (dev->pdev->subsystem_vendor == 0x106b) &&
-            (dev->pdev->subsystem_device == 0x00a6)) {
-                if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
-                    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
-                        *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
-                        *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
-                }
+        /* mac rv630, rv730, others */
+        if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+            (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+                *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+                *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
         }
 
         /* ASUS HD 3600 XT board lists the DVI port as HDMI */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d5680a0c87af..275b26a708d6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,7 +48,7 @@
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
  *   2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
- *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
+ *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
  */
 #define KMS_DRIVER_MAJOR 2
 #define KMS_DRIVER_MINOR 8
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a289646e8aa4..9ec830c77af0 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -110,11 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
+        int i;
         int r = 0;
 
         INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 
         spin_lock_init(&rdev->irq.sw_lock);
+        for (i = 0; i < rdev->num_crtc; i++)
+                spin_lock_init(&rdev->irq.pflip_lock[i]);
         r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
         if (r) {
                 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 28a53e4a925f..98321298cffd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,6 +201,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 }
                 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
                 break;
+        case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+                /* return clock value in KHz */
+                value = rdev->clock.spll.reference_freq * 10;
+                break;
         default:
                 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                 return -EINVAL;
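Aside: the new RADEON_INFO_CLOCK_CRYSTAL_FREQ case above returns reference_freq * 10 and the comment says KHz, which is consistent with ATOM clock fields being stored in 10 kHz units (treat that unit as an assumption here, it is not stated in the hunk). A quick sanity check of the conversion with a typical 27 MHz reference crystal:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Assumed: the firmware stores the reference clock in 10 kHz units. */
        uint32_t reference_freq = 2700;         /* 27 MHz crystal */
        uint32_t khz = reference_freq * 10;     /* what the ioctl returns */

        printf("%u kHz (%.1f MHz)\n", khz, khz / 1000.0);
        return 0;
}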
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c380c65da417..ace2b1623b21 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
                       void (*irq_set_state)(void *cookie, bool state),
                       unsigned int (*set_vga_decode)(void *cookie, bool decode))
 {
-        int ret = -1;
+        int ret = -ENODEV;
         struct vga_device *vgadev;
         unsigned long flags;
 