Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/drm_info.c	2
-rw-r--r--	drivers/gpu/drm/i915/dvo_ch7xxx.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	9
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	19
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	2
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	47
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	3
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	72
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	4
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c	1
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/therm/fan.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_pm.c	6
-rw-r--r--	drivers/gpu/drm/radeon/atombios_crtc.c	8
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	7
-rw-r--r--	drivers/gpu/drm/radeon/ni.c	12
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	14
-rw-r--r--	drivers/gpu/drm/radeon/radeon_acpi.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_atpx_handler.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	1
-rw-r--r--	drivers/gpu/drm/radeon/radeon_device.c	4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gart.c	374
-rw-r--r--	drivers/gpu/drm/radeon/radeon_kms.c	22
-rw-r--r--	drivers/gpu/drm/radeon/radeon_legacy_encoders.c	48
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	2
-rw-r--r--	drivers/gpu/drm/radeon/si.c	7
28 files changed, 442 insertions(+), 248 deletions(-)
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index cdf8b1e7602d..441ebc1bdbef 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -239,7 +239,7 @@ int drm_vma_info(struct seq_file *m, void *data)
 	mutex_lock(&dev->struct_mutex);
 	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (void *)virt_to_phys(high_memory));
+		   high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 38f3a6cb8c7d..3edd981e0770 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -303,10 +303,10 @@ static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
 
 	ch7xxx_readb(dvo, CH7xxx_PM, &val);
 
-	if (val & CH7xxx_PM_FPD)
-		return false;
-	else
+	if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
 		return true;
+	else
+		return false;
 }
 
 static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4f2831aa5fed..b84f7861e438 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1341,9 +1341,14 @@ int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
 	struct scatterlist *sg = obj->pages->sgl;
-	while (n >= SG_MAX_SINGLE_ALLOC) {
+	int nents = obj->pages->nents;
+	while (nents > SG_MAX_SINGLE_ALLOC) {
+		if (n < SG_MAX_SINGLE_ALLOC - 1)
+			break;
+
 		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
 		n -= SG_MAX_SINGLE_ALLOC - 1;
+		nents -= SG_MAX_SINGLE_ALLOC - 1;
 	}
 	return sg_page(sg+n);
 }
@@ -1427,7 +1432,7 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int i915_add_request(struct intel_ring_buffer *ring,
 		     struct drm_file *file,
-		     struct drm_i915_gem_request *request);
+		     u32 *seqno);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19dbdd7dd564..d33d02d13c96 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1955,11 +1955,12 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 int
 i915_add_request(struct intel_ring_buffer *ring,
 		 struct drm_file *file,
-		 struct drm_i915_gem_request *request)
+		 u32 *out_seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	uint32_t seqno;
+	struct drm_i915_gem_request *request;
 	u32 request_ring_position;
+	u32 seqno;
 	int was_empty;
 	int ret;
 
@@ -1974,11 +1975,9 @@ i915_add_request(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	if (request == NULL) {
-		request = kmalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-	}
+	request = kmalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL)
+		return -ENOMEM;
 
 	seqno = i915_gem_next_request_seqno(ring);
 
@@ -2030,6 +2029,8 @@ i915_add_request(struct intel_ring_buffer *ring,
 		}
 	}
 
+	if (out_seqno)
+		*out_seqno = seqno;
 	return 0;
 }
 
@@ -3959,6 +3960,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (!intel_enable_gtt())
 		return -EIO;
 
+	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
 	i915_gem_l3_remap(dev);
 
 	i915_gem_init_swizzling(dev);
@@ -4098,7 +4102,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 64c1be0a9cfd..a4162ddff6c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -521,7 +521,7 @@
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED	(1 << 14)
 #define _3D_CHICKEN3	0x02090
-#define  _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL	(1 << 5)
+#define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL	(1 << 5)
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH	(1 << 6)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2b6ce9b2674a..682bd3729baf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3253,6 +3253,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	if (HAS_PCH_CPT(dev))
 		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+
+	/*
+	 * There seems to be a race in PCH platform hw (at least on some
+	 * outputs) where an enabled pipe still completes any pageflip right
+	 * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happend, everything works as expected. Hence just
+	 * wait for one vblank before returning to avoid strange things
+	 * happening.
+	 */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -7892,8 +7902,7 @@ static struct intel_quirk intel_quirks[] = {
 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
-	/* 855 & before need to leave pipe A & dpll A up */
-	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	/* 830/845 need to leave pipe A & dpll A up */
 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
@@ -8049,29 +8058,42 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 
 }
 
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	u32 reg, val;
+
+	if (dev_priv->num_pipe == 1)
+		return true;
+
+	reg = DSPCNTR(!crtc->plane);
+	val = I915_READ(reg);
+
+	if ((val & DISPLAY_PLANE_ENABLE) &&
+	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+		return false;
+
+	return true;
+}
+
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg, val;
+	u32 reg;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
 	reg = PIPECONF(crtc->pipe);
 	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
 	/* We need to sanitize the plane -> pipe mapping first because this will
-	 * disable the crtc (and hence change the state) if it is wrong. */
-	if (!HAS_PCH_SPLIT(dev)) {
+	 * disable the crtc (and hence change the state) if it is wrong. Note
+	 * that gen4+ has a fixed plane -> pipe mapping. */
+	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
 		struct intel_connector *connector;
 		bool plane;
 
-		reg = DSPCNTR(crtc->plane);
-		val = I915_READ(reg);
-
-		if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
-		    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
-			goto ok;
-
 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
 			      crtc->base.base.id);
 
@@ -8095,7 +8117,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		WARN_ON(crtc->active);
 		crtc->base.enabled = false;
 	}
-ok:
 
 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
 	    crtc->pipe == PIPE_A && !crtc->active) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d1e8ddb2d6c0..1b727a5c9ee5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2369,8 +2369,9 @@ static void
 intel_dp_destroy(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
 
-	if (intel_dpd_is_edp(dev))
+	if (is_edp(intel_dp))
 		intel_panel_destroy_backlight(dev);
 
 	drm_sysfs_connector_remove(connector);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ebff850a9ab6..495625914e4a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -209,7 +209,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-					 struct drm_i915_gem_request *request,
 					 void (*tail)(struct intel_overlay *))
 {
 	struct drm_device *dev = overlay->dev;
@@ -218,12 +217,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(ring, NULL, request);
-	if (ret) {
-		kfree(request);
-		return ret;
-	}
-	overlay->last_flip_req = request->seqno;
+	ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+	if (ret)
+		return ret;
+
 	overlay->flip_tail = tail;
 	ret = i915_wait_seqno(ring, overlay->last_flip_req);
 	if (ret)
@@ -240,7 +237,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-	struct drm_i915_gem_request *request;
 	int ret;
 
 	BUG_ON(overlay->active);
@@ -248,17 +244,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	ret = intel_ring_begin(ring, 4);
-	if (ret) {
-		kfree(request);
-		goto out;
-	}
+	if (ret)
+		return ret;
 
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -266,9 +254,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	ret = intel_overlay_do_wait_request(overlay, request, NULL);
-out:
-	return ret;
+	return intel_overlay_do_wait_request(overlay, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -278,17 +264,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
 
 	BUG_ON(!overlay->active);
 
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return -ENOMEM;
-
 	if (load_polyphase_filter)
 		flip_addr |= OFC_UPDATE;
 
@@ -298,22 +279,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
 	ret = intel_ring_begin(ring, 2);
-	if (ret) {
-		kfree(request);
+	if (ret)
 		return ret;
-	}
+
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
 
-	ret = i915_add_request(ring, NULL, request);
-	if (ret) {
-		kfree(request);
-		return ret;
-	}
-
-	overlay->last_flip_req = request->seqno;
-	return 0;
+	return i915_add_request(ring, NULL, &overlay->last_flip_req);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -349,15 +322,10 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	u32 flip_addr = overlay->flip_addr;
-	struct drm_i915_gem_request *request;
 	int ret;
 
 	BUG_ON(!overlay->active);
 
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return -ENOMEM;
-
 	/* According to intel docs the overlay hw may hang (when switching
 	 * off) without loading the filter coeffs. It is however unclear whether
 	 * this applies to the disabling of the overlay or to the switching off
@@ -365,10 +333,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	flip_addr |= OFC_UPDATE;
 
 	ret = intel_ring_begin(ring, 6);
-	if (ret) {
-		kfree(request);
+	if (ret)
 		return ret;
-	}
+
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
@@ -379,8 +346,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, request,
-					     intel_overlay_off_tail);
+	return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -425,24 +391,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		return 0;
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-		struct drm_i915_gem_request *request;
-
 		/* synchronous slowpath */
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
 		ret = intel_ring_begin(ring, 2);
-		if (ret) {
-			kfree(request);
+		if (ret)
 			return ret;
-		}
 
 		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 
-		ret = intel_overlay_do_wait_request(overlay, request,
+		ret = intel_overlay_do_wait_request(overlay,
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b3b4b6cea8b0..72f41aaa71ff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3442,8 +3442,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
 	/* Bspec says we need to always set all mask bits. */
-	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
-		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+	I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
+		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
 
 	/*
 	 * According to the spec the following bits should be
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 9ed6e728a94c..7d750382a833 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -43,7 +43,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 	*ver = nv_ro08(bios, dcb);
 
 	if (*ver >= 0x41) {
-		nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver);
+		nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
 		return 0x0000;
 	} else
 	if (*ver >= 0x30) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 436e9efe7ef5..42d7539e6525 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -277,7 +277,6 @@ nv50_fb_dtor(struct nouveau_object *object)
 		__free_page(priv->r100c08_page);
 	}
 
-	nouveau_mm_fini(&priv->base.vram);
 	nouveau_fb_destroy(&priv->base);
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index b29237970fa0..523178685180 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -134,7 +134,7 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
 	end = ptimer->read(ptimer);
 
 	if (cycles == 5) {
-		tach = (u64)60000000000;
+		tach = (u64)60000000000ULL;
 		do_div(tach, (end - start));
 		return tach;
 	} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 259e5f1adf47..35ac57f0aab6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -456,6 +456,7 @@ static struct ttm_tt *
 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 		      uint32_t page_flags, struct page *dummy_read)
 {
+#if __OS_HAS_AGP
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct drm_device *dev = drm->dev;
 
@@ -463,6 +464,7 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
 					 page_flags, dummy_read);
 	}
+#endif
 
 	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 0bf64c90aa20..5566172774df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -52,7 +52,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	int ret;
 
 	/*XXX: not on all boards, we should control based on temperature
@@ -64,7 +64,6 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		ret = therm->fan_set(therm, perflvl->fanspeed);
 		if (ret && ret != -ENODEV) {
 			NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
-			return ret;
 		}
 	}
 
@@ -706,8 +705,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 	struct device *hwmon_dev;
 	int ret = 0;
 
-	if (!therm || !therm->temp_get || !therm->attr_get ||
-	    !therm->attr_set || therm->temp_get(therm) < 0)
+	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
 		return -ENODEV;
 
 	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 96184d02c8d9..2e566e123e9e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1690,10 +1690,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 			}
 			/* all other cases */
 			pll_in_use = radeon_get_pll_use_mask(crtc);
-			if (!(pll_in_use & (1 << ATOM_PPLL2)))
-				return ATOM_PPLL2;
 			if (!(pll_in_use & (1 << ATOM_PPLL1)))
 				return ATOM_PPLL1;
+			if (!(pll_in_use & (1 << ATOM_PPLL2)))
+				return ATOM_PPLL2;
 			DRM_ERROR("unable to allocate a PPLL\n");
 			return ATOM_PPLL_INVALID;
 		} else {
@@ -1715,10 +1715,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 			}
 			/* all other cases */
 			pll_in_use = radeon_get_pll_use_mask(crtc);
-			if (!(pll_in_use & (1 << ATOM_PPLL2)))
-				return ATOM_PPLL2;
 			if (!(pll_in_use & (1 << ATOM_PPLL1)))
 				return ATOM_PPLL1;
+			if (!(pll_in_use & (1 << ATOM_PPLL2)))
+				return ATOM_PPLL2;
 			DRM_ERROR("unable to allocate a PPLL\n");
 			return ATOM_PPLL_INVALID;
 		} else {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a1f49c5fd74b..14313ad43b76 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3431,9 +3431,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 	if (!(mask & DRM_PCIE_SPEED_50))
 		return;
 
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
 	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
-	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8bcb554ea0c5..8c74c729586d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15DC, 0);
 
 	/* empty context1-7 */
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and setup and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
 	for (i = 1; i < 8; i++) {
 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
-		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
 			rdev->gart.table_addr >> 12);
 	}
@@ -1572,12 +1576,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	if (vm == NULL)
 		return;
 
-	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, 0);
-
-	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, vm->last_pfn);
-
 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
 	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 70c800ff6190..cda280d157da 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
 	if (!(mask & DRM_PCIE_SPEED_50))
 		return;
 
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
 	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
 	/* 55 nm r6xx asics */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b04c06444d8b..8c42d54c2e26 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -663,9 +663,14 @@ struct radeon_vm {
 	struct list_head list;
 	struct list_head va;
 	unsigned id;
-	unsigned last_pfn;
-	u64 pd_gpu_addr;
-	struct radeon_sa_bo *sa_bo;
+
+	/* contains the page directory */
+	struct radeon_sa_bo *page_directory;
+	uint64_t pd_gpu_addr;
+
+	/* array of page tables, one for each page directory entry */
+	struct radeon_sa_bo **page_tables;
+
 	struct mutex mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence *fence;
@@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
  */
 int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 				       struct radeon_vm *vm, int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index b0a5688c67f8..196d28d99570 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle,
 
 	size = *(u16 *) info->buffer.pointer;
 	if (size < 12) {
-		DRM_INFO("ATIF buffer is too small: %lu\n", size);
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
 		err = -EINVAL;
 		goto out;
 	}
@@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
 
 			radeon_set_backlight_level(rdev, enc, req.backlight_level);
 
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 			if (rdev->is_atom_bios) {
 				struct radeon_encoder_atom_dig *dig = enc->enc_priv;
 				backlight_force_update(dig->bl_dev,
@@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
 				backlight_force_update(dig->bl_dev,
 						       BACKLIGHT_UPDATE_HOTKEY);
 			}
+#endif
 		}
 	}
 	/* TODO: check other events */
@@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle,
 
 	size = *(u16 *) info->buffer.pointer;
 	if (size < 8) {
-		DRM_INFO("ATCS buffer is too small: %lu\n", size);
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
 		err = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 582e99449c12..1aa3f910b993 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -148,7 +148,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
 
 	size = *(u16 *) info->buffer.pointer;
 	if (size < 8) {
-		printk("ATPX buffer is too small: %lu\n", size);
+		printk("ATPX buffer is too small: %zu\n", size);
 		err = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cb7b7c062fef..41672cc563fb 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	}
 
 out:
+	radeon_vm_add_to_lru(rdev, vm);
 	mutex_unlock(&vm->mutex);
 	mutex_unlock(&rdev->vm_manager.lock);
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 64a42647f08a..bd13ca09eb62 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1018,6 +1018,10 @@ int radeon_device_init(struct radeon_device *rdev,
 		return r;
 	/* initialize vm here */
 	mutex_init(&rdev->vm_manager.lock);
+	/* Adjust VM size here.
+	 * Currently set to 4GB ((1 << 20) 4k pages).
+	 * Max GPUVM size for cayman and SI is 40 bits.
+	 */
 	rdev->vm_manager.max_pfn = 1 << 20;
 	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0c06d196b75..a7677dd1ce98 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 
 /**
+ * radeon_vm_num_pde - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
  * radeon_vm_directory_size - returns the size of the page directory in bytes
  *
  * @rdev: radeon_device pointer
@@ -431,7 +443,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
 {
-	return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
 }
 
 /**
@@ -451,11 +463,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	if (!rdev->vm_manager.enabled) {
 		/* allocate enough for 2 full VM pts */
-		size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-		size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+		size = radeon_vm_directory_size(rdev);
+		size += rdev->vm_manager.max_pfn * 8;
 		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      size,
+					      RADEON_GPU_PAGE_ALIGN(size),
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
 			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,7 +488,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	/* restore page table */
 	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-		if (vm->sa_bo == NULL)
+		if (vm->page_directory == NULL)
 			continue;
 
 		list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -500,16 +512,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
 			  struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va;
+	int i;
 
-	if (!vm->sa_bo)
+	if (!vm->page_directory)
 		return;
 
 	list_del_init(&vm->list);
-	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
+	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
 		bo_va->valid = false;
 	}
+
+	if (vm->page_tables == NULL)
+		return;
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+	kfree(vm->page_tables);
 }
 
 /**
@@ -546,63 +567,106 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 }
 
 /**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_vm *vm_evict;
+
+	if (list_empty(&rdev->vm_manager.lru_vm))
+		return -ENOMEM;
+
+	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+				    struct radeon_vm, list);
+	if (vm_evict == vm)
+		return -ENOMEM;
+
+	mutex_lock(&vm_evict->mutex);
+	radeon_vm_free_pt(rdev, vm_evict);
+	mutex_unlock(&vm_evict->mutex);
+	return 0;
+}
+
+/**
  * radeon_vm_alloc_pt - allocates a page table for a VM
  *
  * @rdev: radeon_device pointer
  * @vm: vm to bind
  *
  * Allocate a page table for the requested vm (cayman+).
- * Also starts to populate the page table.
  * Returns 0 for success, error for failure.
  *
  * Global and local mutex must be locked!
  */
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_vm *vm_evict;
-	int r;
+	unsigned pd_size, pts_size;
 	u64 *pd_addr;
-	int tables_size;
+	int r;
 
 	if (vm == NULL) {
 		return -EINVAL;
 	}
 
-	/* allocate enough to cover the current VM size */
-	tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-	tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
-
-	if (vm->sa_bo != NULL) {
-		/* update lru */
-		list_del_init(&vm->list);
-		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+	if (vm->page_directory != NULL) {
 		return 0;
 	}
 
 retry:
-	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-			     tables_size, RADEON_GPU_PAGE_SIZE, false);
+	pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+			     &vm->page_directory, pd_size,
+			     RADEON_GPU_PAGE_SIZE, false);
 	if (r == -ENOMEM) {
-		if (list_empty(&rdev->vm_manager.lru_vm)) {
+		r = radeon_vm_evict(rdev, vm);
+		if (r)
 			return r;
-		}
-		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
-		mutex_lock(&vm_evict->mutex);
-		radeon_vm_free_pt(rdev, vm_evict);
-		mutex_unlock(&vm_evict->mutex);
 		goto retry;
 
 	} else if (r) {
 		return r;
 	}
 
-	pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
-	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
-	memset(pd_addr, 0, tables_size);
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+	/* Initially clear the page directory */
+	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+	memset(pd_addr, 0, pd_size);
+
+	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
 
+/**
+ * radeon_vm_add_to_lru - add VMs page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	list_del_init(&vm->list);
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
-				       &rdev->ring_tmp_bo.bo->tbo.mem);
 }
 
 /**
@@ -793,20 +857,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 	}
 
 	mutex_lock(&vm->mutex);
-	if (last_pfn > vm->last_pfn) {
-		/* release mutex and lock in right order */
-		mutex_unlock(&vm->mutex);
-		mutex_lock(&rdev->vm_manager.lock);
-		mutex_lock(&vm->mutex);
-		/* and check again */
-		if (last_pfn > vm->last_pfn) {
-			/* grow va space 32M by 32M */
-			unsigned align = ((32 << 20) >> 12) - 1;
-			radeon_vm_free_pt(rdev, vm);
-			vm->last_pfn = (last_pfn + align) & ~align;
-		}
-		mutex_unlock(&rdev->vm_manager.lock);
-	}
 	head = &vm->va;
 	last_offset = 0;
 	list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -865,6 +915,155 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 }
 
 /**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 uint64_t start, uint64_t end)
+{
+	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0;
+	uint64_t pt_idx;
+	int r;
+
+	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+		uint64_t pde, pt;
+
+		if (vm->page_tables[pt_idx])
+			continue;
+
+retry:
+		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+				     &vm->page_tables[pt_idx],
+				     RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, false);
+
+		if (r == -ENOMEM) {
+			r = radeon_vm_evict(rdev, vm);
+			if (r)
+				return r;
+			goto retry;
+		} else if (r) {
+			return r;
+		}
+
+		pde = vm->pd_gpu_addr + pt_idx * 8;
+
+		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pde,
+							last_pt, count, incr,
+							RADEON_VM_PAGE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+					incr, RADEON_VM_PAGE_VALID);
+
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		unsigned nptes;
+		uint64_t pte;
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+		pte += (addr & mask) * 8;
+
+		if (((last_pte + 8 * count) != pte) ||
+		    ((count + nptes) > 1 << 11)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pte,
+							last_dst, count,
+							RADEON_GPU_PAGE_SIZE,
+							flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
  * radeon_vm_bo_update_pte - map a bo into the vm page table
  *
  * @rdev: radeon_device pointer
@@ -887,12 +1086,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
 	unsigned nptes, npdes, ndw;
-	uint64_t pe, addr;
-	uint64_t pfn;
+	uint64_t addr;
 	int r;
 
 	/* nothing to do if vm isn't bound */
-	if (vm->sa_bo == NULL)
+	if (vm->page_directory == NULL)
 		return 0;
 
 	bo_va = radeon_vm_bo_find(vm, bo);
@@ -939,25 +1137,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		}
 	}
 
-	/* estimate number of dw needed */
-	/* reserve space for 32-bit padding */
-	ndw = 32;
-
 	nptes = radeon_bo_ngpu_pages(bo);
 
-	pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+	/* assume two extra pdes in case the mapping overlaps the borders */
+	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
-	/* handle cases where a bo spans several pdes */
-	npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
-		 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+	/* estimate number of dw needed */
+	/* semaphore, fence and padding */
+	ndw = 32;
+
+	if (RADEON_VM_BLOCK_SIZE > 11)
+		/* reserve space for one header for every 2k dwords */
+		ndw += (nptes >> 11) * 3;
+	else
+		/* reserve space for one header for
+		    every (1 << BLOCK_SIZE) entries */
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
 
-	/* reserve space for one header for every 2k dwords */
-	ndw += (nptes >> 11) * 3;
 	/* reserve space for pte addresses */
 	ndw += nptes * 2;
 
 	/* reserve space for one header for every 2k dwords */
 	ndw += (npdes >> 11) * 3;
+
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
 
@@ -971,22 +1173,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		radeon_fence_note_sync(vm->fence, ridx);
 	}
 
-	/* update page table entries */
-	pe = vm->pd_gpu_addr;
-	pe += radeon_vm_directory_size(rdev);
-	pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
-
-	radeon_asic_vm_set_page(rdev, pe, addr, nptes,
-				RADEON_GPU_PAGE_SIZE, bo_va->flags);
-
-	/* update page directory entries */
-	addr = pe;
-
-	pe = vm->pd_gpu_addr;
-	pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+	r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
 
-	radeon_asic_vm_set_page(rdev, pe, addr, npdes,
-				RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
+	radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+			      addr, bo_va->flags);
 
 	radeon_fence_unref(&vm->fence);
 	r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -997,6 +1191,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	radeon_ring_unlock_commit(rdev, ring);
 	radeon_semaphore_free(rdev, &sem, vm->fence);
 	radeon_fence_unref(&vm->last_flush);
+
 	return 0;
 }
 
@@ -1056,31 +1251,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @vm: requested vm
  *
- * Init @vm (cayman+).
- * Map the IB pool and any other shared objects into the VM
- * by default as it's used by all VMs.
- * Returns 0 for success, error for failure.
+ * Init @vm fields (cayman+).
  */
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va;
-	int r;
-
 	vm->id = 0;
 	vm->fence = NULL;
-	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-
-	/* map the ib pool buffer at 0 in virtual address space, set
-	 * read only
-	 */
-	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
-	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-				  RADEON_VM_PAGE_READABLE |
-				  RADEON_VM_PAGE_SNOOPED);
-	return r;
 }
 
 /**
@@ -1102,17 +1281,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1102 radeon_vm_free_pt(rdev, vm); 1281 radeon_vm_free_pt(rdev, vm);
1103 mutex_unlock(&rdev->vm_manager.lock); 1282 mutex_unlock(&rdev->vm_manager.lock);
1104 1283
1105 /* remove all bo at this point non are busy any more because unbind
1106 * waited for the last vm fence to signal
1107 */
1108 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
1109 if (!r) {
1110 bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
1111 list_del_init(&bo_va->bo_list);
1112 list_del_init(&bo_va->vm_list);
1113 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
1114 kfree(bo_va);
1115 }
1116 if (!list_empty(&vm->va)) { 1284 if (!list_empty(&vm->va)) {
1117 dev_err(rdev->dev, "still active bo inside vm\n"); 1285 dev_err(rdev->dev, "still active bo inside vm\n");
1118 } 1286 }
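Taken together, the rework gives radeon_vm_bo_update_pte() a simple two-phase shape: bring the directory up to date first (page tables can be allocated on demand there), then write the leaf PTEs, and unwind with the undo variant on failure so no partial command stream reaches the GPU. A control-flow sketch with stand-in bodies, not the driver's actual helpers:

#include <stdio.h>

static int update_pdes(void) { return 0; }	/* may fail, e.g. -ENOMEM */
static void update_ptes(void) { }		/* cannot fail */
static void ring_unlock_undo(void) { puts("undo: drop queued dwords"); }
static void ring_unlock_commit(void) { puts("commit: kick the ring"); }

static int bo_update_pte(void)
{
	int r = update_pdes();

	if (r) {
		ring_unlock_undo();	/* roll back the reservation */
		return r;
	}
	update_ptes();
	ring_unlock_commit();		/* fence emit omitted in this sketch */
	return 0;
}

int main(void)
{
	return bo_update_pte();
}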
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 83b8d8aa71c0..dc781c49b96b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
419 /* newer GPUs have virtual address space support */ 419 /* newer GPUs have virtual address space support */
420 if (rdev->family >= CHIP_CAYMAN) { 420 if (rdev->family >= CHIP_CAYMAN) {
421 struct radeon_fpriv *fpriv; 421 struct radeon_fpriv *fpriv;
422 struct radeon_bo_va *bo_va;
422 int r; 423 int r;
423 424
424 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 425 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
426 return -ENOMEM; 427 return -ENOMEM;
427 } 428 }
428 429
429 r = radeon_vm_init(rdev, &fpriv->vm); 430 radeon_vm_init(rdev, &fpriv->vm);
431
432 /* map the ib pool buffer read-only into
433 * the virtual address space */
434 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
435 rdev->ring_tmp_bo.bo);
436 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
437 RADEON_VM_PAGE_READABLE |
438 RADEON_VM_PAGE_SNOOPED);
430 if (r) { 439 if (r) {
431 radeon_vm_fini(rdev, &fpriv->vm); 440 radeon_vm_fini(rdev, &fpriv->vm);
432 kfree(fpriv); 441 kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
454 /* newer GPUs have virtual address space support */ 463 /* newer GPUs have virtual address space support */
455 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { 464 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
456 struct radeon_fpriv *fpriv = file_priv->driver_priv; 465 struct radeon_fpriv *fpriv = file_priv->driver_priv;
466 struct radeon_bo_va *bo_va;
467 int r;
468
469 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
470 if (!r) {
471 bo_va = radeon_vm_bo_find(&fpriv->vm,
472 rdev->ring_tmp_bo.bo);
473 if (bo_va)
474 radeon_vm_bo_rmv(rdev, bo_va);
475 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
476 }
457 477
458 radeon_vm_fini(rdev, &fpriv->vm); 478 radeon_vm_fini(rdev, &fpriv->vm);
459 kfree(fpriv); 479 kfree(fpriv);
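The net effect on radeon_kms.c is a conventional ownership pattern: the code that creates the per-open VM also creates and removes the IB pool mapping, and a failed mapping unwinds everything in reverse order. A compilable model with stand-in functions (none of these are the driver's real signatures):

#include <stdlib.h>

struct fpriv { int vm_ready; int ib_mapped; };

static void vm_init(struct fpriv *f) { f->vm_ready = 1; }
static void vm_fini(struct fpriv *f) { f->vm_ready = 0; }
static int map_ib_pool(struct fpriv *f) { f->ib_mapped = 1; return 0; }
static void unmap_ib_pool(struct fpriv *f) { f->ib_mapped = 0; }

static int driver_open(struct fpriv **out)
{
	struct fpriv *f = calloc(1, sizeof(*f));
	int r;

	if (!f)
		return -1;		/* -ENOMEM in the driver */
	vm_init(f);			/* cannot fail anymore */
	r = map_ib_pool(f);		/* bo_add + set_addr can fail */
	if (r) {
		vm_fini(f);		/* unwind in reverse order */
		free(f);
		return r;
	}
	*out = f;
	return 0;
}

static void driver_postclose(struct fpriv *f)
{
	unmap_ib_pool(f);		/* bo_rmv before vm_fini */
	vm_fini(f);
	free(f);
}

int main(void)
{
	struct fpriv *f;

	if (driver_open(&f) == 0)
		driver_postclose(f);
	return 0;
}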
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 92487e614778..a13ad9d707cf 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
269 .disable = radeon_legacy_encoder_disable, 269 .disable = radeon_legacy_encoder_disable,
270}; 270};
271 271
272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
273
274static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
275{
276 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
277 uint8_t level;
278
279 /* Convert brightness to hardware level */
280 if (bd->props.brightness < 0)
281 level = 0;
282 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
283 level = RADEON_MAX_BL_LEVEL;
284 else
285 level = bd->props.brightness;
286
287 if (pdata->negative)
288 level = RADEON_MAX_BL_LEVEL - level;
289
290 return level;
291}
292
293u8 272u8
294radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder) 273radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
295{ 274{
@@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve
331 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); 310 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
332} 311}
333 312
313#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
314
315static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
316{
317 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
318 uint8_t level;
319
320 /* Convert brightness to hardware level */
321 if (bd->props.brightness < 0)
322 level = 0;
323 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
324 level = RADEON_MAX_BL_LEVEL;
325 else
326 level = bd->props.brightness;
327
328 if (pdata->negative)
329 level = RADEON_MAX_BL_LEVEL - level;
330
331 return level;
332}
333
334static int radeon_legacy_backlight_update_status(struct backlight_device *bd) 334static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
335{ 335{
336 struct radeon_backlight_privdata *pdata = bl_get_data(bd); 336 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -991,11 +991,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
991static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) 991static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
992{ 992{
993 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 993 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
994 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; 994 /* don't destroy the i2c bus record here; it is freed in radeon_i2c_fini */
995 if (tmds) {
996 if (tmds->i2c_bus)
997 radeon_i2c_destroy(tmds->i2c_bus);
998 }
999 kfree(radeon_encoder->enc_priv); 995 kfree(radeon_encoder->enc_priv);
1000 drm_encoder_cleanup(encoder); 996 drm_encoder_cleanup(encoder);
1001 kfree(radeon_encoder); 997 kfree(radeon_encoder);
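The relocated radeon_legacy_lvds_level() is a plain clamp-and-invert conversion: clamp the requested brightness into the hardware range, then invert it when the panel drives the backlight with negative polarity. A userspace sketch, assuming RADEON_MAX_BL_LEVEL is 255:

#include <stdint.h>
#include <stdio.h>

#define MAX_BL_LEVEL 255	/* assumed value of RADEON_MAX_BL_LEVEL */

/* Convert a requested brightness to a hardware level. */
static uint8_t lvds_level(int brightness, int negative)
{
	uint8_t level;

	if (brightness < 0)
		level = 0;
	else if (brightness > MAX_BL_LEVEL)
		level = MAX_BL_LEVEL;
	else
		level = (uint8_t)brightness;

	return negative ? (uint8_t)(MAX_BL_LEVEL - level) : level;
}

int main(void)
{
	printf("%u\n", lvds_level(300, 0));	/* clamps to 255 */
	printf("%u\n", lvds_level(64, 1));	/* inverted: 191 */
	return 0;
}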
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bba66902c83b..47634f27f2e5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
305{ 305{
306#if DRM_DEBUG_CODE 306#if DRM_DEBUG_CODE
307 if (ring->count_dw <= 0) { 307 if (ring->count_dw <= 0) {
308 DRM_ERROR("radeon: writting more dword to ring than expected !\n"); 308 DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
309 } 309 }
310#endif 310#endif
311 ring->ring[ring->wptr++] = v; 311 ring->ring[ring->wptr++] = v;
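The debug guard above checks a per-submission dword budget: locking the ring reserves count_dw dwords, every write spends one, and writing past the reservation is a driver bug worth shouting about. A minimal model of that bookkeeping (ring size and mask are illustrative):

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t buf[16];
	unsigned int wptr;
	int count_dw;		/* dwords still reserved for this submission */
};

static void ring_write(struct ring *r, uint32_t v)
{
	if (r->count_dw <= 0)
		fprintf(stderr, "writing more dwords to the ring than expected!\n");
	r->buf[r->wptr++ & 15] = v;	/* wrap like ptr_mask in the driver */
	r->count_dw--;
}

int main(void)
{
	struct ring r = { .wptr = 0, .count_dw = 2 };

	ring_write(&r, 0xdeadbeef);
	ring_write(&r, 0xcafef00d);
	ring_write(&r, 0x0);		/* one too many: triggers the warning */
	return 0;
}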
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f79633a036c3..df8dd7701643 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
2407 WREG32(0x15DC, 0); 2407 WREG32(0x15DC, 0);
2408 2408
2409 /* empty context1-15 */ 2409 /* empty context1-15 */
2410 /* FIXME start with 4G, once using 2 level pt switch to full
2411 * vm size space
2412 */
2413 /* set vm size, must be a multiple of 4 */ 2410 /* set vm size, must be a multiple of 4 */
2414 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 2411 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
2415 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); 2412 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
2413 /* Assign the pt base to something valid for now; the pts used for
2414 * the VMs are determined by the application and set up and assigned
2415 * on the fly in the vm part of radeon_gart.c
2416 */
2416 for (i = 1; i < 16; i++) { 2417 for (i = 1; i < 16; i++) {
2417 if (i < 8) 2418 if (i < 8)
2418 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), 2419 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),