-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 15
-rw-r--r--  fs/dcache.c                             |  2
-rw-r--r--  fs/fscache/page.c                       | 14
6 files changed, 61 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f245c588ae95..ce7914c4c044 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -262,6 +262,7 @@ enum intel_pch {
 };
 
 #define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
 
 struct intel_fbdev;
 
@@ -1194,7 +1195,9 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+				    uint32_t size,
+				    int tiling_mode);
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c0d1247f453..a087e1bf0c2f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1374,25 +1374,24 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 }
 
 static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
-	struct drm_device *dev = obj->base.dev;
-	uint32_t size;
+	uint32_t gtt_size;
 
 	if (INTEL_INFO(dev)->gen >= 4 ||
-	    obj->tiling_mode == I915_TILING_NONE)
-		return obj->base.size;
+	    tiling_mode == I915_TILING_NONE)
+		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
 	if (INTEL_INFO(dev)->gen == 3)
-		size = 1024*1024;
+		gtt_size = 1024*1024;
 	else
-		size = 512*1024;
+		gtt_size = 512*1024;
 
-	while (size < obj->base.size)
-		size <<= 1;
+	while (gtt_size < size)
+		gtt_size <<= 1;
 
-	return size;
+	return gtt_size;
 }
 
 /**
@@ -1403,59 +1402,52 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
  * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+			   uint32_t size,
+			   int tiling_mode)
 {
-	struct drm_device *dev = obj->base.dev;
-
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
 	if (INTEL_INFO(dev)->gen >= 4 ||
-	    obj->tiling_mode == I915_TILING_NONE)
+	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_gtt_size(obj);
+	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
 /**
  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
  * unfenced object
- * @obj: object to check
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
  *
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+				    uint32_t size,
+				    int tiling_mode)
 {
-	struct drm_device *dev = obj->base.dev;
-	int tile_height;
-
 	/*
 	 * Minimum alignment is 4k (GTT page size) for sane hw.
 	 */
 	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-	    obj->tiling_mode == I915_TILING_NONE)
+	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
-	/*
-	 * Older chips need unfenced tiled buffers to be aligned to the left
-	 * edge of an even tile row (where tile rows are counted as if the bo is
-	 * placed in a fenced gtt region).
+	/* Previous hardware however needs to be aligned to a power-of-two
+	 * tile height. The simplest method for determining this is to reuse
+	 * the power-of-tile object size.
 	 */
-	if (IS_GEN2(dev))
-		tile_height = 16;
-	else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-		tile_height = 32;
-	else
-		tile_height = 8;
-
-	return tile_height * obj->stride * 2;
+	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
 int
@@ -2744,9 +2736,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
-	fence_size = i915_gem_get_gtt_size(obj);
-	fence_alignment = i915_gem_get_gtt_alignment(obj);
-	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+	fence_size = i915_gem_get_gtt_size(dev,
+					   obj->base.size,
+					   obj->tiling_mode);
+	fence_alignment = i915_gem_get_gtt_alignment(dev,
+						     obj->base.size,
+						     obj->tiling_mode);
+	unfenced_alignment =
+		i915_gem_get_unfenced_gtt_alignment(dev,
+						    obj->base.size,
+						    obj->tiling_mode);
 
 	if (alignment == 0)
 		alignment = map_and_fenceable ? fence_alignment :
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 82d70fd9e933..99c4faa59d8f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -348,7 +348,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		/* Rebind if we need a change of alignment */
 		if (!obj->map_and_fenceable) {
 			u32 unfenced_alignment =
-				i915_gem_get_unfenced_gtt_alignment(obj);
+				i915_gem_get_unfenced_gtt_alignment(dev,
+								    obj->base.size,
+								    args->tiling_mode);
 			if (obj->gtt_offset & (unfenced_alignment - 1))
 				ret = i915_gem_object_unbind(obj);
 		}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 21b6f93fe919..0f1c799afea1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4305,7 +4305,8 @@ static void intel_update_watermarks(struct drm_device *dev)
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
+		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
@@ -7810,6 +7811,15 @@ static void quirk_pipea_force (struct drm_device *dev)
 	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
 }
 
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -7838,6 +7848,9 @@ struct intel_quirk intel_quirks[] = {
7838 /* 855 & before need to leave pipe A & dpll A up */ 7848 /* 855 & before need to leave pipe A & dpll A up */
7839 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7849 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7840 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7850 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7851
7852 /* Lenovo U160 cannot use SSC on LVDS */
7853 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
7841}; 7854};
7842 7855
7843static void intel_init_quirks(struct drm_device *dev) 7856static void intel_init_quirks(struct drm_device *dev)
diff --git a/fs/dcache.c b/fs/dcache.c
index 6e4ea6d87774..fbdcbca40725 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1813,8 +1813,6 @@ seqretry:
 		tname = dentry->d_name.name;
 		i = dentry->d_inode;
 		prefetch(tname);
-		if (i)
-			prefetch(i);
 		/*
 		 * This seqcount check is required to ensure name and
 		 * len are loaded atomically, so as not to walk off the
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2f343b4d7a7d..3f7a59bfa7ad 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -976,16 +976,12 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 
 	pagevec_init(&pvec, 0);
 	next = 0;
-	while (next <= (loff_t)-1 &&
-	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
-	       ) {
+	do {
+		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+			break;
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-
-			ASSERTCMP(page_index, >=, next);
-			next = page_index + 1;
-
+			next = page->index;
 			if (PageFsCache(page)) {
 				__fscache_wait_on_page_write(cookie, page);
 				__fscache_uncache_page(cookie, page);
@@ -993,7 +989,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-	}
+	} while (++next);
 
 	_leave("");
 }