path: root/drivers/gpu/drm/i915/i915_dma.c
Diffstat (limited to 'drivers/gpu/drm/i915/i915_dma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  360
1 file changed, 109 insertions(+), 251 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dd2c93ebfa3..7a26f4dd21ae 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,8 +40,7 @@
40#include <linux/pnp.h> 40#include <linux/pnp.h>
41#include <linux/vga_switcheroo.h> 41#include <linux/vga_switcheroo.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43 43#include <acpi/video.h>
44extern int intel_max_stolen; /* from AGP driver */
45 44
46/** 45/**
47 * Sets up the hardware status page for devices that need a physical address 46 * Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
64 63
65 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); 64 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
66 65
67 if (IS_I965G(dev)) 66 if (INTEL_INFO(dev)->gen >= 4)
68 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 67 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
69 0xf0; 68 0xf0;
70 69
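
Throughout this patch, IS_I965G() checks are converted to generation comparisons via INTEL_INFO(dev)->gen. A minimal sketch of the shape those checks rely on (field layout assumed for illustration; the real definitions live in i915_drv.h):

    struct intel_device_info {
            u8 gen;  /* 2..4 (i965-class), 5 (Ironlake), 6 (Sandybridge) */
            /* ... per-chipset feature flags ... */
    };

    #define INTEL_INFO(dev) \
            (((struct drm_i915_private *)(dev)->dev_private)->info)

    /* so an "i965 or newer" test becomes: */
    if (INTEL_INFO(dev)->gen >= 4)
            /* i965-class and later path */;
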
@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
133 132
134 mutex_lock(&dev->struct_mutex); 133 mutex_lock(&dev->struct_mutex);
135 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 134 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
136 if (HAS_BSD(dev)) 135 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
137 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 136 intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
138 mutex_unlock(&dev->struct_mutex); 137 mutex_unlock(&dev->struct_mutex);
139 138
140 /* Clear the HWS virtual address at teardown */ 139 /* Clear the HWS virtual address at teardown */
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
222 DRM_DEBUG_DRIVER("hw status page @ %p\n", 221 DRM_DEBUG_DRIVER("hw status page @ %p\n",
223 ring->status_page.page_addr); 222 ring->status_page.page_addr);
224 if (ring->status_page.gfx_addr != 0) 223 if (ring->status_page.gfx_addr != 0)
225 ring->setup_status_page(dev, ring); 224 intel_ring_setup_status_page(dev, ring);
226 else 225 else
227 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 226 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
228 227
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
377 return -EINVAL; 376 return -EINVAL;
378 } 377 }
379 378
380 if (IS_I965G(dev)) { 379 if (INTEL_INFO(dev)->gen >= 4) {
381 BEGIN_LP_RING(4); 380 BEGIN_LP_RING(4);
382 OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 381 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
383 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 382 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
481 480
482 if (!IS_I830(dev) && !IS_845G(dev)) { 481 if (!IS_I830(dev) && !IS_845G(dev)) {
483 BEGIN_LP_RING(2); 482 BEGIN_LP_RING(2);
484 if (IS_I965G(dev)) { 483 if (INTEL_INFO(dev)->gen >= 4) {
485 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 484 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
486 OUT_RING(batch->start); 485 OUT_RING(batch->start);
487 } else { 486 } else {
@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
500 } 499 }
501 500
502 501
503 if (IS_G4X(dev) || IS_IRONLAKE(dev)) { 502 if (IS_G4X(dev) || IS_GEN5(dev)) {
504 BEGIN_LP_RING(2); 503 BEGIN_LP_RING(2);
505 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); 504 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
506 OUT_RING(MI_NOOP); 505 OUT_RING(MI_NOOP);
@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
765 case I915_PARAM_HAS_BSD: 764 case I915_PARAM_HAS_BSD:
766 value = HAS_BSD(dev); 765 value = HAS_BSD(dev);
767 break; 766 break;
767 case I915_PARAM_HAS_BLT:
768 value = HAS_BLT(dev);
769 break;
768 default: 770 default:
769 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 771 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
770 param->param); 772 param->param);
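
Userspace can use the new parameter to detect the BLT ring before relying on it. A minimal sketch, assuming libdrm's drmIoctl() and an i915_drm.h that already carries the I915_PARAM_HAS_BLT define:

    #include <xf86drm.h>
    #include <i915_drm.h>

    static int i915_has_blt(int fd)
    {
            drm_i915_getparam_t gp;
            int value = 0;

            gp.param = I915_PARAM_HAS_BLT;
            gp.value = &value;
            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return 0;  /* older kernel: assume no BLT ring */

            return value;
    }
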
@@ -888,12 +890,12 @@ static int
888intel_alloc_mchbar_resource(struct drm_device *dev) 890intel_alloc_mchbar_resource(struct drm_device *dev)
889{ 891{
890 drm_i915_private_t *dev_priv = dev->dev_private; 892 drm_i915_private_t *dev_priv = dev->dev_private;
891 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 893 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
892 u32 temp_lo, temp_hi = 0; 894 u32 temp_lo, temp_hi = 0;
893 u64 mchbar_addr; 895 u64 mchbar_addr;
894 int ret; 896 int ret;
895 897
896 if (IS_I965G(dev)) 898 if (INTEL_INFO(dev)->gen >= 4)
897 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 899 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
898 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 900 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
899 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 901 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
920 return ret; 922 return ret;
921 } 923 }
922 924
923 if (IS_I965G(dev)) 925 if (INTEL_INFO(dev)->gen >= 4)
924 pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 926 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
925 upper_32_bits(dev_priv->mch_res.start)); 927 upper_32_bits(dev_priv->mch_res.start));
926 928
@@ -934,7 +936,7 @@ static void
934intel_setup_mchbar(struct drm_device *dev) 936intel_setup_mchbar(struct drm_device *dev)
935{ 937{
936 drm_i915_private_t *dev_priv = dev->dev_private; 938 drm_i915_private_t *dev_priv = dev->dev_private;
937 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 939 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
938 u32 temp; 940 u32 temp;
939 bool enabled; 941 bool enabled;
940 942
@@ -971,7 +973,7 @@ static void
971intel_teardown_mchbar(struct drm_device *dev) 973intel_teardown_mchbar(struct drm_device *dev)
972{ 974{
973 drm_i915_private_t *dev_priv = dev->dev_private; 975 drm_i915_private_t *dev_priv = dev->dev_private;
974 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 976 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
975 u32 temp; 977 u32 temp;
976 978
977 if (dev_priv->mchbar_need_disable) { 979 if (dev_priv->mchbar_need_disable) {
@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
990 release_resource(&dev_priv->mch_res); 992 release_resource(&dev_priv->mch_res);
991} 993}
992 994
993/**
994 * i915_probe_agp - get AGP bootup configuration
995 * @pdev: PCI device
996 * @aperture_size: returns AGP aperture configured size
997 * @preallocated_size: returns size of BIOS preallocated AGP space
998 *
999 * Since Intel integrated graphics are UMA, the BIOS has to set aside
1000 * some RAM for the framebuffer at early boot. This code figures out
1001 * how much was set aside so we can use it for our own purposes.
1002 */
1003static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
1004 uint32_t *preallocated_size,
1005 uint32_t *start)
1006{
1007 struct drm_i915_private *dev_priv = dev->dev_private;
1008 u16 tmp = 0;
1009 unsigned long overhead;
1010 unsigned long stolen;
1011
1012 /* Get the fb aperture size and "stolen" memory amount. */
1013 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
1014
1015 *aperture_size = 1024 * 1024;
1016 *preallocated_size = 1024 * 1024;
1017
1018 switch (dev->pdev->device) {
1019 case PCI_DEVICE_ID_INTEL_82830_CGC:
1020 case PCI_DEVICE_ID_INTEL_82845G_IG:
1021 case PCI_DEVICE_ID_INTEL_82855GM_IG:
1022 case PCI_DEVICE_ID_INTEL_82865_IG:
1023 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
1024 *aperture_size *= 64;
1025 else
1026 *aperture_size *= 128;
1027 break;
1028 default:
1029 /* 9xx supports large sizes, just look at the length */
1030 *aperture_size = pci_resource_len(dev->pdev, 2);
1031 break;
1032 }
1033
1034 /*
1035 * Some of the preallocated space is taken by the GTT
1036 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
1037 */
1038 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
1039 overhead = 4096;
1040 else
1041 overhead = (*aperture_size / 1024) + 4096;
1042
1043 if (IS_GEN6(dev)) {
1044 /* SNB has memory control reg at 0x50.w */
1045 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
1046
1047 switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
1048 case INTEL_855_GMCH_GMS_DISABLED:
1049 DRM_ERROR("video memory is disabled\n");
1050 return -1;
1051 case SNB_GMCH_GMS_STOLEN_32M:
1052 stolen = 32 * 1024 * 1024;
1053 break;
1054 case SNB_GMCH_GMS_STOLEN_64M:
1055 stolen = 64 * 1024 * 1024;
1056 break;
1057 case SNB_GMCH_GMS_STOLEN_96M:
1058 stolen = 96 * 1024 * 1024;
1059 break;
1060 case SNB_GMCH_GMS_STOLEN_128M:
1061 stolen = 128 * 1024 * 1024;
1062 break;
1063 case SNB_GMCH_GMS_STOLEN_160M:
1064 stolen = 160 * 1024 * 1024;
1065 break;
1066 case SNB_GMCH_GMS_STOLEN_192M:
1067 stolen = 192 * 1024 * 1024;
1068 break;
1069 case SNB_GMCH_GMS_STOLEN_224M:
1070 stolen = 224 * 1024 * 1024;
1071 break;
1072 case SNB_GMCH_GMS_STOLEN_256M:
1073 stolen = 256 * 1024 * 1024;
1074 break;
1075 case SNB_GMCH_GMS_STOLEN_288M:
1076 stolen = 288 * 1024 * 1024;
1077 break;
1078 case SNB_GMCH_GMS_STOLEN_320M:
1079 stolen = 320 * 1024 * 1024;
1080 break;
1081 case SNB_GMCH_GMS_STOLEN_352M:
1082 stolen = 352 * 1024 * 1024;
1083 break;
1084 case SNB_GMCH_GMS_STOLEN_384M:
1085 stolen = 384 * 1024 * 1024;
1086 break;
1087 case SNB_GMCH_GMS_STOLEN_416M:
1088 stolen = 416 * 1024 * 1024;
1089 break;
1090 case SNB_GMCH_GMS_STOLEN_448M:
1091 stolen = 448 * 1024 * 1024;
1092 break;
1093 case SNB_GMCH_GMS_STOLEN_480M:
1094 stolen = 480 * 1024 * 1024;
1095 break;
1096 case SNB_GMCH_GMS_STOLEN_512M:
1097 stolen = 512 * 1024 * 1024;
1098 break;
1099 default:
1100 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1101 tmp & SNB_GMCH_GMS_STOLEN_MASK);
1102 return -1;
1103 }
1104 } else {
1105 switch (tmp & INTEL_GMCH_GMS_MASK) {
1106 case INTEL_855_GMCH_GMS_DISABLED:
1107 DRM_ERROR("video memory is disabled\n");
1108 return -1;
1109 case INTEL_855_GMCH_GMS_STOLEN_1M:
1110 stolen = 1 * 1024 * 1024;
1111 break;
1112 case INTEL_855_GMCH_GMS_STOLEN_4M:
1113 stolen = 4 * 1024 * 1024;
1114 break;
1115 case INTEL_855_GMCH_GMS_STOLEN_8M:
1116 stolen = 8 * 1024 * 1024;
1117 break;
1118 case INTEL_855_GMCH_GMS_STOLEN_16M:
1119 stolen = 16 * 1024 * 1024;
1120 break;
1121 case INTEL_855_GMCH_GMS_STOLEN_32M:
1122 stolen = 32 * 1024 * 1024;
1123 break;
1124 case INTEL_915G_GMCH_GMS_STOLEN_48M:
1125 stolen = 48 * 1024 * 1024;
1126 break;
1127 case INTEL_915G_GMCH_GMS_STOLEN_64M:
1128 stolen = 64 * 1024 * 1024;
1129 break;
1130 case INTEL_GMCH_GMS_STOLEN_128M:
1131 stolen = 128 * 1024 * 1024;
1132 break;
1133 case INTEL_GMCH_GMS_STOLEN_256M:
1134 stolen = 256 * 1024 * 1024;
1135 break;
1136 case INTEL_GMCH_GMS_STOLEN_96M:
1137 stolen = 96 * 1024 * 1024;
1138 break;
1139 case INTEL_GMCH_GMS_STOLEN_160M:
1140 stolen = 160 * 1024 * 1024;
1141 break;
1142 case INTEL_GMCH_GMS_STOLEN_224M:
1143 stolen = 224 * 1024 * 1024;
1144 break;
1145 case INTEL_GMCH_GMS_STOLEN_352M:
1146 stolen = 352 * 1024 * 1024;
1147 break;
1148 default:
1149 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1150 tmp & INTEL_GMCH_GMS_MASK);
1151 return -1;
1152 }
1153 }
1154
1155 *preallocated_size = stolen - overhead;
1156 *start = overhead;
1157
1158 return 0;
1159}
1160
1161#define PTE_ADDRESS_MASK 0xfffff000 995#define PTE_ADDRESS_MASK 0xfffff000
1162#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ 996#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
1163#define PTE_MAPPING_TYPE_UNCACHED (0 << 1) 997#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
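
With i915_probe_agp() and its per-chipset GMCH stolen-memory decode removed, the aperture and stolen sizes come from the shared intel-gtt driver instead; the i915_driver_load hunk further down does, in essence:

    dev_priv->mm.gtt = intel_gtt_get();
    prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
    agp_size      = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
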
@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1181{ 1015{
1182 unsigned long *gtt; 1016 unsigned long *gtt;
1183 unsigned long entry, phys; 1017 unsigned long entry, phys;
1184 int gtt_bar = IS_I9XX(dev) ? 0 : 1; 1018 int gtt_bar = IS_GEN2(dev) ? 1 : 0;
1185 int gtt_offset, gtt_size; 1019 int gtt_offset, gtt_size;
1186 1020
1187 if (IS_I965G(dev)) { 1021 if (INTEL_INFO(dev)->gen >= 4) {
1188 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { 1022 if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
1189 gtt_offset = 2*1024*1024; 1023 gtt_offset = 2*1024*1024;
1190 gtt_size = 2*1024*1024; 1024 gtt_size = 2*1024*1024;
1191 } else { 1025 } else {
@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1210 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); 1044 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
1211 1045
1212 /* Mask out these reserved bits on this hardware. */ 1046 /* Mask out these reserved bits on this hardware. */
1213 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || 1047 if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
1214 IS_I945G(dev) || IS_I945GM(dev)) {
1215 entry &= ~PTE_ADDRESS_MASK_HIGH; 1048 entry &= ~PTE_ADDRESS_MASK_HIGH;
1216 }
1217 1049
1218 /* If it's not a mapping type we know, then bail. */ 1050 /* If it's not a mapping type we know, then bail. */
1219 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && 1051 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
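
Using only the PTE_* masks defined above, the address reconstruction this function performs amounts to roughly the following sketch (the helper name is hypothetical, and placing the high nibble at bits 35:32 is an assumption consistent with the ">> 28 & 0xf0" status-page code earlier in this file):

    static u64 gtt_pte_to_phys(u32 entry)
    {
            u64 phys = entry & PTE_ADDRESS_MASK;                 /* bits 31:12 */

            phys |= (u64)(entry & PTE_ADDRESS_MASK_HIGH) << 28;  /* bits 7:4 -> 35:32 */
            return phys;
    }
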
@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1252 unsigned long ll_base = 0; 1084 unsigned long ll_base = 0;
1253 1085
1254 /* Leave 1M for line length buffer & misc. */ 1086 /* Leave 1M for line length buffer & misc. */
1255 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1087 compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
1256 if (!compressed_fb) { 1088 if (!compressed_fb) {
1257 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1089 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1258 i915_warn_stolen(dev); 1090 i915_warn_stolen(dev);
@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1273 } 1105 }
1274 1106
1275 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { 1107 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
1276 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 1108 compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
1277 4096, 0); 1109 4096, 0);
1278 if (!compressed_llb) { 1110 if (!compressed_llb) {
1279 i915_warn_stolen(dev); 1111 i915_warn_stolen(dev);
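
The compressed framebuffer and line-length buffer are carved out of the stolen range (now dev_priv->mm.vram) with the drm_mm allocator. The usual search-then-claim pattern of that era, error handling trimmed and given here only as an assumed sketch:

    struct drm_mm_node *node;

    node = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
    if (node)
            node = drm_mm_get_block(node, size, 4096);  /* claim the hole */
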
@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1343 /* i915 resume handler doesn't set to D0 */ 1175 /* i915 resume handler doesn't set to D0 */
1344 pci_set_power_state(dev->pdev, PCI_D0); 1176 pci_set_power_state(dev->pdev, PCI_D0);
1345 i915_resume(dev); 1177 i915_resume(dev);
1346 drm_kms_helper_poll_enable(dev);
1347 } else { 1178 } else {
1348 printk(KERN_ERR "i915: switched off\n"); 1179 printk(KERN_ERR "i915: switched off\n");
1349 drm_kms_helper_poll_disable(dev);
1350 i915_suspend(dev, pmm); 1180 i915_suspend(dev, pmm);
1351 } 1181 }
1352} 1182}
@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1363} 1193}
1364 1194
1365static int i915_load_modeset_init(struct drm_device *dev, 1195static int i915_load_modeset_init(struct drm_device *dev,
1366 unsigned long prealloc_start,
1367 unsigned long prealloc_size, 1196 unsigned long prealloc_size,
1368 unsigned long agp_size) 1197 unsigned long agp_size)
1369{ 1198{
1370 struct drm_i915_private *dev_priv = dev->dev_private; 1199 struct drm_i915_private *dev_priv = dev->dev_private;
1371 int fb_bar = IS_I9XX(dev) ? 2 : 0;
1372 int ret = 0; 1200 int ret = 0;
1373 1201
1374 dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & 1202 /* Basic memrange allocator for stolen space (aka mm.vram) */
1375 0xff000000; 1203 drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
1376
1377 /* Basic memrange allocator for stolen space (aka vram) */
1378 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1379 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
1380
1381 /* We're off and running w/KMS */
1382 dev_priv->mm.suspended = 0;
1383 1204
1384 /* Let GEM Manage from end of prealloc space to end of aperture. 1205 /* Let GEM Manage from end of prealloc space to end of aperture.
1385 * 1206 *
@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1414 */ 1235 */
1415 dev_priv->allow_batchbuffer = 1; 1236 dev_priv->allow_batchbuffer = 1;
1416 1237
1417 ret = intel_init_bios(dev); 1238 ret = intel_parse_bios(dev);
1418 if (ret) 1239 if (ret)
1419 DRM_INFO("failed to find VBIOS tables\n"); 1240 DRM_INFO("failed to find VBIOS tables\n");
1420 1241
@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
1423 if (ret) 1244 if (ret)
1424 goto cleanup_ringbuffer; 1245 goto cleanup_ringbuffer;
1425 1246
1247 intel_register_dsm_handler();
1248
1426 ret = vga_switcheroo_register_client(dev->pdev, 1249 ret = vga_switcheroo_register_client(dev->pdev,
1427 i915_switcheroo_set_state, 1250 i915_switcheroo_set_state,
1428 i915_switcheroo_can_switch); 1251 i915_switcheroo_can_switch);
@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
1443 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1266 /* FIXME: do pre/post-mode set stuff in core KMS code */
1444 dev->vblank_disable_allowed = 1; 1267 dev->vblank_disable_allowed = 1;
1445 1268
1446 /*
1447 * Initialize the hardware status page IRQ location.
1448 */
1449
1450 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1451
1452 ret = intel_fbdev_init(dev); 1269 ret = intel_fbdev_init(dev);
1453 if (ret) 1270 if (ret)
1454 goto cleanup_irq; 1271 goto cleanup_irq;
1455 1272
1456 drm_kms_helper_poll_init(dev); 1273 drm_kms_helper_poll_init(dev);
1274
1275 /* We're off and running w/KMS */
1276 dev_priv->mm.suspended = 0;
1277
1457 return 0; 1278 return 0;
1458 1279
1459cleanup_irq: 1280cleanup_irq:
@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
1907 * - dev_priv->fmax 1728 * - dev_priv->fmax
1908 * - dev_priv->gpu_busy 1729 * - dev_priv->gpu_busy
1909 */ 1730 */
1910DEFINE_SPINLOCK(mchdev_lock); 1731static DEFINE_SPINLOCK(mchdev_lock);
1911 1732
1912/** 1733/**
1913 * i915_read_mch_val - return value for IPS use 1734 * i915_read_mch_val - return value for IPS use
@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2062 struct drm_i915_private *dev_priv; 1883 struct drm_i915_private *dev_priv;
2063 resource_size_t base, size; 1884 resource_size_t base, size;
2064 int ret = 0, mmio_bar; 1885 int ret = 0, mmio_bar;
2065 uint32_t agp_size, prealloc_size, prealloc_start; 1886 uint32_t agp_size, prealloc_size;
2066 /* i915 has 4 more counters */ 1887 /* i915 has 4 more counters */
2067 dev->counters += 4; 1888 dev->counters += 4;
2068 dev->types[6] = _DRM_STAT_IRQ; 1889 dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2079 dev_priv->info = (struct intel_device_info *) flags; 1900 dev_priv->info = (struct intel_device_info *) flags;
2080 1901
2081 /* Add register map (needed for suspend/resume) */ 1902 /* Add register map (needed for suspend/resume) */
2082 mmio_bar = IS_I9XX(dev) ? 0 : 1; 1903 mmio_bar = IS_GEN2(dev) ? 1 : 0;
2083 base = pci_resource_start(dev->pdev, mmio_bar); 1904 base = pci_resource_start(dev->pdev, mmio_bar);
2084 size = pci_resource_len(dev->pdev, mmio_bar); 1905 size = pci_resource_len(dev->pdev, mmio_bar);
2085 1906
@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2121 "performance may suffer.\n"); 1942 "performance may suffer.\n");
2122 } 1943 }
2123 1944
2124 ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); 1945 dev_priv->mm.gtt = intel_gtt_get();
2125 if (ret) 1946 if (!dev_priv->mm.gtt) {
1947 DRM_ERROR("Failed to initialize GTT\n");
1948 ret = -ENODEV;
2126 goto out_iomapfree; 1949 goto out_iomapfree;
2127
2128 if (prealloc_size > intel_max_stolen) {
2129 DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
2130 prealloc_size >> 20, intel_max_stolen >> 20);
2131 prealloc_size = intel_max_stolen;
2132 } 1950 }
2133 1951
2134 dev_priv->wq = create_singlethread_workqueue("i915"); 1952 prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
1953 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1954
1955 /* The i915 workqueue is primarily used for batched retirement of
1956 * requests (and thus managing bo) once the task has been completed
1957 * by the GPU. i915_gem_retire_requests() is called directly when we
1958 * need high-priority retirement, such as waiting for an explicit
1959 * bo.
1960 *
1961 * It is also used for periodic low-priority events, such as
1962 * idle-timers and hangcheck.
1963 *
1964 * All tasks on the workqueue are expected to acquire the dev mutex
1965 * so there is no point in running more than one instance of the
1966 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1967 */
1968 dev_priv->wq = alloc_workqueue("i915",
1969 WQ_UNBOUND | WQ_NON_REENTRANT,
1970 1);
2135 if (dev_priv->wq == NULL) { 1971 if (dev_priv->wq == NULL) {
2136 DRM_ERROR("Failed to create our workqueue.\n"); 1972 DRM_ERROR("Failed to create our workqueue.\n");
2137 ret = -ENOMEM; 1973 ret = -ENOMEM;
@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2159 1995
2160 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1996 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2161 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1997 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2162 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { 1998 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
2163 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1999 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2164 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2000 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2165 } 2001 }
2166 2002
2167 /* Try to make sure MCHBAR is enabled before poking at it */ 2003 /* Try to make sure MCHBAR is enabled before poking at it */
2168 intel_setup_mchbar(dev); 2004 intel_setup_mchbar(dev);
2005 intel_setup_gmbus(dev);
2006 intel_opregion_setup(dev);
2007
2008 /* Make sure the bios did its job and set up vital registers */
2009 intel_setup_bios(dev);
2169 2010
2170 i915_gem_load(dev); 2011 i915_gem_load(dev);
2171 2012
@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2178 2019
2179 if (IS_PINEVIEW(dev)) 2020 if (IS_PINEVIEW(dev))
2180 i915_pineview_get_mem_freq(dev); 2021 i915_pineview_get_mem_freq(dev);
2181 else if (IS_IRONLAKE(dev)) 2022 else if (IS_GEN5(dev))
2182 i915_ironlake_get_mem_freq(dev); 2023 i915_ironlake_get_mem_freq(dev);
2183 2024
2184 /* On the 945G/GM, the chipset reports the MSI capability on the 2025 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2212 intel_detect_pch(dev); 2053 intel_detect_pch(dev);
2213 2054
2214 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2055 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2215 ret = i915_load_modeset_init(dev, prealloc_start, 2056 ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
2216 prealloc_size, agp_size);
2217 if (ret < 0) { 2057 if (ret < 0) {
2218 DRM_ERROR("failed to init modeset\n"); 2058 DRM_ERROR("failed to init modeset\n");
2219 goto out_workqueue_free; 2059 goto out_workqueue_free;
@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2221 } 2061 }
2222 2062
2223 /* Must be done after probing outputs */ 2063 /* Must be done after probing outputs */
2224 intel_opregion_init(dev, 0); 2064 intel_opregion_init(dev);
2065 acpi_video_register();
2225 2066
2226 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2067 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2227 (unsigned long) dev); 2068 (unsigned long) dev);
@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2231 dev_priv->mchdev_lock = &mchdev_lock; 2072 dev_priv->mchdev_lock = &mchdev_lock;
2232 spin_unlock(&mchdev_lock); 2073 spin_unlock(&mchdev_lock);
2233 2074
2234 /* XXX Prevent module unload due to memory corruption bugs. */
2235 __module_get(THIS_MODULE);
2236
2237 return 0; 2075 return 0;
2238 2076
2239out_workqueue_free: 2077out_workqueue_free:
@@ -2252,15 +2090,20 @@ free_priv:
2252int i915_driver_unload(struct drm_device *dev) 2090int i915_driver_unload(struct drm_device *dev)
2253{ 2091{
2254 struct drm_i915_private *dev_priv = dev->dev_private; 2092 struct drm_i915_private *dev_priv = dev->dev_private;
2255 2093 int ret;
2256 i915_destroy_error_state(dev);
2257 2094
2258 spin_lock(&mchdev_lock); 2095 spin_lock(&mchdev_lock);
2259 i915_mch_dev = NULL; 2096 i915_mch_dev = NULL;
2260 spin_unlock(&mchdev_lock); 2097 spin_unlock(&mchdev_lock);
2261 2098
2262 destroy_workqueue(dev_priv->wq); 2099 mutex_lock(&dev->struct_mutex);
2263 del_timer_sync(&dev_priv->hangcheck_timer); 2100 ret = i915_gpu_idle(dev);
2101 if (ret)
2102 DRM_ERROR("failed to idle hardware: %d\n", ret);
2103 mutex_unlock(&dev->struct_mutex);
2104
2105 /* Cancel the retire work handler, which should be idle now. */
2106 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2264 2107
2265 io_mapping_free(dev_priv->mm.gtt_mapping); 2108 io_mapping_free(dev_priv->mm.gtt_mapping);
2266 if (dev_priv->mm.gtt_mtrr >= 0) { 2109 if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
2269 dev_priv->mm.gtt_mtrr = -1; 2112 dev_priv->mm.gtt_mtrr = -1;
2270 } 2113 }
2271 2114
2115 acpi_video_unregister();
2116
2272 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2117 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2118 intel_fbdev_fini(dev);
2273 intel_modeset_cleanup(dev); 2119 intel_modeset_cleanup(dev);
2274 2120
2275 /* 2121 /*
@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
2281 dev_priv->child_dev = NULL; 2127 dev_priv->child_dev = NULL;
2282 dev_priv->child_dev_num = 0; 2128 dev_priv->child_dev_num = 0;
2283 } 2129 }
2284 drm_irq_uninstall(dev); 2130
2285 vga_switcheroo_unregister_client(dev->pdev); 2131 vga_switcheroo_unregister_client(dev->pdev);
2286 vga_client_register(dev->pdev, NULL, NULL, NULL); 2132 vga_client_register(dev->pdev, NULL, NULL, NULL);
2287 } 2133 }
2288 2134
2135 /* Free error state after interrupts are fully disabled. */
2136 del_timer_sync(&dev_priv->hangcheck_timer);
2137 cancel_work_sync(&dev_priv->error_work);
2138 i915_destroy_error_state(dev);
2139
2289 if (dev->pdev->msi_enabled) 2140 if (dev->pdev->msi_enabled)
2290 pci_disable_msi(dev->pdev); 2141 pci_disable_msi(dev->pdev);
2291 2142
2292 if (dev_priv->regs != NULL) 2143 intel_opregion_fini(dev);
2293 iounmap(dev_priv->regs);
2294
2295 intel_opregion_free(dev, 0);
2296 2144
2297 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2145 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2146 /* Flush any outstanding unpin_work. */
2147 flush_workqueue(dev_priv->wq);
2148
2298 i915_gem_free_all_phys_object(dev); 2149 i915_gem_free_all_phys_object(dev);
2299 2150
2300 mutex_lock(&dev->struct_mutex); 2151 mutex_lock(&dev->struct_mutex);
@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
2302 mutex_unlock(&dev->struct_mutex); 2153 mutex_unlock(&dev->struct_mutex);
2303 if (I915_HAS_FBC(dev) && i915_powersave) 2154 if (I915_HAS_FBC(dev) && i915_powersave)
2304 i915_cleanup_compression(dev); 2155 i915_cleanup_compression(dev);
2305 drm_mm_takedown(&dev_priv->vram); 2156 drm_mm_takedown(&dev_priv->mm.vram);
2306 i915_gem_lastclose(dev);
2307 2157
2308 intel_cleanup_overlay(dev); 2158 intel_cleanup_overlay(dev);
2159
2160 if (!I915_NEED_GFX_HWS(dev))
2161 i915_free_hws(dev);
2309 } 2162 }
2310 2163
2164 if (dev_priv->regs != NULL)
2165 iounmap(dev_priv->regs);
2166
2167 intel_teardown_gmbus(dev);
2311 intel_teardown_mchbar(dev); 2168 intel_teardown_mchbar(dev);
2312 2169
2170 destroy_workqueue(dev_priv->wq);
2171
2313 pci_dev_put(dev_priv->bridge_dev); 2172 pci_dev_put(dev_priv->bridge_dev);
2314 kfree(dev->dev_private); 2173 kfree(dev->dev_private);
2315 2174
2316 return 0; 2175 return 0;
2317} 2176}
2318 2177
2319int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) 2178int i915_driver_open(struct drm_device *dev, struct drm_file *file)
2320{ 2179{
2321 struct drm_i915_file_private *i915_file_priv; 2180 struct drm_i915_file_private *file_priv;
2322 2181
2323 DRM_DEBUG_DRIVER("\n"); 2182 DRM_DEBUG_DRIVER("\n");
2324 i915_file_priv = (struct drm_i915_file_private *) 2183 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
2325 kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); 2184 if (!file_priv)
2326
2327 if (!i915_file_priv)
2328 return -ENOMEM; 2185 return -ENOMEM;
2329 2186
2330 file_priv->driver_priv = i915_file_priv; 2187 file->driver_priv = file_priv;
2331 2188
2332 INIT_LIST_HEAD(&i915_file_priv->mm.request_list); 2189 spin_lock_init(&file_priv->mm.lock);
2190 INIT_LIST_HEAD(&file_priv->mm.request_list);
2333 2191
2334 return 0; 2192 return 0;
2335} 2193}
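
The per-file private data now carries its own spinlock and request list. The intended usage pattern, sketched with an assumed request type whose client_list/file_priv fields are illustrative:

    spin_lock(&file_priv->mm.lock);
    list_add_tail(&request->client_list, &file_priv->mm.request_list);
    request->file_priv = file_priv;
    spin_unlock(&file_priv->mm.lock);
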
@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
2372 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 2230 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
2373} 2231}
2374 2232
2375void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) 2233void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
2376{ 2234{
2377 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 2235 struct drm_i915_file_private *file_priv = file->driver_priv;
2378 2236
2379 kfree(i915_file_priv); 2237 kfree(file_priv);
2380} 2238}
2381 2239
2382struct drm_ioctl_desc i915_ioctls[] = { 2240struct drm_ioctl_desc i915_ioctls[] = {