author    Paul Mundt <lethal@linux-sh.org>	2011-01-13 04:38:28 -0500
committer Paul Mundt <lethal@linux-sh.org>	2011-01-13 04:38:28 -0500
commit    8b6f08eaef16dfcfebc32fa9a017bf70336ad9ec (patch)
tree      9f29f39de67b85baad5eca7d7165549a166c4367 /drivers
parent    4ae26f46c98f58ef19ad34f475617b40740d2faa (diff)
parent    8a453cac94803910305f7e95cbd157b6bbd88811 (diff)
Merge branch 'sh/alphaproject' into sh-latest
Diffstat (limited to 'drivers')
32 files changed, 1742 insertions(+), 948 deletions(-)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4b9359a6f6ca..83c32cb72582 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -464,6 +464,7 @@ config XEN_BLKDEV_FRONTEND
464 | tristate "Xen virtual block device support" | 464 | tristate "Xen virtual block device support" |
465 | depends on XEN | 465 | depends on XEN |
466 | default y | 466 | default y |
467 | select XEN_XENBUS_FRONTEND | ||
467 | help | 468 | help |
468 | This driver implements the front-end of the Xen virtual | 469 | This driver implements the front-end of the Xen virtual |
469 | block device driver. It communicates with a back-end driver | 470 | block device driver. It communicates with a back-end driver |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 010e3defd6c3..c195bfeade11 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -94,6 +94,8 @@
94 | #define G4x_GMCH_SIZE_VT_1_5M (0xa << 8) | 94 | #define G4x_GMCH_SIZE_VT_1_5M (0xa << 8) |
95 | #define G4x_GMCH_SIZE_VT_2M (0xc << 8) | 95 | #define G4x_GMCH_SIZE_VT_2M (0xc << 8) |
96 | 96 | ||
97 | #define GFX_FLSH_CNTL 0x2170 /* 915+ */ | ||
98 | |||
97 | #define I810_DRAM_CTL 0x3000 | 99 | #define I810_DRAM_CTL 0x3000 |
98 | #define I810_DRAM_ROW_0 0x00000001 | 100 | #define I810_DRAM_ROW_0 0x00000001 |
99 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | 101 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 356f73e0d17e..e921b693412b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -688,14 +688,14 @@ static int intel_gtt_init(void)
688 | 688 | ||
689 | intel_private.base.stolen_size = intel_gtt_stolen_size(); | 689 | intel_private.base.stolen_size = intel_gtt_stolen_size(); |
690 | 690 | ||
691 | intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; | ||
692 | |||
691 | ret = intel_gtt_setup_scratch_page(); | 693 | ret = intel_gtt_setup_scratch_page(); |
692 | if (ret != 0) { | 694 | if (ret != 0) { |
693 | intel_gtt_cleanup(); | 695 | intel_gtt_cleanup(); |
694 | return ret; | 696 | return ret; |
695 | } | 697 | } |
696 | 698 | ||
697 | intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; | ||
698 | |||
699 | return 0; | 699 | return 0; |
700 | } | 700 | } |
701 | 701 | ||
@@ -814,6 +814,12 @@ static bool intel_enable_gtt(void) | |||
814 | } | 814 | } |
815 | } | 815 | } |
816 | 816 | ||
817 | /* On the resume path we may be adjusting the PGTBL value, so | ||
818 | * be paranoid and flush all chipset write buffers... | ||
819 | */ | ||
820 | if (INTEL_GTT_GEN >= 3) | ||
821 | writel(0, intel_private.registers+GFX_FLSH_CNTL); | ||
822 | |||
817 | reg = intel_private.registers+I810_PGETBL_CTL; | 823 | reg = intel_private.registers+I810_PGETBL_CTL; |
818 | writel(intel_private.PGETBL_save, reg); | 824 | writel(intel_private.PGETBL_save, reg); |
819 | if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { | 825 | if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { |
@@ -823,6 +829,9 @@ static bool intel_enable_gtt(void) | |||
823 | return false; | 829 | return false; |
824 | } | 830 | } |
825 | 831 | ||
832 | if (INTEL_GTT_GEN >= 3) | ||
833 | writel(0, intel_private.registers+GFX_FLSH_CNTL); | ||
834 | |||
826 | return true; | 835 | return true; |
827 | } | 836 | } |
828 | 837 | ||
@@ -991,14 +1000,14 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem, | |||
991 | if (mem->page_count == 0) | 1000 | if (mem->page_count == 0) |
992 | return 0; | 1001 | return 0; |
993 | 1002 | ||
1003 | intel_gtt_clear_range(pg_start, mem->page_count); | ||
1004 | |||
994 | if (intel_private.base.needs_dmar) { | 1005 | if (intel_private.base.needs_dmar) { |
995 | intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); | 1006 | intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); |
996 | mem->sg_list = NULL; | 1007 | mem->sg_list = NULL; |
997 | mem->num_sg = 0; | 1008 | mem->num_sg = 0; |
998 | } | 1009 | } |
999 | 1010 | ||
1000 | intel_gtt_clear_range(pg_start, mem->page_count); | ||
1001 | |||
1002 | return 0; | 1011 | return 0; |
1003 | } | 1012 | } |
1004 | 1013 | ||
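The intel-gtt hunks above reorder teardown (clear the GTT range before unmapping the scatterlist) and add chipset write-buffer flushes around the PGETBL reprogramming on resume. Below is a minimal sketch of that flush-around-update pattern, not the driver's actual function; it reuses GFX_FLSH_CNTL from this diff, assumes the usual I810_PGETBL_CTL offset from intel-agp.h, and elides the enable check and error handling.

```c
#include <linux/io.h>
#include <linux/types.h>

#define GFX_FLSH_CNTL   0x2170  /* 915+ chipset write-buffer flush (from this diff) */
#define I810_PGETBL_CTL 0x2020  /* page-table control register, offset as in intel-agp.h */

/* Illustrative only: on resume the PGTBL value may be changing while
 * chipset writes are still buffered, so flush before rewriting the
 * register and again once the new value is in place.
 */
static void reprogram_pgetbl(void __iomem *regs, u32 pgetbl_save, int gen)
{
	if (gen >= 3)
		writel(0, regs + GFX_FLSH_CNTL);	/* drain pending writes */

	writel(pgetbl_save, regs + I810_PGETBL_CTL);

	if (gen >= 3)
		writel(0, regs + GFX_FLSH_CNTL);	/* flush the update itself */
}
```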
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92f75782c332..19a3d58044dd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
106 | } | 106 | } |
107 | } | 107 | } |
108 | 108 | ||
109 | static const char *agp_type_str(int type) | ||
110 | { | ||
111 | switch (type) { | ||
112 | case 0: return " uncached"; | ||
113 | case 1: return " snooped"; | ||
114 | default: return ""; | ||
115 | } | ||
116 | } | ||
117 | |||
109 | static void | 118 | static void |
110 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | 119 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) |
111 | { | 120 | { |
112 | seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s", | 121 | seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", |
113 | &obj->base, | 122 | &obj->base, |
114 | get_pin_flag(obj), | 123 | get_pin_flag(obj), |
115 | get_tiling_flag(obj), | 124 | get_tiling_flag(obj), |
@@ -118,6 +127,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
118 | obj->base.write_domain, | 127 | obj->base.write_domain, |
119 | obj->last_rendering_seqno, | 128 | obj->last_rendering_seqno, |
120 | obj->last_fenced_seqno, | 129 | obj->last_fenced_seqno, |
130 | agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY), | ||
121 | obj->dirty ? " dirty" : "", | 131 | obj->dirty ? " dirty" : "", |
122 | obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); | 132 | obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); |
123 | if (obj->base.name) | 133 | if (obj->base.name) |
@@ -276,6 +286,37 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
276 | return 0; | 286 | return 0; |
277 | } | 287 | } |
278 | 288 | ||
289 | static int i915_gem_gtt_info(struct seq_file *m, void* data) | ||
290 | { | ||
291 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
292 | struct drm_device *dev = node->minor->dev; | ||
293 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
294 | struct drm_i915_gem_object *obj; | ||
295 | size_t total_obj_size, total_gtt_size; | ||
296 | int count, ret; | ||
297 | |||
298 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
299 | if (ret) | ||
300 | return ret; | ||
301 | |||
302 | total_obj_size = total_gtt_size = count = 0; | ||
303 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | ||
304 | seq_printf(m, " "); | ||
305 | describe_obj(m, obj); | ||
306 | seq_printf(m, "\n"); | ||
307 | total_obj_size += obj->base.size; | ||
308 | total_gtt_size += obj->gtt_space->size; | ||
309 | count++; | ||
310 | } | ||
311 | |||
312 | mutex_unlock(&dev->struct_mutex); | ||
313 | |||
314 | seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", | ||
315 | count, total_obj_size, total_gtt_size); | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
279 | 320 | ||
280 | static int i915_gem_pageflip_info(struct seq_file *m, void *data) | 321 | static int i915_gem_pageflip_info(struct seq_file *m, void *data) |
281 | { | 322 | { |
@@ -456,8 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
456 | } | 497 | } |
457 | seq_printf(m, "Interrupts received: %d\n", | 498 | seq_printf(m, "Interrupts received: %d\n", |
458 | atomic_read(&dev_priv->irq_received)); | 499 | atomic_read(&dev_priv->irq_received)); |
459 | for (i = 0; i < I915_NUM_RINGS; i++) | 500 | for (i = 0; i < I915_NUM_RINGS; i++) { |
501 | if (IS_GEN6(dev)) { | ||
502 | seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", | ||
503 | dev_priv->ring[i].name, | ||
504 | I915_READ_IMR(&dev_priv->ring[i])); | ||
505 | } | ||
460 | i915_ring_seqno_info(m, &dev_priv->ring[i]); | 506 | i915_ring_seqno_info(m, &dev_priv->ring[i]); |
507 | } | ||
461 | mutex_unlock(&dev->struct_mutex); | 508 | mutex_unlock(&dev->struct_mutex); |
462 | 509 | ||
463 | return 0; | 510 | return 0; |
@@ -656,7 +703,7 @@ static void print_error_buffers(struct seq_file *m, | |||
656 | seq_printf(m, "%s [%d]:\n", name, count); | 703 | seq_printf(m, "%s [%d]:\n", name, count); |
657 | 704 | ||
658 | while (count--) { | 705 | while (count--) { |
659 | seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s", | 706 | seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s%s", |
660 | err->gtt_offset, | 707 | err->gtt_offset, |
661 | err->size, | 708 | err->size, |
662 | err->read_domains, | 709 | err->read_domains, |
@@ -666,7 +713,8 @@ static void print_error_buffers(struct seq_file *m, | |||
666 | tiling_flag(err->tiling), | 713 | tiling_flag(err->tiling), |
667 | dirty_flag(err->dirty), | 714 | dirty_flag(err->dirty), |
668 | purgeable_flag(err->purgeable), | 715 | purgeable_flag(err->purgeable), |
669 | ring_str(err->ring)); | 716 | ring_str(err->ring), |
717 | agp_type_str(err->agp_type)); | ||
670 | 718 | ||
671 | if (err->name) | 719 | if (err->name) |
672 | seq_printf(m, " (name: %d)", err->name); | 720 | seq_printf(m, " (name: %d)", err->name); |
@@ -744,7 +792,9 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
744 | if (error->batchbuffer[i]) { | 792 | if (error->batchbuffer[i]) { |
745 | struct drm_i915_error_object *obj = error->batchbuffer[i]; | 793 | struct drm_i915_error_object *obj = error->batchbuffer[i]; |
746 | 794 | ||
747 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); | 795 | seq_printf(m, "%s --- gtt_offset = 0x%08x\n", |
796 | dev_priv->ring[i].name, | ||
797 | obj->gtt_offset); | ||
748 | offset = 0; | 798 | offset = 0; |
749 | for (page = 0; page < obj->page_count; page++) { | 799 | for (page = 0; page < obj->page_count; page++) { |
750 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { | 800 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { |
@@ -890,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
890 | struct drm_device *dev = node->minor->dev; | 940 | struct drm_device *dev = node->minor->dev; |
891 | drm_i915_private_t *dev_priv = dev->dev_private; | 941 | drm_i915_private_t *dev_priv = dev->dev_private; |
892 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 942 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
893 | u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); | 943 | u32 rstdbyctl = I915_READ(RSTDBYCTL); |
894 | u16 crstandvid = I915_READ16(CRSTANDVID); | 944 | u16 crstandvid = I915_READ16(CRSTANDVID); |
895 | 945 | ||
896 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? | 946 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? |
@@ -913,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
913 | seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); | 963 | seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); |
914 | seq_printf(m, "Render standby enabled: %s\n", | 964 | seq_printf(m, "Render standby enabled: %s\n", |
915 | (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); | 965 | (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); |
966 | seq_printf(m, "Current RS state: "); | ||
967 | switch (rstdbyctl & RSX_STATUS_MASK) { | ||
968 | case RSX_STATUS_ON: | ||
969 | seq_printf(m, "on\n"); | ||
970 | break; | ||
971 | case RSX_STATUS_RC1: | ||
972 | seq_printf(m, "RC1\n"); | ||
973 | break; | ||
974 | case RSX_STATUS_RC1E: | ||
975 | seq_printf(m, "RC1E\n"); | ||
976 | break; | ||
977 | case RSX_STATUS_RS1: | ||
978 | seq_printf(m, "RS1\n"); | ||
979 | break; | ||
980 | case RSX_STATUS_RS2: | ||
981 | seq_printf(m, "RS2 (RC6)\n"); | ||
982 | break; | ||
983 | case RSX_STATUS_RS3: | ||
984 | seq_printf(m, "RC3 (RC6+)\n"); | ||
985 | break; | ||
986 | default: | ||
987 | seq_printf(m, "unknown\n"); | ||
988 | break; | ||
989 | } | ||
916 | 990 | ||
917 | return 0; | 991 | return 0; |
918 | } | 992 | } |
@@ -1187,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | |||
1187 | static struct drm_info_list i915_debugfs_list[] = { | 1261 | static struct drm_info_list i915_debugfs_list[] = { |
1188 | {"i915_capabilities", i915_capabilities, 0, 0}, | 1262 | {"i915_capabilities", i915_capabilities, 0, 0}, |
1189 | {"i915_gem_objects", i915_gem_object_info, 0}, | 1263 | {"i915_gem_objects", i915_gem_object_info, 0}, |
1264 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, | ||
1190 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 1265 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
1191 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | 1266 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
1192 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | 1267 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, |
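The new i915_gem_gtt debugfs node follows the usual drm_info_list shape: take the lock interruptibly, print one line per object on the list, then print totals. A stripped-down sketch of that shape follows; the lock, list head, and object type are placeholders standing in for dev->struct_mutex, dev_priv->mm.gtt_list, and drm_i915_gem_object, not driver names.

```c
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/list.h>

struct my_obj {
	struct list_head link;
	size_t size;
};

static DEFINE_MUTEX(my_lock);		/* stands in for dev->struct_mutex */
static LIST_HEAD(my_objects);		/* stands in for dev_priv->mm.gtt_list */

/* Illustrative only: same pattern as i915_gem_gtt_info() above. */
static int my_gtt_info(struct seq_file *m, void *unused)
{
	struct my_obj *obj;
	size_t total = 0;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&my_lock);
	if (ret)
		return ret;		/* propagate -EINTR to the reader */

	list_for_each_entry(obj, &my_objects, link) {
		seq_printf(m, "   %p: %zu bytes\n", obj, obj->size);
		total += obj->size;
		count++;
	}

	mutex_unlock(&my_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, total);
	return 0;
}
```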
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0568dbdc10ef..844f3c972b04 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1962,13 +1962,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1962 | /* enable GEM by default */ | 1962 | /* enable GEM by default */ |
1963 | dev_priv->has_gem = 1; | 1963 | dev_priv->has_gem = 1; |
1964 | 1964 | ||
1965 | if (dev_priv->has_gem == 0 && | ||
1966 | drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
1967 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | ||
1968 | ret = -ENODEV; | ||
1969 | goto out_workqueue_free; | ||
1970 | } | ||
1971 | |||
1972 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 1965 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
1973 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 1966 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
1974 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { | 1967 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { |
@@ -2055,7 +2048,6 @@ out_gem_unload: | |||
2055 | 2048 | ||
2056 | intel_teardown_gmbus(dev); | 2049 | intel_teardown_gmbus(dev); |
2057 | intel_teardown_mchbar(dev); | 2050 | intel_teardown_mchbar(dev); |
2058 | out_workqueue_free: | ||
2059 | destroy_workqueue(dev_priv->wq); | 2051 | destroy_workqueue(dev_priv->wq); |
2060 | out_iomapfree: | 2052 | out_iomapfree: |
2061 | io_mapping_free(dev_priv->mm.gtt_mapping); | 2053 | io_mapping_free(dev_priv->mm.gtt_mapping); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 872493331988..0de75a23f8e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
49 | unsigned int i915_lvds_downclock = 0; | 49 | unsigned int i915_lvds_downclock = 0; |
50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
51 | 51 | ||
52 | bool i915_try_reset = true; | ||
53 | module_param_named(reset, i915_try_reset, bool, 0600); | ||
54 | |||
52 | static struct drm_driver driver; | 55 | static struct drm_driver driver; |
53 | extern int intel_agp_enabled; | 56 | extern int intel_agp_enabled; |
54 | 57 | ||
@@ -352,6 +355,9 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
352 | 355 | ||
353 | /* Resume the modeset for every activated CRTC */ | 356 | /* Resume the modeset for every activated CRTC */ |
354 | drm_helper_resume_force_mode(dev); | 357 | drm_helper_resume_force_mode(dev); |
358 | |||
359 | if (dev_priv->renderctx && dev_priv->pwrctx) | ||
360 | ironlake_enable_rc6(dev); | ||
355 | } | 361 | } |
356 | 362 | ||
357 | intel_opregion_init(dev); | 363 | intel_opregion_init(dev); |
@@ -475,6 +481,9 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
475 | bool need_display = true; | 481 | bool need_display = true; |
476 | int ret; | 482 | int ret; |
477 | 483 | ||
484 | if (!i915_try_reset) | ||
485 | return 0; | ||
486 | |||
478 | if (!mutex_trylock(&dev->struct_mutex)) | 487 | if (!mutex_trylock(&dev->struct_mutex)) |
479 | return -EBUSY; | 488 | return -EBUSY; |
480 | 489 | ||
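The i915_drv.c hunk above adds a writable module parameter that lets the user opt out of GPU reset attempts, then checks it at the top of i915_reset(). A minimal sketch of that opt-out pattern is below; the names and the MODULE_PARM_DESC text are illustrative, and the surrounding reset logic is elided.

```c
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative only: mirrors the i915.reset parameter added above. */
static bool try_reset = true;
module_param_named(reset, try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt a GPU reset when the hardware hangs (default: true)");

static int do_reset(void)
{
	if (!try_reset)
		return 0;	/* user asked us not to touch the hardware */

	/* ... take locks, reset the GPU, reinitialise ring state ... */
	return 0;
}
```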
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aac1bf332f75..385fc7ec39d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,20 +172,21 @@ struct drm_i915_error_state {
172 | int page_count; | 172 | int page_count; |
173 | u32 gtt_offset; | 173 | u32 gtt_offset; |
174 | u32 *pages[0]; | 174 | u32 *pages[0]; |
175 | } *ringbuffer, *batchbuffer[2]; | 175 | } *ringbuffer, *batchbuffer[I915_NUM_RINGS]; |
176 | struct drm_i915_error_buffer { | 176 | struct drm_i915_error_buffer { |
177 | size_t size; | 177 | u32 size; |
178 | u32 name; | 178 | u32 name; |
179 | u32 seqno; | 179 | u32 seqno; |
180 | u32 gtt_offset; | 180 | u32 gtt_offset; |
181 | u32 read_domains; | 181 | u32 read_domains; |
182 | u32 write_domain; | 182 | u32 write_domain; |
183 | u32 fence_reg; | 183 | s32 fence_reg:5; |
184 | s32 pinned:2; | 184 | s32 pinned:2; |
185 | u32 tiling:2; | 185 | u32 tiling:2; |
186 | u32 dirty:1; | 186 | u32 dirty:1; |
187 | u32 purgeable:1; | 187 | u32 purgeable:1; |
188 | u32 ring:4; | 188 | u32 ring:4; |
189 | u32 agp_type:1; | ||
189 | } *active_bo, *pinned_bo; | 190 | } *active_bo, *pinned_bo; |
190 | u32 active_bo_count, pinned_bo_count; | 191 | u32 active_bo_count, pinned_bo_count; |
191 | struct intel_overlay_error_state *overlay; | 192 | struct intel_overlay_error_state *overlay; |
@@ -332,6 +333,7 @@ typedef struct drm_i915_private { | |||
332 | 333 | ||
333 | /* LVDS info */ | 334 | /* LVDS info */ |
334 | int backlight_level; /* restore backlight to this value */ | 335 | int backlight_level; /* restore backlight to this value */ |
336 | bool backlight_enabled; | ||
335 | struct drm_display_mode *panel_fixed_mode; | 337 | struct drm_display_mode *panel_fixed_mode; |
336 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | 338 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
337 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | 339 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
@@ -794,6 +796,7 @@ struct drm_i915_gem_object { | |||
794 | */ | 796 | */ |
795 | struct hlist_node exec_node; | 797 | struct hlist_node exec_node; |
796 | unsigned long exec_handle; | 798 | unsigned long exec_handle; |
799 | struct drm_i915_gem_exec_object2 *exec_entry; | ||
797 | 800 | ||
798 | /** | 801 | /** |
799 | * Current offset of the object in GTT space. | 802 | * Current offset of the object in GTT space. |
@@ -1006,12 +1009,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); | |||
1006 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); | 1009 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); |
1007 | extern int i915_vblank_swap(struct drm_device *dev, void *data, | 1010 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
1008 | struct drm_file *file_priv); | 1011 | struct drm_file *file_priv); |
1009 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
1010 | extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
1011 | extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, | ||
1012 | u32 mask); | ||
1013 | extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, | ||
1014 | u32 mask); | ||
1015 | 1012 | ||
1016 | void | 1013 | void |
1017 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 1014 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
@@ -1091,10 +1088,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
1091 | struct drm_file *file_priv); | 1088 | struct drm_file *file_priv); |
1092 | void i915_gem_load(struct drm_device *dev); | 1089 | void i915_gem_load(struct drm_device *dev); |
1093 | int i915_gem_init_object(struct drm_gem_object *obj); | 1090 | int i915_gem_init_object(struct drm_gem_object *obj); |
1094 | void i915_gem_flush_ring(struct drm_device *dev, | 1091 | int __must_check i915_gem_flush_ring(struct drm_device *dev, |
1095 | struct intel_ring_buffer *ring, | 1092 | struct intel_ring_buffer *ring, |
1096 | uint32_t invalidate_domains, | 1093 | uint32_t invalidate_domains, |
1097 | uint32_t flush_domains); | 1094 | uint32_t flush_domains); |
1098 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 1095 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1099 | size_t size); | 1096 | size_t size); |
1100 | void i915_gem_free_object(struct drm_gem_object *obj); | 1097 | void i915_gem_free_object(struct drm_gem_object *obj); |
@@ -1265,6 +1262,7 @@ extern void intel_disable_fbc(struct drm_device *dev); | |||
1265 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); | 1262 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); |
1266 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1263 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1267 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1264 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1265 | extern void ironlake_enable_rc6(struct drm_device *dev); | ||
1268 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | 1266 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
1269 | extern void intel_detect_pch (struct drm_device *dev); | 1267 | extern void intel_detect_pch (struct drm_device *dev); |
1270 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | 1268 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
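The i915_drv.h hunk shrinks struct drm_i915_error_buffer by narrowing fields into bitfields (size becomes u32, fence_reg a 5-bit signed field, plus a new 1-bit agp_type), since error-state capture records one of these per buffer object. A tiny sketch of the packing idea follows; the names and exact widths here are illustrative, not the driver's layout.

```c
#include <linux/types.h>

/* Illustrative only: packing small enumerations into bitfields keeps the
 * per-object error record compact when many objects are captured.
 */
struct error_buffer {
	u32 size;
	u32 gtt_offset;
	s32 fence_reg:5;	/* signed so -1 can mean "no fence" */
	s32 pinned:2;
	u32 tiling:2;
	u32 dirty:1;
	u32 purgeable:1;
	u32 ring:4;
	u32 cached:1;		/* stands in for the new agp_type bit */
};
```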
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c79c0b62ef60..3dfc848ff755 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | 37 | ||
38 | static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); | 38 | static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); |
39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); | 39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | 40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
41 | static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, | 41 | static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, |
42 | bool write); | 42 | bool write); |
43 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, | 43 | static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
44 | uint64_t offset, | 44 | uint64_t offset, |
45 | uint64_t size); | 45 | uint64_t size); |
46 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); | 46 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); |
47 | static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | 47 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
48 | unsigned alignment, | 48 | unsigned alignment, |
49 | bool map_and_fenceable); | 49 | bool map_and_fenceable); |
50 | static void i915_gem_clear_fence_reg(struct drm_device *dev, | 50 | static void i915_gem_clear_fence_reg(struct drm_device *dev, |
51 | struct drm_i915_fence_reg *reg); | 51 | struct drm_i915_fence_reg *reg); |
52 | static int i915_gem_phys_pwrite(struct drm_device *dev, | 52 | static int i915_gem_phys_pwrite(struct drm_device *dev, |
@@ -1935,6 +1935,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1935 | { | 1935 | { |
1936 | drm_i915_private_t *dev_priv; | 1936 | drm_i915_private_t *dev_priv; |
1937 | struct drm_device *dev; | 1937 | struct drm_device *dev; |
1938 | bool idle; | ||
1939 | int i; | ||
1938 | 1940 | ||
1939 | dev_priv = container_of(work, drm_i915_private_t, | 1941 | dev_priv = container_of(work, drm_i915_private_t, |
1940 | mm.retire_work.work); | 1942 | mm.retire_work.work); |
@@ -1948,11 +1950,31 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1948 | 1950 | ||
1949 | i915_gem_retire_requests(dev); | 1951 | i915_gem_retire_requests(dev); |
1950 | 1952 | ||
1951 | if (!dev_priv->mm.suspended && | 1953 | /* Send a periodic flush down the ring so we don't hold onto GEM |
1952 | (!list_empty(&dev_priv->ring[RCS].request_list) || | 1954 | * objects indefinitely. |
1953 | !list_empty(&dev_priv->ring[VCS].request_list) || | 1955 | */ |
1954 | !list_empty(&dev_priv->ring[BCS].request_list))) | 1956 | idle = true; |
1957 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
1958 | struct intel_ring_buffer *ring = &dev_priv->ring[i]; | ||
1959 | |||
1960 | if (!list_empty(&ring->gpu_write_list)) { | ||
1961 | struct drm_i915_gem_request *request; | ||
1962 | int ret; | ||
1963 | |||
1964 | ret = i915_gem_flush_ring(dev, ring, 0, | ||
1965 | I915_GEM_GPU_DOMAINS); | ||
1966 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
1967 | if (ret || request == NULL || | ||
1968 | i915_add_request(dev, NULL, request, ring)) | ||
1969 | kfree(request); | ||
1970 | } | ||
1971 | |||
1972 | idle &= list_empty(&ring->request_list); | ||
1973 | } | ||
1974 | |||
1975 | if (!dev_priv->mm.suspended && !idle) | ||
1955 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1976 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1977 | |||
1956 | mutex_unlock(&dev->struct_mutex); | 1978 | mutex_unlock(&dev->struct_mutex); |
1957 | } | 1979 | } |
1958 | 1980 | ||
@@ -2142,25 +2164,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2142 | return ret; | 2164 | return ret; |
2143 | } | 2165 | } |
2144 | 2166 | ||
2145 | void | 2167 | int |
2146 | i915_gem_flush_ring(struct drm_device *dev, | 2168 | i915_gem_flush_ring(struct drm_device *dev, |
2147 | struct intel_ring_buffer *ring, | 2169 | struct intel_ring_buffer *ring, |
2148 | uint32_t invalidate_domains, | 2170 | uint32_t invalidate_domains, |
2149 | uint32_t flush_domains) | 2171 | uint32_t flush_domains) |
2150 | { | 2172 | { |
2151 | ring->flush(ring, invalidate_domains, flush_domains); | 2173 | int ret; |
2174 | |||
2175 | ret = ring->flush(ring, invalidate_domains, flush_domains); | ||
2176 | if (ret) | ||
2177 | return ret; | ||
2178 | |||
2152 | i915_gem_process_flushing_list(dev, flush_domains, ring); | 2179 | i915_gem_process_flushing_list(dev, flush_domains, ring); |
2180 | return 0; | ||
2153 | } | 2181 | } |
2154 | 2182 | ||
2155 | static int i915_ring_idle(struct drm_device *dev, | 2183 | static int i915_ring_idle(struct drm_device *dev, |
2156 | struct intel_ring_buffer *ring) | 2184 | struct intel_ring_buffer *ring) |
2157 | { | 2185 | { |
2186 | int ret; | ||
2187 | |||
2158 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | 2188 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) |
2159 | return 0; | 2189 | return 0; |
2160 | 2190 | ||
2161 | if (!list_empty(&ring->gpu_write_list)) | 2191 | if (!list_empty(&ring->gpu_write_list)) { |
2162 | i915_gem_flush_ring(dev, ring, | 2192 | ret = i915_gem_flush_ring(dev, ring, |
2163 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2193 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2194 | if (ret) | ||
2195 | return ret; | ||
2196 | } | ||
2197 | |||
2164 | return i915_wait_request(dev, | 2198 | return i915_wait_request(dev, |
2165 | i915_gem_next_request_seqno(dev, ring), | 2199 | i915_gem_next_request_seqno(dev, ring), |
2166 | ring); | 2200 | ring); |
@@ -2370,10 +2404,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | |||
2370 | int ret; | 2404 | int ret; |
2371 | 2405 | ||
2372 | if (obj->fenced_gpu_access) { | 2406 | if (obj->fenced_gpu_access) { |
2373 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | 2407 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
2374 | i915_gem_flush_ring(obj->base.dev, | 2408 | ret = i915_gem_flush_ring(obj->base.dev, |
2375 | obj->last_fenced_ring, | 2409 | obj->last_fenced_ring, |
2376 | 0, obj->base.write_domain); | 2410 | 0, obj->base.write_domain); |
2411 | if (ret) | ||
2412 | return ret; | ||
2413 | } | ||
2377 | 2414 | ||
2378 | obj->fenced_gpu_access = false; | 2415 | obj->fenced_gpu_access = false; |
2379 | } | 2416 | } |
@@ -2393,6 +2430,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | |||
2393 | obj->last_fenced_ring = NULL; | 2430 | obj->last_fenced_ring = NULL; |
2394 | } | 2431 | } |
2395 | 2432 | ||
2433 | /* Ensure that all CPU reads are completed before installing a fence | ||
2434 | * and all writes before removing the fence. | ||
2435 | */ | ||
2436 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) | ||
2437 | mb(); | ||
2438 | |||
2396 | return 0; | 2439 | return 0; |
2397 | } | 2440 | } |
2398 | 2441 | ||
@@ -2523,9 +2566,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2523 | return ret; | 2566 | return ret; |
2524 | } else if (obj->tiling_changed) { | 2567 | } else if (obj->tiling_changed) { |
2525 | if (obj->fenced_gpu_access) { | 2568 | if (obj->fenced_gpu_access) { |
2526 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | 2569 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
2527 | i915_gem_flush_ring(obj->base.dev, obj->ring, | 2570 | ret = i915_gem_flush_ring(obj->base.dev, obj->ring, |
2528 | 0, obj->base.write_domain); | 2571 | 0, obj->base.write_domain); |
2572 | if (ret) | ||
2573 | return ret; | ||
2574 | } | ||
2529 | 2575 | ||
2530 | obj->fenced_gpu_access = false; | 2576 | obj->fenced_gpu_access = false; |
2531 | } | 2577 | } |
@@ -2736,10 +2782,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2736 | obj->gtt_space = NULL; | 2782 | obj->gtt_space = NULL; |
2737 | 2783 | ||
2738 | if (ret == -ENOMEM) { | 2784 | if (ret == -ENOMEM) { |
2739 | /* first try to clear up some space from the GTT */ | 2785 | /* first try to reclaim some memory by clearing the GTT */ |
2740 | ret = i915_gem_evict_something(dev, size, | 2786 | ret = i915_gem_evict_everything(dev, false); |
2741 | alignment, | ||
2742 | map_and_fenceable); | ||
2743 | if (ret) { | 2787 | if (ret) { |
2744 | /* now try to shrink everyone else */ | 2788 | /* now try to shrink everyone else */ |
2745 | if (gfpmask) { | 2789 | if (gfpmask) { |
@@ -2747,7 +2791,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2747 | goto search_free; | 2791 | goto search_free; |
2748 | } | 2792 | } |
2749 | 2793 | ||
2750 | return ret; | 2794 | return -ENOMEM; |
2751 | } | 2795 | } |
2752 | 2796 | ||
2753 | goto search_free; | 2797 | goto search_free; |
@@ -2762,9 +2806,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2762 | drm_mm_put_block(obj->gtt_space); | 2806 | drm_mm_put_block(obj->gtt_space); |
2763 | obj->gtt_space = NULL; | 2807 | obj->gtt_space = NULL; |
2764 | 2808 | ||
2765 | ret = i915_gem_evict_something(dev, size, | 2809 | if (i915_gem_evict_everything(dev, false)) |
2766 | alignment, map_and_fenceable); | ||
2767 | if (ret) | ||
2768 | return ret; | 2810 | return ret; |
2769 | 2811 | ||
2770 | goto search_free; | 2812 | goto search_free; |
@@ -2811,17 +2853,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) | |||
2811 | } | 2853 | } |
2812 | 2854 | ||
2813 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2855 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2814 | static void | 2856 | static int |
2815 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) | 2857 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) |
2816 | { | 2858 | { |
2817 | struct drm_device *dev = obj->base.dev; | 2859 | struct drm_device *dev = obj->base.dev; |
2818 | 2860 | ||
2819 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2861 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2820 | return; | 2862 | return 0; |
2821 | 2863 | ||
2822 | /* Queue the GPU write cache flushing we need. */ | 2864 | /* Queue the GPU write cache flushing we need. */ |
2823 | i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); | 2865 | return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); |
2824 | BUG_ON(obj->base.write_domain); | ||
2825 | } | 2866 | } |
2826 | 2867 | ||
2827 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2868 | /** Flushes the GTT write domain for the object if it's dirty. */ |
@@ -2833,10 +2874,16 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) | |||
2833 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) | 2874 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) |
2834 | return; | 2875 | return; |
2835 | 2876 | ||
2836 | /* No actual flushing is required for the GTT write domain. Writes | 2877 | /* No actual flushing is required for the GTT write domain. Writes |
2837 | * to it immediately go to main memory as far as we know, so there's | 2878 | * to it immediately go to main memory as far as we know, so there's |
2838 | * no chipset flush. It also doesn't land in render cache. | 2879 | * no chipset flush. It also doesn't land in render cache. |
2880 | * | ||
2881 | * However, we do have to enforce the order so that all writes through | ||
2882 | * the GTT land before any writes to the device, such as updates to | ||
2883 | * the GATT itself. | ||
2839 | */ | 2884 | */ |
2885 | wmb(); | ||
2886 | |||
2840 | i915_gem_release_mmap(obj); | 2887 | i915_gem_release_mmap(obj); |
2841 | 2888 | ||
2842 | old_write_domain = obj->base.write_domain; | 2889 | old_write_domain = obj->base.write_domain; |
@@ -2882,7 +2929,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
2882 | if (obj->gtt_space == NULL) | 2929 | if (obj->gtt_space == NULL) |
2883 | return -EINVAL; | 2930 | return -EINVAL; |
2884 | 2931 | ||
2885 | i915_gem_object_flush_gpu_write_domain(obj); | 2932 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2933 | if (ret) | ||
2934 | return ret; | ||
2935 | |||
2886 | if (obj->pending_gpu_write || write) { | 2936 | if (obj->pending_gpu_write || write) { |
2887 | ret = i915_gem_object_wait_rendering(obj, true); | 2937 | ret = i915_gem_object_wait_rendering(obj, true); |
2888 | if (ret) | 2938 | if (ret) |
@@ -2927,7 +2977,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | |||
2927 | if (obj->gtt_space == NULL) | 2977 | if (obj->gtt_space == NULL) |
2928 | return -EINVAL; | 2978 | return -EINVAL; |
2929 | 2979 | ||
2930 | i915_gem_object_flush_gpu_write_domain(obj); | 2980 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2981 | if (ret) | ||
2982 | return ret; | ||
2983 | |||
2931 | 2984 | ||
2932 | /* Currently, we are always called from an non-interruptible context. */ | 2985 | /* Currently, we are always called from an non-interruptible context. */ |
2933 | if (pipelined != obj->ring) { | 2986 | if (pipelined != obj->ring) { |
@@ -2952,12 +3005,17 @@ int | |||
2952 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | 3005 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, |
2953 | bool interruptible) | 3006 | bool interruptible) |
2954 | { | 3007 | { |
3008 | int ret; | ||
3009 | |||
2955 | if (!obj->active) | 3010 | if (!obj->active) |
2956 | return 0; | 3011 | return 0; |
2957 | 3012 | ||
2958 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | 3013 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
2959 | i915_gem_flush_ring(obj->base.dev, obj->ring, | 3014 | ret = i915_gem_flush_ring(obj->base.dev, obj->ring, |
2960 | 0, obj->base.write_domain); | 3015 | 0, obj->base.write_domain); |
3016 | if (ret) | ||
3017 | return ret; | ||
3018 | } | ||
2961 | 3019 | ||
2962 | return i915_gem_object_wait_rendering(obj, interruptible); | 3020 | return i915_gem_object_wait_rendering(obj, interruptible); |
2963 | } | 3021 | } |
@@ -2974,7 +3032,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
2974 | uint32_t old_write_domain, old_read_domains; | 3032 | uint32_t old_write_domain, old_read_domains; |
2975 | int ret; | 3033 | int ret; |
2976 | 3034 | ||
2977 | i915_gem_object_flush_gpu_write_domain(obj); | 3035 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3036 | if (ret) | ||
3037 | return ret; | ||
3038 | |||
2978 | ret = i915_gem_object_wait_rendering(obj, true); | 3039 | ret = i915_gem_object_wait_rendering(obj, true); |
2979 | if (ret) | 3040 | if (ret) |
2980 | return ret; | 3041 | return ret; |
@@ -3069,7 +3130,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, | |||
3069 | if (offset == 0 && size == obj->base.size) | 3130 | if (offset == 0 && size == obj->base.size) |
3070 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3131 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3071 | 3132 | ||
3072 | i915_gem_object_flush_gpu_write_domain(obj); | 3133 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3134 | if (ret) | ||
3135 | return ret; | ||
3136 | |||
3073 | ret = i915_gem_object_wait_rendering(obj, true); | 3137 | ret = i915_gem_object_wait_rendering(obj, true); |
3074 | if (ret) | 3138 | if (ret) |
3075 | return ret; | 3139 | return ret; |
@@ -3362,8 +3426,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3362 | * flush earlier is beneficial. | 3426 | * flush earlier is beneficial. |
3363 | */ | 3427 | */ |
3364 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 3428 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
3365 | i915_gem_flush_ring(dev, obj->ring, | 3429 | ret = i915_gem_flush_ring(dev, obj->ring, |
3366 | 0, obj->base.write_domain); | 3430 | 0, obj->base.write_domain); |
3367 | } else if (obj->ring->outstanding_lazy_request == | 3431 | } else if (obj->ring->outstanding_lazy_request == |
3368 | obj->last_rendering_seqno) { | 3432 | obj->last_rendering_seqno) { |
3369 | struct drm_i915_gem_request *request; | 3433 | struct drm_i915_gem_request *request; |
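A recurring change across the i915_gem.c hunks is turning fire-and-forget flush helpers into functions that return an error, marking them __must_check, and propagating the failure at every call site. The condensed sketch below shows that shape only; the ring type and domain values are stand-ins, and the real driver work is stubbed out.

```c
#include <linux/compiler.h>
#include <linux/types.h>

struct ring;	/* stand-in for struct intel_ring_buffer */

/* Stub: in the driver this emits flush commands to the ring and can fail. */
static inline int emit_flush(struct ring *ring, u32 invalidate, u32 flush)
{
	return 0;
}

/* Illustrative only: the old helper returned void and silently dropped a
 * failed flush; the new one returns the error and is __must_check so
 * callers are forced to handle it.
 */
static int __must_check flush_ring(struct ring *ring, u32 invalidate, u32 flush)
{
	int ret = emit_flush(ring, invalidate, flush);
	if (ret)
		return ret;

	/* only process the flushing list once the flush was really queued */
	return 0;
}

static int set_to_gtt_domain(struct ring *ring)
{
	int ret = flush_ring(ring, 0, ~0u);
	if (ret)
		return ret;	/* every call site now propagates the error */

	/* ... domain tracking continues here ... */
	return 0;
}
```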
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 78b8cf90c922..3d39005540aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -127,9 +127,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
127 | } | 127 | } |
128 | 128 | ||
129 | /* Nothing found, clean up and bail out! */ | 129 | /* Nothing found, clean up and bail out! */ |
130 | list_for_each_entry(obj, &unwind_list, exec_list) { | 130 | while (!list_empty(&unwind_list)) { |
131 | obj = list_first_entry(&unwind_list, | ||
132 | struct drm_i915_gem_object, | ||
133 | exec_list); | ||
134 | |||
131 | ret = drm_mm_scan_remove_block(obj->gtt_space); | 135 | ret = drm_mm_scan_remove_block(obj->gtt_space); |
132 | BUG_ON(ret); | 136 | BUG_ON(ret); |
137 | |||
138 | list_del_init(&obj->exec_list); | ||
133 | drm_gem_object_unreference(&obj->base); | 139 | drm_gem_object_unreference(&obj->base); |
134 | } | 140 | } |
135 | 141 | ||
@@ -162,6 +168,7 @@ found: | |||
162 | exec_list); | 168 | exec_list); |
163 | if (ret == 0) | 169 | if (ret == 0) |
164 | ret = i915_gem_object_unbind(obj); | 170 | ret = i915_gem_object_unbind(obj); |
171 | |||
165 | list_del_init(&obj->exec_list); | 172 | list_del_init(&obj->exec_list); |
166 | drm_gem_object_unreference(&obj->base); | 173 | drm_gem_object_unreference(&obj->base); |
167 | } | 174 | } |
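The unwind path above switches from list_for_each_entry() to a while (!list_empty()) / list_first_entry() loop because each object is unlinked (and may be freed) as it is processed; iterating with a cursor over a list you are emptying is unsafe. A small self-contained sketch of that drain pattern, with kfree() standing in for drm_gem_object_unreference():

```c
#include <linux/list.h>
#include <linux/slab.h>

struct node {
	struct list_head link;
};

/* Illustrative only: drain a list whose entries may be freed as we go.
 * A plain list_for_each_entry() would keep dereferencing a node after
 * it has been unlinked; taking the first entry each time avoids that.
 */
static void drain(struct list_head *head)
{
	while (!list_empty(head)) {
		struct node *n = list_first_entry(head, struct node, link);

		list_del_init(&n->link);
		kfree(n);
	}
}
```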
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 61129e6759eb..e69834341ef0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -268,7 +268,6 @@ eb_destroy(struct eb_objects *eb)
268 | static int | 268 | static int |
269 | i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | 269 | i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, |
270 | struct eb_objects *eb, | 270 | struct eb_objects *eb, |
271 | struct drm_i915_gem_exec_object2 *entry, | ||
272 | struct drm_i915_gem_relocation_entry *reloc) | 271 | struct drm_i915_gem_relocation_entry *reloc) |
273 | { | 272 | { |
274 | struct drm_device *dev = obj->base.dev; | 273 | struct drm_device *dev = obj->base.dev; |
@@ -411,10 +410,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
411 | 410 | ||
412 | static int | 411 | static int |
413 | i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, | 412 | i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, |
414 | struct eb_objects *eb, | 413 | struct eb_objects *eb) |
415 | struct drm_i915_gem_exec_object2 *entry) | ||
416 | { | 414 | { |
417 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 415 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
416 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
418 | int i, ret; | 417 | int i, ret; |
419 | 418 | ||
420 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; | 419 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; |
@@ -426,7 +425,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, | |||
426 | sizeof(reloc))) | 425 | sizeof(reloc))) |
427 | return -EFAULT; | 426 | return -EFAULT; |
428 | 427 | ||
429 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc); | 428 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); |
430 | if (ret) | 429 | if (ret) |
431 | return ret; | 430 | return ret; |
432 | 431 | ||
@@ -442,13 +441,13 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, | |||
442 | static int | 441 | static int |
443 | i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | 442 | i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, |
444 | struct eb_objects *eb, | 443 | struct eb_objects *eb, |
445 | struct drm_i915_gem_exec_object2 *entry, | ||
446 | struct drm_i915_gem_relocation_entry *relocs) | 444 | struct drm_i915_gem_relocation_entry *relocs) |
447 | { | 445 | { |
446 | const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
448 | int i, ret; | 447 | int i, ret; |
449 | 448 | ||
450 | for (i = 0; i < entry->relocation_count; i++) { | 449 | for (i = 0; i < entry->relocation_count; i++) { |
451 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]); | 450 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); |
452 | if (ret) | 451 | if (ret) |
453 | return ret; | 452 | return ret; |
454 | } | 453 | } |
@@ -459,8 +458,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | |||
459 | static int | 458 | static int |
460 | i915_gem_execbuffer_relocate(struct drm_device *dev, | 459 | i915_gem_execbuffer_relocate(struct drm_device *dev, |
461 | struct eb_objects *eb, | 460 | struct eb_objects *eb, |
462 | struct list_head *objects, | 461 | struct list_head *objects) |
463 | struct drm_i915_gem_exec_object2 *exec) | ||
464 | { | 462 | { |
465 | struct drm_i915_gem_object *obj; | 463 | struct drm_i915_gem_object *obj; |
466 | int ret; | 464 | int ret; |
@@ -468,7 +466,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
468 | list_for_each_entry(obj, objects, exec_list) { | 466 | list_for_each_entry(obj, objects, exec_list) { |
469 | obj->base.pending_read_domains = 0; | 467 | obj->base.pending_read_domains = 0; |
470 | obj->base.pending_write_domain = 0; | 468 | obj->base.pending_write_domain = 0; |
471 | ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++); | 469 | ret = i915_gem_execbuffer_relocate_object(obj, eb); |
472 | if (ret) | 470 | if (ret) |
473 | return ret; | 471 | return ret; |
474 | } | 472 | } |
@@ -479,13 +477,36 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
479 | static int | 477 | static int |
480 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | 478 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
481 | struct drm_file *file, | 479 | struct drm_file *file, |
482 | struct list_head *objects, | 480 | struct list_head *objects) |
483 | struct drm_i915_gem_exec_object2 *exec) | ||
484 | { | 481 | { |
485 | struct drm_i915_gem_object *obj; | 482 | struct drm_i915_gem_object *obj; |
486 | struct drm_i915_gem_exec_object2 *entry; | ||
487 | int ret, retry; | 483 | int ret, retry; |
488 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | 484 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; |
485 | struct list_head ordered_objects; | ||
486 | |||
487 | INIT_LIST_HEAD(&ordered_objects); | ||
488 | while (!list_empty(objects)) { | ||
489 | struct drm_i915_gem_exec_object2 *entry; | ||
490 | bool need_fence, need_mappable; | ||
491 | |||
492 | obj = list_first_entry(objects, | ||
493 | struct drm_i915_gem_object, | ||
494 | exec_list); | ||
495 | entry = obj->exec_entry; | ||
496 | |||
497 | need_fence = | ||
498 | has_fenced_gpu_access && | ||
499 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
500 | obj->tiling_mode != I915_TILING_NONE; | ||
501 | need_mappable = | ||
502 | entry->relocation_count ? true : need_fence; | ||
503 | |||
504 | if (need_mappable) | ||
505 | list_move(&obj->exec_list, &ordered_objects); | ||
506 | else | ||
507 | list_move_tail(&obj->exec_list, &ordered_objects); | ||
508 | } | ||
509 | list_splice(&ordered_objects, objects); | ||
489 | 510 | ||
490 | /* Attempt to pin all of the buffers into the GTT. | 511 | /* Attempt to pin all of the buffers into the GTT. |
491 | * This is done in 3 phases: | 512 | * This is done in 3 phases: |
@@ -504,14 +525,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
504 | ret = 0; | 525 | ret = 0; |
505 | 526 | ||
506 | /* Unbind any ill-fitting objects or pin. */ | 527 | /* Unbind any ill-fitting objects or pin. */ |
507 | entry = exec; | ||
508 | list_for_each_entry(obj, objects, exec_list) { | 528 | list_for_each_entry(obj, objects, exec_list) { |
529 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
509 | bool need_fence, need_mappable; | 530 | bool need_fence, need_mappable; |
510 | 531 | if (!obj->gtt_space) | |
511 | if (!obj->gtt_space) { | ||
512 | entry++; | ||
513 | continue; | 532 | continue; |
514 | } | ||
515 | 533 | ||
516 | need_fence = | 534 | need_fence = |
517 | has_fenced_gpu_access && | 535 | has_fenced_gpu_access && |
@@ -534,8 +552,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
534 | } | 552 | } |
535 | 553 | ||
536 | /* Bind fresh objects */ | 554 | /* Bind fresh objects */ |
537 | entry = exec; | ||
538 | list_for_each_entry(obj, objects, exec_list) { | 555 | list_for_each_entry(obj, objects, exec_list) { |
556 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
539 | bool need_fence; | 557 | bool need_fence; |
540 | 558 | ||
541 | need_fence = | 559 | need_fence = |
@@ -570,7 +588,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
570 | } | 588 | } |
571 | 589 | ||
572 | entry->offset = obj->gtt_offset; | 590 | entry->offset = obj->gtt_offset; |
573 | entry++; | ||
574 | } | 591 | } |
575 | 592 | ||
576 | /* Decrement pin count for bound objects */ | 593 | /* Decrement pin count for bound objects */ |
@@ -622,7 +639,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
622 | int i, total, ret; | 639 | int i, total, ret; |
623 | 640 | ||
624 | /* We may process another execbuffer during the unlock... */ | 641 | /* We may process another execbuffer during the unlock... */ |
625 | while (list_empty(objects)) { | 642 | while (!list_empty(objects)) { |
626 | obj = list_first_entry(objects, | 643 | obj = list_first_entry(objects, |
627 | struct drm_i915_gem_object, | 644 | struct drm_i915_gem_object, |
628 | exec_list); | 645 | exec_list); |
@@ -665,7 +682,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
665 | } | 682 | } |
666 | 683 | ||
667 | /* reacquire the objects */ | 684 | /* reacquire the objects */ |
668 | INIT_LIST_HEAD(objects); | ||
669 | eb_reset(eb); | 685 | eb_reset(eb); |
670 | for (i = 0; i < count; i++) { | 686 | for (i = 0; i < count; i++) { |
671 | struct drm_i915_gem_object *obj; | 687 | struct drm_i915_gem_object *obj; |
@@ -681,10 +697,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
681 | 697 | ||
682 | list_add_tail(&obj->exec_list, objects); | 698 | list_add_tail(&obj->exec_list, objects); |
683 | obj->exec_handle = exec[i].handle; | 699 | obj->exec_handle = exec[i].handle; |
700 | obj->exec_entry = &exec[i]; | ||
684 | eb_add_object(eb, obj); | 701 | eb_add_object(eb, obj); |
685 | } | 702 | } |
686 | 703 | ||
687 | ret = i915_gem_execbuffer_reserve(ring, file, objects, exec); | 704 | ret = i915_gem_execbuffer_reserve(ring, file, objects); |
688 | if (ret) | 705 | if (ret) |
689 | goto err; | 706 | goto err; |
690 | 707 | ||
@@ -693,7 +710,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
693 | obj->base.pending_read_domains = 0; | 710 | obj->base.pending_read_domains = 0; |
694 | obj->base.pending_write_domain = 0; | 711 | obj->base.pending_write_domain = 0; |
695 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, | 712 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, |
696 | exec, | ||
697 | reloc + total); | 713 | reloc + total); |
698 | if (ret) | 714 | if (ret) |
699 | goto err; | 715 | goto err; |
@@ -713,25 +729,34 @@ err: | |||
713 | return ret; | 729 | return ret; |
714 | } | 730 | } |
715 | 731 | ||
716 | static void | 732 | static int |
717 | i915_gem_execbuffer_flush(struct drm_device *dev, | 733 | i915_gem_execbuffer_flush(struct drm_device *dev, |
718 | uint32_t invalidate_domains, | 734 | uint32_t invalidate_domains, |
719 | uint32_t flush_domains, | 735 | uint32_t flush_domains, |
720 | uint32_t flush_rings) | 736 | uint32_t flush_rings) |
721 | { | 737 | { |
722 | drm_i915_private_t *dev_priv = dev->dev_private; | 738 | drm_i915_private_t *dev_priv = dev->dev_private; |
723 | int i; | 739 | int i, ret; |
724 | 740 | ||
725 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 741 | if (flush_domains & I915_GEM_DOMAIN_CPU) |
726 | intel_gtt_chipset_flush(); | 742 | intel_gtt_chipset_flush(); |
727 | 743 | ||
744 | if (flush_domains & I915_GEM_DOMAIN_GTT) | ||
745 | wmb(); | ||
746 | |||
728 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | 747 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { |
729 | for (i = 0; i < I915_NUM_RINGS; i++) | 748 | for (i = 0; i < I915_NUM_RINGS; i++) |
730 | if (flush_rings & (1 << i)) | 749 | if (flush_rings & (1 << i)) { |
731 | i915_gem_flush_ring(dev, &dev_priv->ring[i], | 750 | ret = i915_gem_flush_ring(dev, |
732 | invalidate_domains, | 751 | &dev_priv->ring[i], |
733 | flush_domains); | 752 | invalidate_domains, |
753 | flush_domains); | ||
754 | if (ret) | ||
755 | return ret; | ||
756 | } | ||
734 | } | 757 | } |
758 | |||
759 | return 0; | ||
735 | } | 760 | } |
736 | 761 | ||
737 | static int | 762 | static int |
@@ -795,10 +820,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | |||
795 | cd.invalidate_domains, | 820 | cd.invalidate_domains, |
796 | cd.flush_domains); | 821 | cd.flush_domains); |
797 | #endif | 822 | #endif |
798 | i915_gem_execbuffer_flush(ring->dev, | 823 | ret = i915_gem_execbuffer_flush(ring->dev, |
799 | cd.invalidate_domains, | 824 | cd.invalidate_domains, |
800 | cd.flush_domains, | 825 | cd.flush_domains, |
801 | cd.flush_rings); | 826 | cd.flush_rings); |
827 | if (ret) | ||
828 | return ret; | ||
802 | } | 829 | } |
803 | 830 | ||
804 | list_for_each_entry(obj, objects, exec_list) { | 831 | list_for_each_entry(obj, objects, exec_list) { |
@@ -921,7 +948,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, | |||
921 | struct intel_ring_buffer *ring) | 948 | struct intel_ring_buffer *ring) |
922 | { | 949 | { |
923 | struct drm_i915_gem_request *request; | 950 | struct drm_i915_gem_request *request; |
924 | u32 flush_domains; | 951 | u32 invalidate; |
925 | 952 | ||
926 | /* | 953 | /* |
927 | * Ensure that the commands in the batch buffer are | 954 | * Ensure that the commands in the batch buffer are |
@@ -929,11 +956,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, | |||
929 | * | 956 | * |
930 | * The sampler always gets flushed on i965 (sigh). | 957 | * The sampler always gets flushed on i965 (sigh). |
931 | */ | 958 | */ |
932 | flush_domains = 0; | 959 | invalidate = I915_GEM_DOMAIN_COMMAND; |
933 | if (INTEL_INFO(dev)->gen >= 4) | 960 | if (INTEL_INFO(dev)->gen >= 4) |
934 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; | 961 | invalidate |= I915_GEM_DOMAIN_SAMPLER; |
935 | 962 | if (ring->flush(ring, invalidate, 0)) { | |
936 | ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); | 963 | i915_gem_next_request_seqno(dev, ring); |
964 | return; | ||
965 | } | ||
937 | 966 | ||
938 | /* Add a breadcrumb for the completion of the batch buffer */ | 967 | /* Add a breadcrumb for the completion of the batch buffer */ |
939 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 968 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
@@ -1098,16 +1127,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1098 | 1127 | ||
1099 | list_add_tail(&obj->exec_list, &objects); | 1128 | list_add_tail(&obj->exec_list, &objects); |
1100 | obj->exec_handle = exec[i].handle; | 1129 | obj->exec_handle = exec[i].handle; |
1130 | obj->exec_entry = &exec[i]; | ||
1101 | eb_add_object(eb, obj); | 1131 | eb_add_object(eb, obj); |
1102 | } | 1132 | } |
1103 | 1133 | ||
1134 | /* take note of the batch buffer before we might reorder the lists */ | ||
1135 | batch_obj = list_entry(objects.prev, | ||
1136 | struct drm_i915_gem_object, | ||
1137 | exec_list); | ||
1138 | |||
1104 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 1139 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
1105 | ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec); | 1140 | ret = i915_gem_execbuffer_reserve(ring, file, &objects); |
1106 | if (ret) | 1141 | if (ret) |
1107 | goto err; | 1142 | goto err; |
1108 | 1143 | ||
1109 | /* The objects are in their final locations, apply the relocations. */ | 1144 | /* The objects are in their final locations, apply the relocations. */ |
1110 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec); | 1145 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects); |
1111 | if (ret) { | 1146 | if (ret) { |
1112 | if (ret == -EFAULT) { | 1147 | if (ret == -EFAULT) { |
1113 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, | 1148 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, |
@@ -1121,9 +1156,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1121 | } | 1156 | } |
1122 | 1157 | ||
1123 | /* Set the pending read domains for the batch buffer to COMMAND */ | 1158 | /* Set the pending read domains for the batch buffer to COMMAND */ |
1124 | batch_obj = list_entry(objects.prev, | ||
1125 | struct drm_i915_gem_object, | ||
1126 | exec_list); | ||
1127 | if (batch_obj->base.pending_write_domain) { | 1159 | if (batch_obj->base.pending_write_domain) { |
1128 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | 1160 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); |
1129 | ret = -EINVAL; | 1161 | ret = -EINVAL; |
@@ -1340,4 +1372,3 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1340 | drm_free_large(exec2_list); | 1372 | drm_free_large(exec2_list); |
1341 | return ret; | 1373 | return ret; |
1342 | } | 1374 | } |
1343 | |||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 86673e77d7cb..70433ae50ac8 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -85,15 +85,11 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) | |||
85 | 85 | ||
86 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | 86 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
87 | { | 87 | { |
88 | struct drm_device *dev = obj->base.dev; | 88 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
89 | struct drm_i915_private *dev_priv = dev->dev_private; | 89 | obj->base.size >> PAGE_SHIFT); |
90 | 90 | ||
91 | if (dev_priv->mm.gtt->needs_dmar) { | 91 | if (obj->sg_list) { |
92 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); | 92 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); |
93 | obj->sg_list = NULL; | 93 | obj->sg_list = NULL; |
94 | obj->num_sg = 0; | ||
95 | } | 94 | } |
96 | |||
97 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | ||
98 | obj->base.size >> PAGE_SHIFT); | ||
99 | } | 95 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0dadc025b77b..e418e8bb61e6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -64,26 +64,6 @@ | |||
64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ | 64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ |
65 | DRM_I915_VBLANK_PIPE_B) | 65 | DRM_I915_VBLANK_PIPE_B) |
66 | 66 | ||
67 | void | ||
68 | ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
69 | { | ||
70 | if ((dev_priv->gt_irq_mask & mask) != 0) { | ||
71 | dev_priv->gt_irq_mask &= ~mask; | ||
72 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
73 | POSTING_READ(GTIMR); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void | ||
78 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
79 | { | ||
80 | if ((dev_priv->gt_irq_mask & mask) != mask) { | ||
81 | dev_priv->gt_irq_mask |= mask; | ||
82 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
83 | POSTING_READ(GTIMR); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | /* For display hotplug interrupt */ | 67 | /* For display hotplug interrupt */ |
88 | static void | 68 | static void |
89 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 69 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
@@ -105,26 +85,6 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
105 | } | 85 | } |
106 | } | 86 | } |
107 | 87 | ||
108 | void | ||
109 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
110 | { | ||
111 | if ((dev_priv->irq_mask & mask) != 0) { | ||
112 | dev_priv->irq_mask &= ~mask; | ||
113 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
114 | POSTING_READ(IMR); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | void | ||
119 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
120 | { | ||
121 | if ((dev_priv->irq_mask & mask) != mask) { | ||
122 | dev_priv->irq_mask |= mask; | ||
123 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
124 | POSTING_READ(IMR); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static inline u32 | 88 | static inline u32 |
129 | i915_pipestat(int pipe) | 89 | i915_pipestat(int pipe) |
130 | { | 90 | { |
@@ -389,9 +349,12 @@ static void notify_ring(struct drm_device *dev, | |||
389 | { | 349 | { |
390 | struct drm_i915_private *dev_priv = dev->dev_private; | 350 | struct drm_i915_private *dev_priv = dev->dev_private; |
391 | u32 seqno = ring->get_seqno(ring); | 351 | u32 seqno = ring->get_seqno(ring); |
392 | ring->irq_seqno = seqno; | 352 | |
393 | trace_i915_gem_request_complete(dev, seqno); | 353 | trace_i915_gem_request_complete(dev, seqno); |
354 | |||
355 | ring->irq_seqno = seqno; | ||
394 | wake_up_all(&ring->irq_queue); | 356 | wake_up_all(&ring->irq_queue); |
357 | |||
395 | dev_priv->hangcheck_count = 0; | 358 | dev_priv->hangcheck_count = 0; |
396 | mod_timer(&dev_priv->hangcheck_timer, | 359 | mod_timer(&dev_priv->hangcheck_timer, |
397 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 360 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
@@ -435,6 +398,50 @@ static void gen6_pm_irq_handler(struct drm_device *dev) | |||
435 | I915_WRITE(GEN6_PMIIR, pm_iir); | 398 | I915_WRITE(GEN6_PMIIR, pm_iir); |
436 | } | 399 | } |
437 | 400 | ||
401 | static void pch_irq_handler(struct drm_device *dev) | ||
402 | { | ||
403 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
404 | u32 pch_iir; | ||
405 | |||
406 | pch_iir = I915_READ(SDEIIR); | ||
407 | |||
408 | if (pch_iir & SDE_AUDIO_POWER_MASK) | ||
409 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | ||
410 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | ||
411 | SDE_AUDIO_POWER_SHIFT); | ||
412 | |||
413 | if (pch_iir & SDE_GMBUS) | ||
414 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | ||
415 | |||
416 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | ||
417 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | ||
418 | |||
419 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | ||
420 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | ||
421 | |||
422 | if (pch_iir & SDE_POISON) | ||
423 | DRM_ERROR("PCH poison interrupt\n"); | ||
424 | |||
425 | if (pch_iir & SDE_FDI_MASK) { | ||
426 | u32 fdia, fdib; | ||
427 | |||
428 | fdia = I915_READ(FDI_RXA_IIR); | ||
429 | fdib = I915_READ(FDI_RXB_IIR); | ||
430 | DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib); | ||
431 | } | ||
432 | |||
433 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | ||
434 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | ||
435 | |||
436 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | ||
437 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | ||
438 | |||
439 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | ||
440 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); | ||
441 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | ||
442 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | ||
443 | } | ||
444 | |||
438 | static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | 445 | static irqreturn_t ironlake_irq_handler(struct drm_device *dev) |
439 | { | 446 | { |
440 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 447 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
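A note on the "port %d" message in pch_irq_handler() above: with the SDE_AUDIO_POWER_* bits added to i915_reg.h later in this diff (port B = bit 25, C = bit 26, D = bit 27, shift 25), the printed value is the masked field shifted down, i.e. a small bitmask relative to port B rather than a port index. Purely illustrative, with a made-up IIR value:

	/* Illustrative only: pch_iir with just the port C audio power bit set */
	u32 pch_iir = SDE_AUDIO_POWER_C;		/* 1 << 26 */
	int val = (pch_iir & SDE_AUDIO_POWER_MASK)
			>> SDE_AUDIO_POWER_SHIFT;	/* prints 2 (B=1, C=2, D=4) */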
@@ -502,8 +509,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
502 | drm_handle_vblank(dev, 1); | 509 | drm_handle_vblank(dev, 1); |
503 | 510 | ||
504 | /* check event from PCH */ | 511 | /* check event from PCH */ |
505 | if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) | 512 | if (de_iir & DE_PCH_EVENT) { |
506 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 513 | if (pch_iir & hotplug_mask) |
514 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
515 | pch_irq_handler(dev); | ||
516 | } | ||
507 | 517 | ||
508 | if (de_iir & DE_PCU_EVENT) { | 518 | if (de_iir & DE_PCU_EVENT) { |
509 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 519 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
@@ -556,10 +566,9 @@ static void i915_error_work_func(struct work_struct *work) | |||
556 | 566 | ||
557 | #ifdef CONFIG_DEBUG_FS | 567 | #ifdef CONFIG_DEBUG_FS |
558 | static struct drm_i915_error_object * | 568 | static struct drm_i915_error_object * |
559 | i915_error_object_create(struct drm_device *dev, | 569 | i915_error_object_create(struct drm_i915_private *dev_priv, |
560 | struct drm_i915_gem_object *src) | 570 | struct drm_i915_gem_object *src) |
561 | { | 571 | { |
562 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
563 | struct drm_i915_error_object *dst; | 572 | struct drm_i915_error_object *dst; |
564 | int page, page_count; | 573 | int page, page_count; |
565 | u32 reloc_offset; | 574 | u32 reloc_offset; |
@@ -632,52 +641,6 @@ i915_error_state_free(struct drm_device *dev, | |||
632 | kfree(error); | 641 | kfree(error); |
633 | } | 642 | } |
634 | 643 | ||
635 | static u32 | ||
636 | i915_get_bbaddr(struct drm_device *dev, u32 *ring) | ||
637 | { | ||
638 | u32 cmd; | ||
639 | |||
640 | if (IS_I830(dev) || IS_845G(dev)) | ||
641 | cmd = MI_BATCH_BUFFER; | ||
642 | else if (INTEL_INFO(dev)->gen >= 4) | ||
643 | cmd = (MI_BATCH_BUFFER_START | (2 << 6) | | ||
644 | MI_BATCH_NON_SECURE_I965); | ||
645 | else | ||
646 | cmd = (MI_BATCH_BUFFER_START | (2 << 6)); | ||
647 | |||
648 | return ring[0] == cmd ? ring[1] : 0; | ||
649 | } | ||
650 | |||
651 | static u32 | ||
652 | i915_ringbuffer_last_batch(struct drm_device *dev, | ||
653 | struct intel_ring_buffer *ring) | ||
654 | { | ||
655 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
656 | u32 head, bbaddr; | ||
657 | u32 *val; | ||
658 | |||
659 | /* Locate the current position in the ringbuffer and walk back | ||
660 | * to find the most recently dispatched batch buffer. | ||
661 | */ | ||
662 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | ||
663 | |||
664 | val = (u32 *)(ring->virtual_start + head); | ||
665 | while (--val >= (u32 *)ring->virtual_start) { | ||
666 | bbaddr = i915_get_bbaddr(dev, val); | ||
667 | if (bbaddr) | ||
668 | return bbaddr; | ||
669 | } | ||
670 | |||
671 | val = (u32 *)(ring->virtual_start + ring->size); | ||
672 | while (--val >= (u32 *)ring->virtual_start) { | ||
673 | bbaddr = i915_get_bbaddr(dev, val); | ||
674 | if (bbaddr) | ||
675 | return bbaddr; | ||
676 | } | ||
677 | |||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, | 644 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, |
682 | int count, | 645 | int count, |
683 | struct list_head *head) | 646 | struct list_head *head) |
@@ -702,6 +665,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, | |||
702 | err->dirty = obj->dirty; | 665 | err->dirty = obj->dirty; |
703 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | 666 | err->purgeable = obj->madv != I915_MADV_WILLNEED; |
704 | err->ring = obj->ring ? obj->ring->id : 0; | 667 | err->ring = obj->ring ? obj->ring->id : 0; |
668 | err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY; | ||
705 | 669 | ||
706 | if (++i == count) | 670 | if (++i == count) |
707 | break; | 671 | break; |
@@ -741,6 +705,36 @@ static void i915_gem_record_fences(struct drm_device *dev, | |||
741 | } | 705 | } |
742 | } | 706 | } |
743 | 707 | ||
708 | static struct drm_i915_error_object * | ||
709 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | ||
710 | struct intel_ring_buffer *ring) | ||
711 | { | ||
712 | struct drm_i915_gem_object *obj; | ||
713 | u32 seqno; | ||
714 | |||
715 | if (!ring->get_seqno) | ||
716 | return NULL; | ||
717 | |||
718 | seqno = ring->get_seqno(ring); | ||
719 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
720 | if (obj->ring != ring) | ||
721 | continue; | ||
722 | |||
723 | if (!i915_seqno_passed(obj->last_rendering_seqno, seqno)) | ||
724 | continue; | ||
725 | |||
726 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | ||
727 | continue; | ||
728 | |||
729 | /* We need to copy these to an anonymous buffer as the simplest | ||
730 | * method to avoid being overwritten by userspace. | ||
731 | */ | ||
732 | return i915_error_object_create(dev_priv, obj); | ||
733 | } | ||
734 | |||
735 | return NULL; | ||
736 | } | ||
737 | |||
744 | /** | 738 | /** |
745 | * i915_capture_error_state - capture an error record for later analysis | 739 | * i915_capture_error_state - capture an error record for later analysis |
746 | * @dev: drm device | 740 | * @dev: drm device |
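The new i915_error_first_batchbuffer() above picks, per ring, the first object on the active list that is still outstanding on that ring and whose read domains include I915_GEM_DOMAIN_COMMAND, and snapshots it. The "still outstanding" test is the driver's usual wrap-safe seqno comparison; a minimal sketch of that idiom (the helper lives elsewhere in i915 and is reproduced here only for context):

	/* Sketch: "seq1 has passed seq2" as a wrap-tolerant signed comparison,
	 * so a 32-bit seqno counter can roll over without confusing the test. */
	static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

An object is therefore captured when its last_rendering_seqno is at or ahead of the ring's last completed seqno, i.e. it was still pending (or had only just retired) when the hang was recorded.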
@@ -755,10 +749,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
755 | struct drm_i915_private *dev_priv = dev->dev_private; | 749 | struct drm_i915_private *dev_priv = dev->dev_private; |
756 | struct drm_i915_gem_object *obj; | 750 | struct drm_i915_gem_object *obj; |
757 | struct drm_i915_error_state *error; | 751 | struct drm_i915_error_state *error; |
758 | struct drm_i915_gem_object *batchbuffer[2]; | ||
759 | unsigned long flags; | 752 | unsigned long flags; |
760 | u32 bbaddr; | 753 | int i; |
761 | int count; | ||
762 | 754 | ||
763 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 755 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
764 | error = dev_priv->first_error; | 756 | error = dev_priv->first_error; |
@@ -817,83 +809,30 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
817 | } | 809 | } |
818 | i915_gem_record_fences(dev, error); | 810 | i915_gem_record_fences(dev, error); |
819 | 811 | ||
820 | bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); | 812 | /* Record the active batchbuffers */ |
821 | 813 | for (i = 0; i < I915_NUM_RINGS; i++) | |
822 | /* Grab the current batchbuffer, most likely to have crashed. */ | 814 | error->batchbuffer[i] = |
823 | batchbuffer[0] = NULL; | 815 | i915_error_first_batchbuffer(dev_priv, |
824 | batchbuffer[1] = NULL; | 816 | &dev_priv->ring[i]); |
825 | count = 0; | ||
826 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
827 | if (batchbuffer[0] == NULL && | ||
828 | bbaddr >= obj->gtt_offset && | ||
829 | bbaddr < obj->gtt_offset + obj->base.size) | ||
830 | batchbuffer[0] = obj; | ||
831 | |||
832 | if (batchbuffer[1] == NULL && | ||
833 | error->acthd >= obj->gtt_offset && | ||
834 | error->acthd < obj->gtt_offset + obj->base.size) | ||
835 | batchbuffer[1] = obj; | ||
836 | |||
837 | count++; | ||
838 | } | ||
839 | /* Scan the other lists for completeness for those bizarre errors. */ | ||
840 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
841 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { | ||
842 | if (batchbuffer[0] == NULL && | ||
843 | bbaddr >= obj->gtt_offset && | ||
844 | bbaddr < obj->gtt_offset + obj->base.size) | ||
845 | batchbuffer[0] = obj; | ||
846 | |||
847 | if (batchbuffer[1] == NULL && | ||
848 | error->acthd >= obj->gtt_offset && | ||
849 | error->acthd < obj->gtt_offset + obj->base.size) | ||
850 | batchbuffer[1] = obj; | ||
851 | |||
852 | if (batchbuffer[0] && batchbuffer[1]) | ||
853 | break; | ||
854 | } | ||
855 | } | ||
856 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
857 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { | ||
858 | if (batchbuffer[0] == NULL && | ||
859 | bbaddr >= obj->gtt_offset && | ||
860 | bbaddr < obj->gtt_offset + obj->base.size) | ||
861 | batchbuffer[0] = obj; | ||
862 | |||
863 | if (batchbuffer[1] == NULL && | ||
864 | error->acthd >= obj->gtt_offset && | ||
865 | error->acthd < obj->gtt_offset + obj->base.size) | ||
866 | batchbuffer[1] = obj; | ||
867 | |||
868 | if (batchbuffer[0] && batchbuffer[1]) | ||
869 | break; | ||
870 | } | ||
871 | } | ||
872 | |||
873 | /* We need to copy these to an anonymous buffer as the simplest | ||
874 | * method to avoid being overwritten by userspace. | ||
875 | */ | ||
876 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | ||
877 | if (batchbuffer[1] != batchbuffer[0]) | ||
878 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
879 | else | ||
880 | error->batchbuffer[1] = NULL; | ||
881 | 817 | ||
882 | /* Record the ringbuffer */ | 818 | /* Record the ringbuffer */ |
883 | error->ringbuffer = i915_error_object_create(dev, | 819 | error->ringbuffer = i915_error_object_create(dev_priv, |
884 | dev_priv->ring[RCS].obj); | 820 | dev_priv->ring[RCS].obj); |
885 | 821 | ||
886 | /* Record buffers on the active and pinned lists. */ | 822 | /* Record buffers on the active and pinned lists. */ |
887 | error->active_bo = NULL; | 823 | error->active_bo = NULL; |
888 | error->pinned_bo = NULL; | 824 | error->pinned_bo = NULL; |
889 | 825 | ||
890 | error->active_bo_count = count; | 826 | i = 0; |
827 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | ||
828 | i++; | ||
829 | error->active_bo_count = i; | ||
891 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) | 830 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) |
892 | count++; | 831 | i++; |
893 | error->pinned_bo_count = count - error->active_bo_count; | 832 | error->pinned_bo_count = i - error->active_bo_count; |
894 | 833 | ||
895 | if (count) { | 834 | if (i) { |
896 | error->active_bo = kmalloc(sizeof(*error->active_bo)*count, | 835 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
897 | GFP_ATOMIC); | 836 | GFP_ATOMIC); |
898 | if (error->active_bo) | 837 | if (error->active_bo) |
899 | error->pinned_bo = | 838 | error->pinned_bo = |
@@ -1673,11 +1612,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1673 | 1612 | ||
1674 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1613 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1675 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 1614 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
1676 | if (IS_GEN6(dev)) { | ||
1677 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT); | ||
1678 | I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT); | ||
1679 | I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1680 | } | ||
1681 | 1615 | ||
1682 | if (IS_GEN6(dev)) | 1616 | if (IS_GEN6(dev)) |
1683 | render_irqs = | 1617 | render_irqs = |
@@ -1698,6 +1632,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1698 | } else { | 1632 | } else { |
1699 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1633 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1700 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1634 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1635 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | ||
1636 | I915_WRITE(FDI_RXA_IMR, 0); | ||
1637 | I915_WRITE(FDI_RXB_IMR, 0); | ||
1701 | } | 1638 | } |
1702 | 1639 | ||
1703 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1640 | dev_priv->pch_irq_mask = ~hotplug_mask; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8f948a6fbc1c..40a407f41f61 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -145,6 +145,8 @@ | |||
145 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 145 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
146 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ | 146 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ |
147 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 147 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
148 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | ||
149 | #define MI_SUSPEND_FLUSH_EN (1<<0) | ||
148 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 150 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
149 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) | 151 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) |
150 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 152 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
@@ -159,6 +161,7 @@ | |||
159 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 161 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
160 | #define MI_SAVE_EXT_STATE_EN (1<<3) | 162 | #define MI_SAVE_EXT_STATE_EN (1<<3) |
161 | #define MI_RESTORE_EXT_STATE_EN (1<<2) | 163 | #define MI_RESTORE_EXT_STATE_EN (1<<2) |
164 | #define MI_FORCE_RESTORE (1<<1) | ||
162 | #define MI_RESTORE_INHIBIT (1<<0) | 165 | #define MI_RESTORE_INHIBIT (1<<0) |
163 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 166 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
164 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 167 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
@@ -288,6 +291,7 @@ | |||
288 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | 291 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) |
289 | #define RING_ACTHD(base) ((base)+0x74) | 292 | #define RING_ACTHD(base) ((base)+0x74) |
290 | #define RING_NOPID(base) ((base)+0x94) | 293 | #define RING_NOPID(base) ((base)+0x94) |
294 | #define RING_IMR(base) ((base)+0xa8) | ||
291 | #define TAIL_ADDR 0x001FFFF8 | 295 | #define TAIL_ADDR 0x001FFFF8 |
292 | #define HEAD_WRAP_COUNT 0xFFE00000 | 296 | #define HEAD_WRAP_COUNT 0xFFE00000 |
293 | #define HEAD_WRAP_ONE 0x00200000 | 297 | #define HEAD_WRAP_ONE 0x00200000 |
@@ -1130,9 +1134,50 @@ | |||
1130 | #define RCBMINAVG 0x111a0 | 1134 | #define RCBMINAVG 0x111a0 |
1131 | #define RCUPEI 0x111b0 | 1135 | #define RCUPEI 0x111b0 |
1132 | #define RCDNEI 0x111b4 | 1136 | #define RCDNEI 0x111b4 |
1133 | #define MCHBAR_RENDER_STANDBY 0x111b8 | 1137 | #define RSTDBYCTL 0x111b8 |
1134 | #define RCX_SW_EXIT (1<<23) | 1138 | #define RS1EN (1<<31) |
1135 | #define RSX_STATUS_MASK 0x00700000 | 1139 | #define RS2EN (1<<30) |
1140 | #define RS3EN (1<<29) | ||
1141 | #define D3RS3EN (1<<28) /* Display D3 implies RS3 */ ||
1142 | #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ | ||
1143 | #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ | ||
1144 | #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ | ||
1145 | #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ | ||
1146 | #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ | ||
1147 | #define RSX_STATUS_MASK (7<<20) | ||
1148 | #define RSX_STATUS_ON (0<<20) | ||
1149 | #define RSX_STATUS_RC1 (1<<20) | ||
1150 | #define RSX_STATUS_RC1E (2<<20) | ||
1151 | #define RSX_STATUS_RS1 (3<<20) | ||
1152 | #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ | ||
1153 | #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ | ||
1154 | #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ | ||
1155 | #define RSX_STATUS_RSVD2 (7<<20) | ||
1156 | #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ | ||
1157 | #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ | ||
1158 | #define JRSC (1<<17) /* rsx coupled to cpu c-state */ | ||
1159 | #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ | ||
1160 | #define RS1CONTSAV_MASK (3<<14) | ||
1161 | #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ | ||
1162 | #define RS1CONTSAV_RSVD (1<<14) | ||
1163 | #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ | ||
1164 | #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ | ||
1165 | #define NORMSLEXLAT_MASK (3<<12) | ||
1166 | #define SLOW_RS123 (0<<12) | ||
1167 | #define SLOW_RS23 (1<<12) | ||
1168 | #define SLOW_RS3 (2<<12) | ||
1169 | #define NORMAL_RS123 (3<<12) | ||
1170 | #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ | ||
1171 | #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ | ||
1172 | #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ | ||
1173 | #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ | ||
1174 | #define RS_CSTATE_MASK (3<<4) | ||
1175 | #define RS_CSTATE_C367_RS1 (0<<4) | ||
1176 | #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) | ||
1177 | #define RS_CSTATE_RSVD (2<<4) | ||
1178 | #define RS_CSTATE_C367_RS2 (3<<4) | ||
1179 | #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ | ||
1180 | #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ | ||
1136 | #define VIDCTL 0x111c0 | 1181 | #define VIDCTL 0x111c0 |
1137 | #define VIDSTS 0x111c8 | 1182 | #define VIDSTS 0x111c8 |
1138 | #define VIDSTART 0x111cc /* 8 bits */ | 1183 | #define VIDSTART 0x111cc /* 8 bits */ |
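For illustration only (not part of the patch), the new three-bit RSX_STATUS field in RSTDBYCTL can be decoded with the masks defined above; the helper below is a hypothetical sketch operating on a raw register value:

	/* Sketch: name the render-standby state encoded in RSTDBYCTL bits 22:20. */
	static const char *rsx_status_name(u32 rstdbyctl)
	{
		switch (rstdbyctl & RSX_STATUS_MASK) {
		case RSX_STATUS_ON:	return "on";
		case RSX_STATUS_RC1:	return "rc1";
		case RSX_STATUS_RC1E:	return "rc1e";
		case RSX_STATUS_RS1:	return "rs1";
		case RSX_STATUS_RS2:	return "rs2 (aka rc6)";
		case RSX_STATUS_RS3:	return "rs3";
		default:		return "reserved";
		}
	}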
@@ -2345,8 +2390,13 @@ | |||
2345 | 2390 | ||
2346 | /* Memory latency timer register */ | 2391 | /* Memory latency timer register */ |
2347 | #define MLTR_ILK 0x11222 | 2392 | #define MLTR_ILK 0x11222 |
2393 | #define MLTR_WM1_SHIFT 0 | ||
2394 | #define MLTR_WM2_SHIFT 8 | ||
2348 | /* the unit of memory self-refresh latency time is 0.5us */ | 2395 | /* the unit of memory self-refresh latency time is 0.5us */ |
2349 | #define ILK_SRLT_MASK 0x3f | 2396 | #define ILK_SRLT_MASK 0x3f |
2397 | #define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) | ||
2398 | #define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) | ||
2399 | #define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) | ||
2350 | 2400 | ||
2351 | /* define the fifo size on Ironlake */ | 2401 | /* define the fifo size on Ironlake */ |
2352 | #define ILK_DISPLAY_FIFO 128 | 2402 | #define ILK_DISPLAY_FIFO 128 |
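The self-refresh latency fields read through MLTR_ILK are in 0.5 us units (per the comment above), which is why the watermark code below multiplies ILK_READ_WM*_LATENCY() by 500 to get nanoseconds. A purely illustrative conversion, assuming the WM1 field reads back as 4:

	/* Illustrative: 0.5us units -> ns; a raw value of 4 means 2 us. */
	latency_ns = ILK_READ_WM1_LATENCY() * 500;	/* 4 * 500 = 2000 ns */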
@@ -2728,12 +2778,41 @@ | |||
2728 | /* PCH */ | 2778 | /* PCH */ |
2729 | 2779 | ||
2730 | /* south display engine interrupt */ | 2780 | /* south display engine interrupt */ |
2781 | #define SDE_AUDIO_POWER_D (1 << 27) | ||
2782 | #define SDE_AUDIO_POWER_C (1 << 26) | ||
2783 | #define SDE_AUDIO_POWER_B (1 << 25) | ||
2784 | #define SDE_AUDIO_POWER_SHIFT (25) | ||
2785 | #define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) | ||
2786 | #define SDE_GMBUS (1 << 24) | ||
2787 | #define SDE_AUDIO_HDCP_TRANSB (1 << 23) | ||
2788 | #define SDE_AUDIO_HDCP_TRANSA (1 << 22) | ||
2789 | #define SDE_AUDIO_HDCP_MASK (3 << 22) | ||
2790 | #define SDE_AUDIO_TRANSB (1 << 21) | ||
2791 | #define SDE_AUDIO_TRANSA (1 << 20) | ||
2792 | #define SDE_AUDIO_TRANS_MASK (3 << 20) | ||
2793 | #define SDE_POISON (1 << 19) | ||
2794 | /* 18 reserved */ | ||
2795 | #define SDE_FDI_RXB (1 << 17) | ||
2796 | #define SDE_FDI_RXA (1 << 16) | ||
2797 | #define SDE_FDI_MASK (3 << 16) | ||
2798 | #define SDE_AUXD (1 << 15) | ||
2799 | #define SDE_AUXC (1 << 14) | ||
2800 | #define SDE_AUXB (1 << 13) | ||
2801 | #define SDE_AUX_MASK (7 << 13) | ||
2802 | /* 12 reserved */ | ||
2731 | #define SDE_CRT_HOTPLUG (1 << 11) | 2803 | #define SDE_CRT_HOTPLUG (1 << 11) |
2732 | #define SDE_PORTD_HOTPLUG (1 << 10) | 2804 | #define SDE_PORTD_HOTPLUG (1 << 10) |
2733 | #define SDE_PORTC_HOTPLUG (1 << 9) | 2805 | #define SDE_PORTC_HOTPLUG (1 << 9) |
2734 | #define SDE_PORTB_HOTPLUG (1 << 8) | 2806 | #define SDE_PORTB_HOTPLUG (1 << 8) |
2735 | #define SDE_SDVOB_HOTPLUG (1 << 6) | 2807 | #define SDE_SDVOB_HOTPLUG (1 << 6) |
2736 | #define SDE_HOTPLUG_MASK (0xf << 8) | 2808 | #define SDE_HOTPLUG_MASK (0xf << 8) |
2809 | #define SDE_TRANSB_CRC_DONE (1 << 5) | ||
2810 | #define SDE_TRANSB_CRC_ERR (1 << 4) | ||
2811 | #define SDE_TRANSB_FIFO_UNDER (1 << 3) | ||
2812 | #define SDE_TRANSA_CRC_DONE (1 << 2) | ||
2813 | #define SDE_TRANSA_CRC_ERR (1 << 1) | ||
2814 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | ||
2815 | #define SDE_TRANS_MASK (0x3f) | ||
2737 | /* CPT */ | 2816 | /* CPT */ |
2738 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | 2817 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) |
2739 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 2818 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
@@ -3174,10 +3253,11 @@ | |||
3174 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) | 3253 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) |
3175 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) | 3254 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) |
3176 | /* SNB B-stepping */ | 3255 | /* SNB B-stepping */ |
3177 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) | 3256 | #define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) |
3178 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) | 3257 | #define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) |
3179 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) | 3258 | #define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) |
3180 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) | 3259 | #define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) |
3260 | #define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) | ||
3181 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) | 3261 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) |
3182 | 3262 | ||
3183 | #define FORCEWAKE 0xA18C | 3263 | #define FORCEWAKE 0xA18C |
@@ -3239,6 +3319,7 @@ | |||
3239 | 3319 | ||
3240 | #define GEN6_PCODE_MAILBOX 0x138124 | 3320 | #define GEN6_PCODE_MAILBOX 0x138124 |
3241 | #define GEN6_PCODE_READY (1<<31) | 3321 | #define GEN6_PCODE_READY (1<<31) |
3322 | #define GEN6_READ_OC_PARAMS 0xc | ||
3242 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 | 3323 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 |
3243 | #define GEN6_PCODE_DATA 0x138128 | 3324 | #define GEN6_PCODE_DATA 0x138128 |
3244 | 3325 | ||
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 410772466fa7..0521ecf26017 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -740,7 +740,7 @@ void i915_restore_display(struct drm_device *dev) | |||
740 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 740 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); |
741 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); | 741 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); |
742 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); | 742 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); |
743 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 743 | I915_WRITE(RSTDBYCTL, |
744 | dev_priv->saveMCHBAR_RENDER_STANDBY); | 744 | dev_priv->saveMCHBAR_RENDER_STANDBY); |
745 | } else { | 745 | } else { |
746 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | 746 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); |
@@ -811,7 +811,7 @@ int i915_save_state(struct drm_device *dev) | |||
811 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); | 811 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); |
812 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); | 812 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); |
813 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 813 | dev_priv->saveMCHBAR_RENDER_STANDBY = |
814 | I915_READ(MCHBAR_RENDER_STANDBY); | 814 | I915_READ(RSTDBYCTL); |
815 | } else { | 815 | } else { |
816 | dev_priv->saveIER = I915_READ(IER); | 816 | dev_priv->saveIER = I915_READ(IER); |
817 | dev_priv->saveIMR = I915_READ(IMR); | 817 | dev_priv->saveIMR = I915_READ(IMR); |
@@ -822,10 +822,6 @@ int i915_save_state(struct drm_device *dev) | |||
822 | if (IS_GEN6(dev)) | 822 | if (IS_GEN6(dev)) |
823 | gen6_disable_rps(dev); | 823 | gen6_disable_rps(dev); |
824 | 824 | ||
825 | /* XXX disabling the clock gating breaks suspend on gm45 | ||
826 | intel_disable_clock_gating(dev); | ||
827 | */ | ||
828 | |||
829 | /* Cache mode state */ | 825 | /* Cache mode state */ |
830 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | 826 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); |
831 | 827 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 8df574316063..17035b87ee46 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "drm_crtc.h" | 31 | #include "drm_crtc.h" |
32 | #include "drm_crtc_helper.h" | 32 | #include "drm_crtc_helper.h" |
33 | #include "drm_edid.h" | ||
33 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) | |||
287 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; | 288 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; |
288 | } | 289 | } |
289 | 290 | ||
290 | static bool intel_crt_detect_ddc(struct intel_crt *crt) | 291 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
291 | { | 292 | { |
293 | struct intel_crt *crt = intel_attached_crt(connector); | ||
292 | struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; | 294 | struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; |
293 | 295 | ||
294 | /* CRT should always be at 0, but check anyway */ | 296 | /* CRT should always be at 0, but check anyway */ |
@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt) | |||
301 | } | 303 | } |
302 | 304 | ||
303 | if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { | 305 | if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { |
304 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | 306 | struct edid *edid; |
305 | return true; | 307 | bool is_digital = false; |
308 | |||
309 | edid = drm_get_edid(connector, | ||
310 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); | ||
311 | /* | ||
312 | * This may be a DVI-I connector with a shared DDC | ||
313 | * link between analog and digital outputs, so we | ||
314 | * have to check the EDID input spec of the attached device. | ||
315 | */ | ||
316 | if (edid != NULL) { | ||
317 | is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; | ||
318 | connector->display_info.raw_edid = NULL; | ||
319 | kfree(edid); | ||
320 | } | ||
321 | |||
322 | if (!is_digital) { | ||
323 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | ||
324 | return true; | ||
325 | } | ||
306 | } | 326 | } |
307 | 327 | ||
308 | return false; | 328 | return false; |
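The DVI-I handling above turns on a single EDID bit: DRM_EDID_INPUT_DIGITAL is the top bit of the EDID base block's video-input-definition byte (offset 0x14), set for digital-input sinks. A minimal, purely illustrative check over a raw EDID buffer (hypothetical helper, not part of the patch):

	/* Sketch: true if the EDID describes a digital (DVI/DP) input, in which
	 * case the shared DDC link belongs to the DVI side of a DVI-I connector
	 * rather than to the analog CRT output. */
	static bool edid_is_digital(const u8 *raw_edid)
	{
		return raw_edid[0x14] & 0x80;
	}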
@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
458 | } | 478 | } |
459 | } | 479 | } |
460 | 480 | ||
461 | if (intel_crt_detect_ddc(crt)) | 481 | if (intel_crt_detect_ddc(connector)) |
462 | return connector_status_connected; | 482 | return connector_status_connected; |
463 | 483 | ||
464 | if (!force) | 484 | if (!force) |
@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
472 | crtc = intel_get_load_detect_pipe(&crt->base, connector, | 492 | crtc = intel_get_load_detect_pipe(&crt->base, connector, |
473 | NULL, &dpms_mode); | 493 | NULL, &dpms_mode); |
474 | if (crtc) { | 494 | if (crtc) { |
475 | if (intel_crt_detect_ddc(crt)) | 495 | if (intel_crt_detect_ddc(connector)) |
476 | status = connector_status_connected; | 496 | status = connector_status_connected; |
477 | else | 497 | else |
478 | status = intel_crt_load_detect(crtc, crt); | 498 | status = intel_crt_load_detect(crtc, crt); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0abe79fb6385..25d96889d7d2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -3418,15 +3418,16 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3418 | static bool ironlake_compute_wm0(struct drm_device *dev, | 3418 | static bool ironlake_compute_wm0(struct drm_device *dev, |
3419 | int pipe, | 3419 | int pipe, |
3420 | const struct intel_watermark_params *display, | 3420 | const struct intel_watermark_params *display, |
3421 | int display_latency, | 3421 | int display_latency_ns, |
3422 | const struct intel_watermark_params *cursor, | 3422 | const struct intel_watermark_params *cursor, |
3423 | int cursor_latency, | 3423 | int cursor_latency_ns, |
3424 | int *plane_wm, | 3424 | int *plane_wm, |
3425 | int *cursor_wm) | 3425 | int *cursor_wm) |
3426 | { | 3426 | { |
3427 | struct drm_crtc *crtc; | 3427 | struct drm_crtc *crtc; |
3428 | int htotal, hdisplay, clock, pixel_size = 0; | 3428 | int htotal, hdisplay, clock, pixel_size; |
3429 | int line_time_us, line_count, entries; | 3429 | int line_time_us, line_count; |
3430 | int entries, tlb_miss; | ||
3430 | 3431 | ||
3431 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 3432 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
3432 | if (crtc->fb == NULL || !crtc->enabled) | 3433 | if (crtc->fb == NULL || !crtc->enabled) |
@@ -3438,7 +3439,10 @@ static bool ironlake_compute_wm0(struct drm_device *dev, | |||
3438 | pixel_size = crtc->fb->bits_per_pixel / 8; | 3439 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3439 | 3440 | ||
3440 | /* Use the small buffer method to calculate plane watermark */ | 3441 | /* Use the small buffer method to calculate plane watermark */ |
3441 | entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000; | 3442 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
3443 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; | ||
3444 | if (tlb_miss > 0) | ||
3445 | entries += tlb_miss; | ||
3442 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | 3446 | entries = DIV_ROUND_UP(entries, display->cacheline_size); |
3443 | *plane_wm = entries + display->guard_size; | 3447 | *plane_wm = entries + display->guard_size; |
3444 | if (*plane_wm > (int)display->max_wm) | 3448 | if (*plane_wm > (int)display->max_wm) |
@@ -3446,8 +3450,11 @@ static bool ironlake_compute_wm0(struct drm_device *dev, | |||
3446 | 3450 | ||
3447 | /* Use the large buffer method to calculate cursor watermark */ | 3451 | /* Use the large buffer method to calculate cursor watermark */ |
3448 | line_time_us = ((htotal * 1000) / clock); | 3452 | line_time_us = ((htotal * 1000) / clock); |
3449 | line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000; | 3453 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; |
3450 | entries = line_count * 64 * pixel_size; | 3454 | entries = line_count * 64 * pixel_size; |
3455 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; | ||
3456 | if (tlb_miss > 0) | ||
3457 | entries += tlb_miss; | ||
3451 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | 3458 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
3452 | *cursor_wm = entries + cursor->guard_size; | 3459 | *cursor_wm = entries + cursor->guard_size; |
3453 | if (*cursor_wm > (int)cursor->max_wm) | 3460 | if (*cursor_wm > (int)cursor->max_wm) |
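To make the small-buffer method above concrete, a worked example with assumed numbers (148.5 MHz pixel clock, so clock = 148500 in kHz, 4 bytes per pixel, a 700 ns plane latency, 1920 active pixels, a 128-entry FIFO with 64-byte cachelines; all example values, not taken from the patch):

	entries  = ((148500 * 4 / 1000) * 700) / 1000;	/* = 415 bytes */
	tlb_miss = 128 * 64 - 1920 * 8;			/* = -7168, <= 0 so no adjustment */
	entries  = DIV_ROUND_UP(415, 64);		/* = 7 cachelines */
	plane_wm = 7 + display->guard_size;		/* guard added on top */

The cursor watermark follows the same pattern but sizes its buffer from line_count * 64 * pixel_size rather than from the raw byte rate.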
@@ -3456,113 +3463,17 @@ static bool ironlake_compute_wm0(struct drm_device *dev, | |||
3456 | return true; | 3463 | return true; |
3457 | } | 3464 | } |
3458 | 3465 | ||
3459 | static void ironlake_update_wm(struct drm_device *dev, | ||
3460 | int planea_clock, int planeb_clock, | ||
3461 | int sr_hdisplay, int sr_htotal, | ||
3462 | int pixel_size) | ||
3463 | { | ||
3464 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3465 | int plane_wm, cursor_wm, enabled; | ||
3466 | int tmp; | ||
3467 | |||
3468 | enabled = 0; | ||
3469 | if (ironlake_compute_wm0(dev, 0, | ||
3470 | &ironlake_display_wm_info, | ||
3471 | ILK_LP0_PLANE_LATENCY, | ||
3472 | &ironlake_cursor_wm_info, | ||
3473 | ILK_LP0_CURSOR_LATENCY, | ||
3474 | &plane_wm, &cursor_wm)) { | ||
3475 | I915_WRITE(WM0_PIPEA_ILK, | ||
3476 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
3477 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
3478 | " plane %d, " "cursor: %d\n", | ||
3479 | plane_wm, cursor_wm); | ||
3480 | enabled++; | ||
3481 | } | ||
3482 | |||
3483 | if (ironlake_compute_wm0(dev, 1, | ||
3484 | &ironlake_display_wm_info, | ||
3485 | ILK_LP0_PLANE_LATENCY, | ||
3486 | &ironlake_cursor_wm_info, | ||
3487 | ILK_LP0_CURSOR_LATENCY, | ||
3488 | &plane_wm, &cursor_wm)) { | ||
3489 | I915_WRITE(WM0_PIPEB_ILK, | ||
3490 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
3491 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
3492 | " plane %d, cursor: %d\n", | ||
3493 | plane_wm, cursor_wm); | ||
3494 | enabled++; | ||
3495 | } | ||
3496 | |||
3497 | /* | ||
3498 | * Calculate and update the self-refresh watermark only when one | ||
3499 | * display plane is used. | ||
3500 | */ | ||
3501 | tmp = 0; | ||
3502 | if (enabled == 1) { | ||
3503 | unsigned long line_time_us; | ||
3504 | int small, large, plane_fbc; | ||
3505 | int sr_clock, entries; | ||
3506 | int line_count, line_size; | ||
3507 | /* Read the self-refresh latency. The unit is 0.5us */ | ||
3508 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | ||
3509 | |||
3510 | sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
3511 | line_time_us = (sr_htotal * 1000) / sr_clock; | ||
3512 | |||
3513 | /* Use ns/us then divide to preserve precision */ | ||
3514 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | ||
3515 | / 1000; | ||
3516 | line_size = sr_hdisplay * pixel_size; | ||
3517 | |||
3518 | /* Use the minimum of the small and large buffer method for primary */ | ||
3519 | small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000; | ||
3520 | large = line_count * line_size; | ||
3521 | |||
3522 | entries = DIV_ROUND_UP(min(small, large), | ||
3523 | ironlake_display_srwm_info.cacheline_size); | ||
3524 | |||
3525 | plane_fbc = entries * 64; | ||
3526 | plane_fbc = DIV_ROUND_UP(plane_fbc, line_size); | ||
3527 | |||
3528 | plane_wm = entries + ironlake_display_srwm_info.guard_size; | ||
3529 | if (plane_wm > (int)ironlake_display_srwm_info.max_wm) | ||
3530 | plane_wm = ironlake_display_srwm_info.max_wm; | ||
3531 | |||
3532 | /* calculate the self-refresh watermark for display cursor */ | ||
3533 | entries = line_count * pixel_size * 64; | ||
3534 | entries = DIV_ROUND_UP(entries, | ||
3535 | ironlake_cursor_srwm_info.cacheline_size); | ||
3536 | |||
3537 | cursor_wm = entries + ironlake_cursor_srwm_info.guard_size; | ||
3538 | if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm) | ||
3539 | cursor_wm = ironlake_cursor_srwm_info.max_wm; | ||
3540 | |||
3541 | /* configure watermark and enable self-refresh */ | ||
3542 | tmp = (WM1_LP_SR_EN | | ||
3543 | (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | ||
3544 | (plane_fbc << WM1_LP_FBC_SHIFT) | | ||
3545 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3546 | cursor_wm); | ||
3547 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d," | ||
3548 | " cursor %d\n", plane_wm, plane_fbc, cursor_wm); | ||
3549 | } | ||
3550 | I915_WRITE(WM1_LP_ILK, tmp); | ||
3551 | /* XXX setup WM2 and WM3 */ | ||
3552 | } | ||
3553 | |||
3554 | /* | 3466 | /* |
3555 | * Check the wm result. | 3467 | * Check the wm result. |
3556 | * | 3468 | * |
3557 | * If any calculated watermark value is larger than the maximum value that | 3469 | * If any calculated watermark value is larger than the maximum value that |
3558 | * can be programmed into the associated watermark register, that watermark | 3470 | * can be programmed into the associated watermark register, that watermark |
3559 | * must be disabled. | 3471 | * must be disabled. |
3560 | * | ||
3561 | * Also return true if all of those watermark values is 0, which is set by | ||
3562 | * sandybridge_compute_srwm, to indicate the latency is ZERO. | ||
3563 | */ | 3472 | */ |
3564 | static bool sandybridge_check_srwm(struct drm_device *dev, int level, | 3473 | static bool ironlake_check_srwm(struct drm_device *dev, int level, |
3565 | int fbc_wm, int display_wm, int cursor_wm) | 3474 | int fbc_wm, int display_wm, int cursor_wm, |
3475 | const struct intel_watermark_params *display, | ||
3476 | const struct intel_watermark_params *cursor) | ||
3566 | { | 3477 | { |
3567 | struct drm_i915_private *dev_priv = dev->dev_private; | 3478 | struct drm_i915_private *dev_priv = dev->dev_private; |
3568 | 3479 | ||
@@ -3571,7 +3482,7 @@ static bool sandybridge_check_srwm(struct drm_device *dev, int level, | |||
3571 | 3482 | ||
3572 | if (fbc_wm > SNB_FBC_MAX_SRWM) { | 3483 | if (fbc_wm > SNB_FBC_MAX_SRWM) { |
3573 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", | 3484 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", |
3574 | fbc_wm, SNB_FBC_MAX_SRWM, level); | 3485 | fbc_wm, SNB_FBC_MAX_SRWM, level); |
3575 | 3486 | ||
3576 | /* fbc has its own way to disable FBC WM */ | 3487 | /* fbc has its own way to disable FBC WM */ |
3577 | I915_WRITE(DISP_ARB_CTL, | 3488 | I915_WRITE(DISP_ARB_CTL, |
@@ -3579,15 +3490,15 @@ static bool sandybridge_check_srwm(struct drm_device *dev, int level, | |||
3579 | return false; | 3490 | return false; |
3580 | } | 3491 | } |
3581 | 3492 | ||
3582 | if (display_wm > SNB_DISPLAY_MAX_SRWM) { | 3493 | if (display_wm > display->max_wm) { |
3583 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", | 3494 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", |
3584 | display_wm, SNB_DISPLAY_MAX_SRWM, level); | 3495 | display_wm, SNB_DISPLAY_MAX_SRWM, level); |
3585 | return false; | 3496 | return false; |
3586 | } | 3497 | } |
3587 | 3498 | ||
3588 | if (cursor_wm > SNB_CURSOR_MAX_SRWM) { | 3499 | if (cursor_wm > cursor->max_wm) { |
3589 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", | 3500 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", |
3590 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); | 3501 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); |
3591 | return false; | 3502 | return false; |
3592 | } | 3503 | } |
3593 | 3504 | ||
@@ -3602,16 +3513,18 @@ static bool sandybridge_check_srwm(struct drm_device *dev, int level, | |||
3602 | /* | 3513 | /* |
3603 | * Compute watermark values of WM[1-3], | 3514 | * Compute watermark values of WM[1-3], |
3604 | */ | 3515 | */ |
3605 | static bool sandybridge_compute_srwm(struct drm_device *dev, int level, | 3516 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, |
3606 | int hdisplay, int htotal, int pixel_size, | 3517 | int hdisplay, int htotal, |
3607 | int clock, int latency_ns, int *fbc_wm, | 3518 | int pixel_size, int clock, int latency_ns, |
3608 | int *display_wm, int *cursor_wm) | 3519 | const struct intel_watermark_params *display, |
3520 | const struct intel_watermark_params *cursor, | ||
3521 | int *fbc_wm, int *display_wm, int *cursor_wm) | ||
3609 | { | 3522 | { |
3610 | 3523 | ||
3611 | unsigned long line_time_us; | 3524 | unsigned long line_time_us; |
3525 | int line_count, line_size; | ||
3612 | int small, large; | 3526 | int small, large; |
3613 | int entries; | 3527 | int entries; |
3614 | int line_count, line_size; | ||
3615 | 3528 | ||
3616 | if (!latency_ns) { | 3529 | if (!latency_ns) { |
3617 | *fbc_wm = *display_wm = *cursor_wm = 0; | 3530 | *fbc_wm = *display_wm = *cursor_wm = 0; |
@@ -3626,24 +3539,110 @@ static bool sandybridge_compute_srwm(struct drm_device *dev, int level, | |||
3626 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | 3539 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
3627 | large = line_count * line_size; | 3540 | large = line_count * line_size; |
3628 | 3541 | ||
3629 | entries = DIV_ROUND_UP(min(small, large), | 3542 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
3630 | sandybridge_display_srwm_info.cacheline_size); | 3543 | *display_wm = entries + display->guard_size; |
3631 | *display_wm = entries + sandybridge_display_srwm_info.guard_size; | ||
3632 | 3544 | ||
3633 | /* | 3545 | /* |
3634 | * Spec said: | 3546 | * Spec says: |
3635 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 | 3547 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 |
3636 | */ | 3548 | */ |
3637 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | 3549 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; |
3638 | 3550 | ||
3639 | /* calculate the self-refresh watermark for display cursor */ | 3551 | /* calculate the self-refresh watermark for display cursor */ |
3640 | entries = line_count * pixel_size * 64; | 3552 | entries = line_count * pixel_size * 64; |
3641 | entries = DIV_ROUND_UP(entries, | 3553 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
3642 | sandybridge_cursor_srwm_info.cacheline_size); | 3554 | *cursor_wm = entries + cursor->guard_size; |
3643 | *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size; | ||
3644 | 3555 | ||
3645 | return sandybridge_check_srwm(dev, level, | 3556 | return ironlake_check_srwm(dev, level, |
3646 | *fbc_wm, *display_wm, *cursor_wm); | 3557 | *fbc_wm, *display_wm, *cursor_wm, |
3558 | display, cursor); | ||
3559 | } | ||
3560 | |||
3561 | static void ironlake_update_wm(struct drm_device *dev, | ||
3562 | int planea_clock, int planeb_clock, | ||
3563 | int hdisplay, int htotal, | ||
3564 | int pixel_size) | ||
3565 | { | ||
3566 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3567 | int fbc_wm, plane_wm, cursor_wm, enabled; | ||
3568 | int clock; | ||
3569 | |||
3570 | enabled = 0; | ||
3571 | if (ironlake_compute_wm0(dev, 0, | ||
3572 | &ironlake_display_wm_info, | ||
3573 | ILK_LP0_PLANE_LATENCY, | ||
3574 | &ironlake_cursor_wm_info, | ||
3575 | ILK_LP0_CURSOR_LATENCY, | ||
3576 | &plane_wm, &cursor_wm)) { | ||
3577 | I915_WRITE(WM0_PIPEA_ILK, | ||
3578 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
3579 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
3580 | " plane %d, " "cursor: %d\n", | ||
3581 | plane_wm, cursor_wm); | ||
3582 | enabled++; | ||
3583 | } | ||
3584 | |||
3585 | if (ironlake_compute_wm0(dev, 1, | ||
3586 | &ironlake_display_wm_info, | ||
3587 | ILK_LP0_PLANE_LATENCY, | ||
3588 | &ironlake_cursor_wm_info, | ||
3589 | ILK_LP0_CURSOR_LATENCY, | ||
3590 | &plane_wm, &cursor_wm)) { | ||
3591 | I915_WRITE(WM0_PIPEB_ILK, | ||
3592 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
3593 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
3594 | " plane %d, cursor: %d\n", | ||
3595 | plane_wm, cursor_wm); | ||
3596 | enabled++; | ||
3597 | } | ||
3598 | |||
3599 | /* | ||
3600 | * Calculate and update the self-refresh watermark only when one | ||
3601 | * display plane is used. | ||
3602 | */ | ||
3603 | I915_WRITE(WM3_LP_ILK, 0); | ||
3604 | I915_WRITE(WM2_LP_ILK, 0); | ||
3605 | I915_WRITE(WM1_LP_ILK, 0); | ||
3606 | |||
3607 | if (enabled != 1) | ||
3608 | return; | ||
3609 | |||
3610 | clock = planea_clock ? planea_clock : planeb_clock; | ||
3611 | |||
3612 | /* WM1 */ | ||
3613 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, | ||
3614 | clock, ILK_READ_WM1_LATENCY() * 500, | ||
3615 | &ironlake_display_srwm_info, | ||
3616 | &ironlake_cursor_srwm_info, | ||
3617 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3618 | return; | ||
3619 | |||
3620 | I915_WRITE(WM1_LP_ILK, | ||
3621 | WM1_LP_SR_EN | | ||
3622 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3623 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3624 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3625 | cursor_wm); | ||
3626 | |||
3627 | /* WM2 */ | ||
3628 | if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size, | ||
3629 | clock, ILK_READ_WM2_LATENCY() * 500, | ||
3630 | &ironlake_display_srwm_info, | ||
3631 | &ironlake_cursor_srwm_info, | ||
3632 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3633 | return; | ||
3634 | |||
3635 | I915_WRITE(WM2_LP_ILK, | ||
3636 | WM2_LP_EN | | ||
3637 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3638 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3639 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3640 | cursor_wm); | ||
3641 | |||
3642 | /* | ||
3643 | * WM3 is unsupported on ILK, probably because we don't have latency | ||
3644 | * data for that power state | ||
3645 | */ | ||
3647 | } | 3646 | } |
3648 | 3647 | ||
3649 | static void sandybridge_update_wm(struct drm_device *dev, | 3648 | static void sandybridge_update_wm(struct drm_device *dev, |
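Two small formulas drive the self-refresh numbers above; with illustrative figures (assumptions, not values from the patch): a 1920x1080 mode at 148.5 MHz has htotal = 2200, so line_time_us = (2200 * 1000) / 148500 = 14, and a 2000 ns WM1 latency gives line_count = (2000 / 14 + 1000) / 1000 = 1 line with line_size = 1920 * 4 = 7680 bytes per line. If the resulting display watermark were 16, the spec rule "FBC WM = ((Final Primary WM * 64) / bytes per line) + 2" works out to DIV_ROUND_UP(16 * 64, 7680) + 2 = 1 + 2 = 3.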
@@ -3652,7 +3651,7 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3652 | int pixel_size) | 3651 | int pixel_size) |
3653 | { | 3652 | { |
3654 | struct drm_i915_private *dev_priv = dev->dev_private; | 3653 | struct drm_i915_private *dev_priv = dev->dev_private; |
3655 | int latency = SNB_READ_WM0_LATENCY(); | 3654 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
3656 | int fbc_wm, plane_wm, cursor_wm, enabled; | 3655 | int fbc_wm, plane_wm, cursor_wm, enabled; |
3657 | int clock; | 3656 | int clock; |
3658 | 3657 | ||
@@ -3701,9 +3700,11 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3701 | clock = planea_clock ? planea_clock : planeb_clock; | 3700 | clock = planea_clock ? planea_clock : planeb_clock; |
3702 | 3701 | ||
3703 | /* WM1 */ | 3702 | /* WM1 */ |
3704 | if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, | 3703 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, |
3705 | clock, SNB_READ_WM1_LATENCY() * 500, | 3704 | clock, SNB_READ_WM1_LATENCY() * 500, |
3706 | &fbc_wm, &plane_wm, &cursor_wm)) | 3705 | &sandybridge_display_srwm_info, |
3706 | &sandybridge_cursor_srwm_info, | ||
3707 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3707 | return; | 3708 | return; |
3708 | 3709 | ||
3709 | I915_WRITE(WM1_LP_ILK, | 3710 | I915_WRITE(WM1_LP_ILK, |
@@ -3714,10 +3715,12 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3714 | cursor_wm); | 3715 | cursor_wm); |
3715 | 3716 | ||
3716 | /* WM2 */ | 3717 | /* WM2 */ |
3717 | if (!sandybridge_compute_srwm(dev, 2, | 3718 | if (!ironlake_compute_srwm(dev, 2, |
3718 | hdisplay, htotal, pixel_size, | 3719 | hdisplay, htotal, pixel_size, |
3719 | clock, SNB_READ_WM2_LATENCY() * 500, | 3720 | clock, SNB_READ_WM2_LATENCY() * 500, |
3720 | &fbc_wm, &plane_wm, &cursor_wm)) | 3721 | &sandybridge_display_srwm_info, |
3722 | &sandybridge_cursor_srwm_info, | ||
3723 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3721 | return; | 3724 | return; |
3722 | 3725 | ||
3723 | I915_WRITE(WM2_LP_ILK, | 3726 | I915_WRITE(WM2_LP_ILK, |
@@ -3728,10 +3731,12 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3728 | cursor_wm); | 3731 | cursor_wm); |
3729 | 3732 | ||
3730 | /* WM3 */ | 3733 | /* WM3 */ |
3731 | if (!sandybridge_compute_srwm(dev, 3, | 3734 | if (!ironlake_compute_srwm(dev, 3, |
3732 | hdisplay, htotal, pixel_size, | 3735 | hdisplay, htotal, pixel_size, |
3733 | clock, SNB_READ_WM3_LATENCY() * 500, | 3736 | clock, SNB_READ_WM3_LATENCY() * 500, |
3734 | &fbc_wm, &plane_wm, &cursor_wm)) | 3737 | &sandybridge_display_srwm_info, |
3738 | &sandybridge_cursor_srwm_info, | ||
3739 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3735 | return; | 3740 | return; |
3736 | 3741 | ||
3737 | I915_WRITE(WM3_LP_ILK, | 3742 | I915_WRITE(WM3_LP_ILK, |
@@ -3951,7 +3956,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3951 | int lane = 0, link_bw, bpp; | 3956 | int lane = 0, link_bw, bpp; |
3952 | /* CPU eDP doesn't require FDI link, so just set DP M/N | 3957 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
3953 | according to current link config */ | 3958 | according to current link config */ |
3954 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { | 3959 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3955 | target_clock = mode->clock; | 3960 | target_clock = mode->clock; |
3956 | intel_edp_link_config(has_edp_encoder, | 3961 | intel_edp_link_config(has_edp_encoder, |
3957 | &lane, &link_bw); | 3962 | &lane, &link_bw); |
@@ -5038,8 +5043,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
5038 | drm_i915_private_t *dev_priv = dev->dev_private; | 5043 | drm_i915_private_t *dev_priv = dev->dev_private; |
5039 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5044 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5040 | int pipe = intel_crtc->pipe; | 5045 | int pipe = intel_crtc->pipe; |
5041 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 5046 | int dpll_reg = DPLL(pipe); |
5042 | int dpll = I915_READ(dpll_reg); | 5047 | int dpll; |
5043 | 5048 | ||
5044 | if (HAS_PCH_SPLIT(dev)) | 5049 | if (HAS_PCH_SPLIT(dev)) |
5045 | return; | 5050 | return; |
@@ -5047,17 +5052,19 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
5047 | if (!dev_priv->lvds_downclock_avail) | 5052 | if (!dev_priv->lvds_downclock_avail) |
5048 | return; | 5053 | return; |
5049 | 5054 | ||
5055 | dpll = I915_READ(dpll_reg); | ||
5050 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 5056 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
5051 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 5057 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
5052 | 5058 | ||
5053 | /* Unlock panel regs */ | 5059 | /* Unlock panel regs */ |
5054 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | 5060 | I915_WRITE(PP_CONTROL, |
5055 | PANEL_UNLOCK_REGS); | 5061 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
5056 | 5062 | ||
5057 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 5063 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
5058 | I915_WRITE(dpll_reg, dpll); | 5064 | I915_WRITE(dpll_reg, dpll); |
5059 | dpll = I915_READ(dpll_reg); | 5065 | POSTING_READ(dpll_reg); |
5060 | intel_wait_for_vblank(dev, pipe); | 5066 | intel_wait_for_vblank(dev, pipe); |
5067 | |||
5061 | dpll = I915_READ(dpll_reg); | 5068 | dpll = I915_READ(dpll_reg); |
5062 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 5069 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
5063 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 5070 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
@@ -5802,6 +5809,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
5802 | encoder->base.possible_clones = | 5809 | encoder->base.possible_clones = |
5803 | intel_encoder_clones(dev, encoder->clone_mask); | 5810 | intel_encoder_clones(dev, encoder->clone_mask); |
5804 | } | 5811 | } |
5812 | |||
5813 | intel_panel_setup_backlight(dev); | ||
5805 | } | 5814 | } |
5806 | 5815 | ||
5807 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 5816 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -6145,6 +6154,10 @@ void intel_init_emon(struct drm_device *dev) | |||
6145 | 6154 | ||
6146 | void gen6_enable_rps(struct drm_i915_private *dev_priv) | 6155 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
6147 | { | 6156 | { |
6157 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
6158 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
6159 | u32 pcu_mbox; | ||
6160 | int cur_freq, min_freq, max_freq; | ||
6148 | int i; | 6161 | int i; |
6149 | 6162 | ||
6150 | /* Here begins a magic sequence of register writes to enable | 6163 | /* Here begins a magic sequence of register writes to enable |
@@ -6216,6 +6229,29 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6216 | 500)) | 6229 | 500)) |
6217 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | 6230 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
6218 | 6231 | ||
6232 | min_freq = (rp_state_cap & 0xff0000) >> 16; | ||
6233 | max_freq = rp_state_cap & 0xff; | ||
6234 | cur_freq = (gt_perf_status & 0xff00) >> 8; | ||
6235 | |||
6236 | /* Check for overclock support */ | ||
6237 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6238 | 500)) | ||
6239 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
6240 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
6241 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
6242 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6243 | 500)) | ||
6244 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
6245 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
6246 | max_freq = pcu_mbox & 0xff; | ||
6247 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100); | ||
6248 | } | ||
6249 | |||
6250 | /* In units of 100MHz */ | ||
6251 | dev_priv->max_delay = max_freq; | ||
6252 | dev_priv->min_delay = min_freq; | ||
6253 | dev_priv->cur_delay = cur_freq; | ||
6254 | |||
6219 | /* requires MSI enabled */ | 6255 | /* requires MSI enabled */ |
6220 | I915_WRITE(GEN6_PMIER, | 6256 | I915_WRITE(GEN6_PMIER, |
6221 | GEN6_PM_MBOX_EVENT | | 6257 | GEN6_PM_MBOX_EVENT | |
@@ -6386,42 +6422,6 @@ void intel_enable_clock_gating(struct drm_device *dev) | |||
6386 | } else if (IS_I830(dev)) { | 6422 | } else if (IS_I830(dev)) { |
6387 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | 6423 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
6388 | } | 6424 | } |
6389 | |||
6390 | /* | ||
6391 | * GPU can automatically power down the render unit if given a page | ||
6392 | * to save state. | ||
6393 | */ | ||
6394 | if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */ | ||
6395 | if (dev_priv->renderctx == NULL) | ||
6396 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
6397 | if (dev_priv->renderctx) { | ||
6398 | struct drm_i915_gem_object *obj = dev_priv->renderctx; | ||
6399 | if (BEGIN_LP_RING(4) == 0) { | ||
6400 | OUT_RING(MI_SET_CONTEXT); | ||
6401 | OUT_RING(obj->gtt_offset | | ||
6402 | MI_MM_SPACE_GTT | | ||
6403 | MI_SAVE_EXT_STATE_EN | | ||
6404 | MI_RESTORE_EXT_STATE_EN | | ||
6405 | MI_RESTORE_INHIBIT); | ||
6406 | OUT_RING(MI_NOOP); | ||
6407 | OUT_RING(MI_FLUSH); | ||
6408 | ADVANCE_LP_RING(); | ||
6409 | } | ||
6410 | } else | ||
6411 | DRM_DEBUG_KMS("Failed to allocate render context." | ||
6412 | "Disable RC6\n"); | ||
6413 | } | ||
6414 | |||
6415 | if (IS_GEN4(dev) && IS_MOBILE(dev)) { | ||
6416 | if (dev_priv->pwrctx == NULL) | ||
6417 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
6418 | if (dev_priv->pwrctx) { | ||
6419 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; | ||
6420 | I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN); | ||
6421 | I915_WRITE(MCHBAR_RENDER_STANDBY, | ||
6422 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | ||
6423 | } | ||
6424 | } | ||
6425 | } | 6425 | } |
6426 | 6426 | ||
6427 | void intel_disable_clock_gating(struct drm_device *dev) | 6427 | void intel_disable_clock_gating(struct drm_device *dev) |
@@ -6451,6 +6451,57 @@ void intel_disable_clock_gating(struct drm_device *dev) | |||
6451 | } | 6451 | } |
6452 | } | 6452 | } |
6453 | 6453 | ||
6454 | static void ironlake_disable_rc6(struct drm_device *dev) | ||
6455 | { | ||
6456 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6457 | |||
6458 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | ||
6459 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
6460 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
6461 | 10); | ||
6462 | POSTING_READ(CCID); | ||
6463 | I915_WRITE(PWRCTXA, 0); | ||
6464 | POSTING_READ(PWRCTXA); | ||
6465 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
6466 | POSTING_READ(RSTDBYCTL); | ||
6467 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6468 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6469 | dev_priv->renderctx = NULL; | ||
6470 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6471 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
6472 | dev_priv->pwrctx = NULL; | ||
6473 | } | ||
6474 | |||
6475 | void ironlake_enable_rc6(struct drm_device *dev) | ||
6476 | { | ||
6477 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6478 | int ret; | ||
6479 | |||
6480 | /* | ||
6481 | * GPU can automatically power down the render unit if given a page | ||
6482 | * to save state. | ||
6483 | */ | ||
6484 | ret = BEGIN_LP_RING(6); | ||
6485 | if (ret) { | ||
6486 | ironlake_disable_rc6(dev); | ||
6487 | return; | ||
6488 | } | ||
6489 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | ||
6490 | OUT_RING(MI_SET_CONTEXT); | ||
6491 | OUT_RING(dev_priv->renderctx->gtt_offset | | ||
6492 | MI_MM_SPACE_GTT | | ||
6493 | MI_SAVE_EXT_STATE_EN | | ||
6494 | MI_RESTORE_EXT_STATE_EN | | ||
6495 | MI_RESTORE_INHIBIT); | ||
6496 | OUT_RING(MI_SUSPEND_FLUSH); | ||
6497 | OUT_RING(MI_NOOP); | ||
6498 | OUT_RING(MI_FLUSH); | ||
6499 | ADVANCE_LP_RING(); | ||
6500 | |||
6501 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); | ||
6502 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
6503 | } | ||
6504 | |||
6454 | /* Set up chip specific display functions */ | 6505 | /* Set up chip specific display functions */ |
6455 | static void intel_init_display(struct drm_device *dev) | 6506 | static void intel_init_display(struct drm_device *dev) |
6456 | { | 6507 | { |
@@ -6665,12 +6716,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
6665 | dev->mode_config.max_width = 8192; | 6716 | dev->mode_config.max_width = 8192; |
6666 | dev->mode_config.max_height = 8192; | 6717 | dev->mode_config.max_height = 8192; |
6667 | } | 6718 | } |
6668 | 6719 | dev->mode_config.fb_base = dev->agp->base; | |
6669 | /* set memory base */ | ||
6670 | if (IS_GEN2(dev)) | ||
6671 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | ||
6672 | else | ||
6673 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | ||
6674 | 6720 | ||
6675 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 6721 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
6676 | dev_priv->num_pipe = 2; | 6722 | dev_priv->num_pipe = 2; |
@@ -6698,6 +6744,21 @@ void intel_modeset_init(struct drm_device *dev) | |||
6698 | if (IS_GEN6(dev)) | 6744 | if (IS_GEN6(dev)) |
6699 | gen6_enable_rps(dev_priv); | 6745 | gen6_enable_rps(dev_priv); |
6700 | 6746 | ||
6747 | if (IS_IRONLAKE_M(dev)) { | ||
6748 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
6749 | if (!dev_priv->renderctx) | ||
6750 | goto skip_rc6; | ||
6751 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
6752 | if (!dev_priv->pwrctx) { | ||
6753 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6754 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6755 | dev_priv->renderctx = NULL; | ||
6756 | goto skip_rc6; | ||
6757 | } | ||
6758 | ironlake_enable_rc6(dev); | ||
6759 | } | ||
6760 | |||
6761 | skip_rc6: | ||
6701 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 6762 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6702 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 6763 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6703 | (unsigned long)dev); | 6764 | (unsigned long)dev); |
@@ -6734,7 +6795,8 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6734 | if (IS_GEN6(dev)) | 6795 | if (IS_GEN6(dev)) |
6735 | gen6_disable_rps(dev); | 6796 | gen6_disable_rps(dev); |
6736 | 6797 | ||
6737 | intel_disable_clock_gating(dev); | 6798 | if (IS_IRONLAKE_M(dev)) |
6799 | ironlake_disable_rc6(dev); | ||
6738 | 6800 | ||
6739 | mutex_unlock(&dev->struct_mutex); | 6801 | mutex_unlock(&dev->struct_mutex); |
6740 | 6802 | ||
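The gen6_enable_rps() hunk above pulls the RPS limits straight out of two status registers: the minimum and maximum frequencies from GEN6_RP_STATE_CAP and the current frequency from GEN6_GT_PERF_STATUS, each as an 8-bit field that the hunk treats as a multiple of 100 MHz. A rough, self-contained sketch of that decode (hypothetical register values, plain user-space C, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t rp_state_cap   = 0x000b0011; /* hypothetical GEN6_RP_STATE_CAP value */
        uint32_t gt_perf_status = 0x00000d00; /* hypothetical GEN6_GT_PERF_STATUS value */

        int min_freq = (rp_state_cap & 0xff0000) >> 16; /* bits 23:16 */
        int max_freq = rp_state_cap & 0xff;             /* bits 7:0  */
        int cur_freq = (gt_perf_status & 0xff00) >> 8;  /* bits 15:8 */

        /* The hunk stores these raw fields and treats them as multiples of 100 MHz. */
        printf("min %d MHz, cur %d MHz, max %d MHz\n",
               min_freq * 100, cur_freq * 100, max_freq * 100);
        return 0;
}

With those made-up values this prints min 1100 MHz, cur 1300 MHz, max 1700 MHz; the driver hunk itself keeps the raw fields in dev_priv->min_delay, cur_delay and max_delay.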
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1dc60408d5b8..1f4242b682c8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) | |||
1153 | static uint32_t | 1153 | static uint32_t |
1154 | intel_gen6_edp_signal_levels(uint8_t train_set) | 1154 | intel_gen6_edp_signal_levels(uint8_t train_set) |
1155 | { | 1155 | { |
1156 | switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { | 1156 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
1157 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
1158 | switch (signal_levels) { | ||
1157 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 1159 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
1158 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | 1160 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
1161 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | ||
1162 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
1163 | return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; | ||
1159 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 1164 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
1160 | return EDP_LINK_TRAIN_400MV_6DB_SNB_B; | 1165 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
1166 | return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; | ||
1161 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 1167 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1162 | return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; | 1168 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1169 | return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; | ||
1163 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 1170 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
1164 | return EDP_LINK_TRAIN_800MV_0DB_SNB_B; | 1171 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: |
1172 | return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; | ||
1165 | default: | 1173 | default: |
1166 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); | 1174 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
1167 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | 1175 | "0x%x\n", signal_levels); |
1176 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | ||
1168 | } | 1177 | } |
1169 | } | 1178 | } |
1170 | 1179 | ||
@@ -1334,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1334 | struct drm_device *dev = intel_dp->base.base.dev; | 1343 | struct drm_device *dev = intel_dp->base.base.dev; |
1335 | struct drm_i915_private *dev_priv = dev->dev_private; | 1344 | struct drm_i915_private *dev_priv = dev->dev_private; |
1336 | bool channel_eq = false; | 1345 | bool channel_eq = false; |
1337 | int tries; | 1346 | int tries, cr_tries; |
1338 | u32 reg; | 1347 | u32 reg; |
1339 | uint32_t DP = intel_dp->DP; | 1348 | uint32_t DP = intel_dp->DP; |
1340 | 1349 | ||
1341 | /* channel equalization */ | 1350 | /* channel equalization */ |
1342 | tries = 0; | 1351 | tries = 0; |
1352 | cr_tries = 0; | ||
1343 | channel_eq = false; | 1353 | channel_eq = false; |
1344 | for (;;) { | 1354 | for (;;) { |
1345 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1355 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1346 | uint32_t signal_levels; | 1356 | uint32_t signal_levels; |
1347 | 1357 | ||
1358 | if (cr_tries > 5) { | ||
1359 | DRM_ERROR("failed to train DP, aborting\n"); | ||
1360 | intel_dp_link_down(intel_dp); | ||
1361 | break; | ||
1362 | } | ||
1363 | |||
1348 | if (IS_GEN6(dev) && is_edp(intel_dp)) { | 1364 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1349 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1365 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1350 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1366 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
@@ -1367,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1367 | if (!intel_dp_get_link_status(intel_dp)) | 1383 | if (!intel_dp_get_link_status(intel_dp)) |
1368 | break; | 1384 | break; |
1369 | 1385 | ||
1386 | /* Make sure clock is still ok */ | ||
1387 | if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | ||
1388 | intel_dp_start_link_train(intel_dp); | ||
1389 | cr_tries++; | ||
1390 | continue; | ||
1391 | } | ||
1392 | |||
1370 | if (intel_channel_eq_ok(intel_dp)) { | 1393 | if (intel_channel_eq_ok(intel_dp)) { |
1371 | channel_eq = true; | 1394 | channel_eq = true; |
1372 | break; | 1395 | break; |
1373 | } | 1396 | } |
1374 | 1397 | ||
1375 | /* Try 5 times */ | 1398 | /* Try 5 times, then try clock recovery if that fails */ |
1376 | if (tries > 5) | 1399 | if (tries > 5) { |
1377 | break; | 1400 | intel_dp_link_down(intel_dp); |
1401 | intel_dp_start_link_train(intel_dp); | ||
1402 | tries = 0; | ||
1403 | cr_tries++; | ||
1404 | continue; | ||
1405 | } | ||
1378 | 1406 | ||
1379 | /* Compute new intel_dp->train_set as requested by target */ | 1407 | /* Compute new intel_dp->train_set as requested by target */ |
1380 | intel_get_adjust_train(intel_dp); | 1408 | intel_get_adjust_train(intel_dp); |
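The intel_dp_complete_link_train() change above bounds the training loop in two ways: channel-equalization passes are retried as before, but if clock recovery is lost, or equalization fails more than five times, training restarts from scratch, and after five such restarts (cr_tries) the link is dropped for good. A simplified, runnable sketch of that control flow (the clock_ok()/chan_eq_ok() stubs are invented stand-ins for the DPCD status checks, not driver code):

#include <stdio.h>
#include <stdbool.h>

static int pass;                               /* which training pass we are on */
static bool clock_ok(void)   { return pass != 3; } /* pretend CR drops out once */
static bool chan_eq_ok(void) { return pass >= 5; } /* pretend EQ succeeds on pass 5 */

int main(void)
{
        int tries = 0, cr_tries = 0;

        for (;;) {
                if (cr_tries > 5) {            /* too many full restarts: give up */
                        printf("link training aborted\n");
                        break;
                }
                pass++;                        /* one voltage/pre-emphasis pass */
                if (!clock_ok()) {             /* clock recovery lost: restart CR */
                        cr_tries++;
                        continue;
                }
                if (chan_eq_ok()) {            /* channel EQ done: success */
                        printf("trained after %d passes\n", pass);
                        break;
                }
                if (tries++ > 5) {             /* EQ not converging: full retrain */
                        tries = 0;
                        cr_tries++;
                }
        }
        return 0;
}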
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d782ad9fd6db..74db2557d644 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -257,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, | |||
257 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 257 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
258 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | 258 | extern u32 intel_panel_get_backlight(struct drm_device *dev); |
259 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 259 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
260 | extern void intel_panel_setup_backlight(struct drm_device *dev); | ||
261 | extern void intel_panel_enable_backlight(struct drm_device *dev); | ||
262 | extern void intel_panel_disable_backlight(struct drm_device *dev); | ||
260 | 263 | ||
261 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 264 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
262 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 265 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 701e830d0012..ee145a257287 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -62,6 +62,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
62 | struct drm_fb_helper_surface_size *sizes) | 62 | struct drm_fb_helper_surface_size *sizes) |
63 | { | 63 | { |
64 | struct drm_device *dev = ifbdev->helper.dev; | 64 | struct drm_device *dev = ifbdev->helper.dev; |
65 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
65 | struct fb_info *info; | 66 | struct fb_info *info; |
66 | struct drm_framebuffer *fb; | 67 | struct drm_framebuffer *fb; |
67 | struct drm_mode_fb_cmd mode_cmd; | 68 | struct drm_mode_fb_cmd mode_cmd; |
@@ -77,7 +78,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
77 | mode_cmd.height = sizes->surface_height; | 78 | mode_cmd.height = sizes->surface_height; |
78 | 79 | ||
79 | mode_cmd.bpp = sizes->surface_bpp; | 80 | mode_cmd.bpp = sizes->surface_bpp; |
80 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); | 81 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); |
81 | mode_cmd.depth = sizes->surface_depth; | 82 | mode_cmd.depth = sizes->surface_depth; |
82 | 83 | ||
83 | size = mode_cmd.pitch * mode_cmd.height; | 84 | size = mode_cmd.pitch * mode_cmd.height; |
@@ -120,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
120 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | 121 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
121 | info->fbops = &intelfb_ops; | 122 | info->fbops = &intelfb_ops; |
122 | 123 | ||
124 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
125 | if (ret) { | ||
126 | ret = -ENOMEM; | ||
127 | goto out_unpin; | ||
128 | } | ||
123 | /* setup aperture base/size for vesafb takeover */ | 129 | /* setup aperture base/size for vesafb takeover */ |
124 | info->apertures = alloc_apertures(1); | 130 | info->apertures = alloc_apertures(1); |
125 | if (!info->apertures) { | 131 | if (!info->apertures) { |
@@ -127,10 +133,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
127 | goto out_unpin; | 133 | goto out_unpin; |
128 | } | 134 | } |
129 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | 135 | info->apertures->ranges[0].base = dev->mode_config.fb_base; |
130 | if (!IS_GEN2(dev)) | 136 | info->apertures->ranges[0].size = |
131 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); | 137 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
132 | else | ||
133 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); | ||
134 | 138 | ||
135 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; | 139 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
136 | info->fix.smem_len = size; | 140 | info->fix.smem_len = size; |
@@ -140,12 +144,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
140 | ret = -ENOSPC; | 144 | ret = -ENOSPC; |
141 | goto out_unpin; | 145 | goto out_unpin; |
142 | } | 146 | } |
143 | |||
144 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
145 | if (ret) { | ||
146 | ret = -ENOMEM; | ||
147 | goto out_unpin; | ||
148 | } | ||
149 | info->screen_size = size; | 147 | info->screen_size = size; |
150 | 148 | ||
151 | // memset(info->screen_base, 0, size); | 149 | // memset(info->screen_base, 0, size); |
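The intelfb_create() hunk above replaces (bpp + 1) / 8 with (bpp + 7) / 8 when computing the framebuffer pitch; only the latter rounds a bit depth up to whole bytes per pixel. For the usual byte-aligned depths the two agree, which is why the old form went unnoticed, but they diverge for depths such as 30 bpp. A small stand-alone example of the arithmetic (not part of the patch):

#include <stdio.h>

int main(void)
{
        int bpps[] = { 8, 15, 16, 24, 30, 32 };

        for (int i = 0; i < (int)(sizeof(bpps) / sizeof(bpps[0])); i++) {
                int bpp = bpps[i];
                printf("bpp %2d: old (bpp+1)/8 = %d bytes, new (bpp+7)/8 = %d bytes\n",
                       bpp, (bpp + 1) / 8, (bpp + 7) / 8);
        }
        return 0;
}

Only the 30 bpp row differs between the two formulas, and the new result of 4 bytes is the correct round-up.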
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index aa2307080be2..8f4f6bd33ee9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) | |||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | 106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
107 | POSTING_READ(lvds_reg); | 107 | POSTING_READ(lvds_reg); |
108 | 108 | ||
109 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | 109 | intel_panel_enable_backlight(dev); |
110 | } | 110 | } |
111 | 111 | ||
112 | static void intel_lvds_disable(struct intel_lvds *intel_lvds) | 112 | static void intel_lvds_disable(struct intel_lvds *intel_lvds) |
@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds) | |||
123 | lvds_reg = LVDS; | 123 | lvds_reg = LVDS; |
124 | } | 124 | } |
125 | 125 | ||
126 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | 126 | intel_panel_disable_backlight(dev); |
127 | intel_panel_set_backlight(dev, 0); | ||
128 | 127 | ||
129 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | 128 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); |
130 | 129 | ||
@@ -375,6 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
375 | } | 374 | } |
376 | 375 | ||
377 | out: | 376 | out: |
377 | if ((pfit_control & PFIT_ENABLE) == 0) { | ||
378 | pfit_control = 0; | ||
379 | pfit_pgm_ratios = 0; | ||
380 | } | ||
378 | if (pfit_control != intel_lvds->pfit_control || | 381 | if (pfit_control != intel_lvds->pfit_control || |
379 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { | 382 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { |
380 | intel_lvds->pfit_control = pfit_control; | 383 | intel_lvds->pfit_control = pfit_control; |
@@ -398,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) | |||
398 | struct drm_i915_private *dev_priv = dev->dev_private; | 401 | struct drm_i915_private *dev_priv = dev->dev_private; |
399 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 402 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
400 | 403 | ||
401 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
402 | |||
403 | /* We try to do the minimum that is necessary in order to unlock | 404 | /* We try to do the minimum that is necessary in order to unlock |
404 | * the registers for mode setting. | 405 | * the registers for mode setting. |
405 | * | 406 | * |
@@ -430,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder) | |||
430 | struct drm_i915_private *dev_priv = dev->dev_private; | 431 | struct drm_i915_private *dev_priv = dev->dev_private; |
431 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 432 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
432 | 433 | ||
433 | if (dev_priv->backlight_level == 0) | ||
434 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | ||
435 | |||
436 | /* Undo any unlocking done in prepare to prevent accidental | 434 | /* Undo any unlocking done in prepare to prevent accidental |
437 | * adjustment of the registers. | 435 | * adjustment of the registers. |
438 | */ | 436 | */ |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 7350ec2515c6..e00d200df3db 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -250,3 +250,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
250 | tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; | 250 | tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; |
251 | I915_WRITE(BLC_PWM_CTL, tmp | level); | 251 | I915_WRITE(BLC_PWM_CTL, tmp | level); |
252 | } | 252 | } |
253 | |||
254 | void intel_panel_disable_backlight(struct drm_device *dev) | ||
255 | { | ||
256 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
257 | |||
258 | if (dev_priv->backlight_enabled) { | ||
259 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
260 | dev_priv->backlight_enabled = false; | ||
261 | } | ||
262 | |||
263 | intel_panel_set_backlight(dev, 0); | ||
264 | } | ||
265 | |||
266 | void intel_panel_enable_backlight(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
269 | |||
270 | if (dev_priv->backlight_level == 0) | ||
271 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | ||
272 | |||
273 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
274 | dev_priv->backlight_enabled = true; | ||
275 | } | ||
276 | |||
277 | void intel_panel_setup_backlight(struct drm_device *dev) | ||
278 | { | ||
279 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
280 | |||
281 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | ||
282 | dev_priv->backlight_enabled = dev_priv->backlight_level != 0; | ||
283 | } | ||
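The new intel_panel_{setup,enable,disable}_backlight() helpers above centralize the save/restore dance that intel_lvds.c previously open-coded: disable remembers the live duty cycle (only while the backlight is still marked enabled, so a second disable cannot clobber the saved value with zero) before switching the panel off, and enable restores it, falling back to the maximum when nothing useful was saved. A toy sketch of that pattern in plain C (variable names invented, not driver code):

#include <stdio.h>
#include <stdbool.h>

static unsigned int hw_duty = 180;          /* pretend PWM duty-cycle register */
static unsigned int saved_level;
static bool enabled = true;
static const unsigned int max_level = 255;

static void backlight_disable(void)
{
        if (enabled) {                      /* only save a live value once */
                saved_level = hw_duty;
                enabled = false;
        }
        hw_duty = 0;                        /* actually turn the panel dark */
}

static void backlight_enable(void)
{
        if (saved_level == 0)               /* nothing saved: fall back to max */
                saved_level = max_level;
        hw_duty = saved_level;
        enabled = true;
}

int main(void)
{
        backlight_disable();
        backlight_disable();                /* second call must not save the 0 */
        backlight_enable();
        printf("restored duty cycle: %u\n", hw_duty);   /* 180, not 0 */
        return 0;
}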
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 56bc95c056dd..03e337072517 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -48,7 +48,7 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) | |||
48 | return seqno; | 48 | return seqno; |
49 | } | 49 | } |
50 | 50 | ||
51 | static void | 51 | static int |
52 | render_ring_flush(struct intel_ring_buffer *ring, | 52 | render_ring_flush(struct intel_ring_buffer *ring, |
53 | u32 invalidate_domains, | 53 | u32 invalidate_domains, |
54 | u32 flush_domains) | 54 | u32 flush_domains) |
@@ -56,6 +56,7 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
56 | struct drm_device *dev = ring->dev; | 56 | struct drm_device *dev = ring->dev; |
57 | drm_i915_private_t *dev_priv = dev->dev_private; | 57 | drm_i915_private_t *dev_priv = dev->dev_private; |
58 | u32 cmd; | 58 | u32 cmd; |
59 | int ret; | ||
59 | 60 | ||
60 | #if WATCH_EXEC | 61 | #if WATCH_EXEC |
61 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | 62 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, |
@@ -116,12 +117,16 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
116 | #if WATCH_EXEC | 117 | #if WATCH_EXEC |
117 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | 118 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); |
118 | #endif | 119 | #endif |
119 | if (intel_ring_begin(ring, 2) == 0) { | 120 | ret = intel_ring_begin(ring, 2); |
120 | intel_ring_emit(ring, cmd); | 121 | if (ret) |
121 | intel_ring_emit(ring, MI_NOOP); | 122 | return ret; |
122 | intel_ring_advance(ring); | 123 | |
123 | } | 124 | intel_ring_emit(ring, cmd); |
125 | intel_ring_emit(ring, MI_NOOP); | ||
126 | intel_ring_advance(ring); | ||
124 | } | 127 | } |
128 | |||
129 | return 0; | ||
125 | } | 130 | } |
126 | 131 | ||
127 | static void ring_write_tail(struct intel_ring_buffer *ring, | 132 | static void ring_write_tail(struct intel_ring_buffer *ring, |
@@ -480,26 +485,56 @@ pc_render_get_seqno(struct intel_ring_buffer *ring) | |||
480 | return pc->cpu_page[0]; | 485 | return pc->cpu_page[0]; |
481 | } | 486 | } |
482 | 487 | ||
488 | static void | ||
489 | ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
490 | { | ||
491 | dev_priv->gt_irq_mask &= ~mask; | ||
492 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
493 | POSTING_READ(GTIMR); | ||
494 | } | ||
495 | |||
496 | static void | ||
497 | ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
498 | { | ||
499 | dev_priv->gt_irq_mask |= mask; | ||
500 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
501 | POSTING_READ(GTIMR); | ||
502 | } | ||
503 | |||
504 | static void | ||
505 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
506 | { | ||
507 | dev_priv->irq_mask &= ~mask; | ||
508 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
509 | POSTING_READ(IMR); | ||
510 | } | ||
511 | |||
512 | static void | ||
513 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
514 | { | ||
515 | dev_priv->irq_mask |= mask; | ||
516 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
517 | POSTING_READ(IMR); | ||
518 | } | ||
519 | |||
483 | static bool | 520 | static bool |
484 | render_ring_get_irq(struct intel_ring_buffer *ring) | 521 | render_ring_get_irq(struct intel_ring_buffer *ring) |
485 | { | 522 | { |
486 | struct drm_device *dev = ring->dev; | 523 | struct drm_device *dev = ring->dev; |
524 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
487 | 525 | ||
488 | if (!dev->irq_enabled) | 526 | if (!dev->irq_enabled) |
489 | return false; | 527 | return false; |
490 | 528 | ||
491 | if (atomic_inc_return(&ring->irq_refcount) == 1) { | 529 | spin_lock(&ring->irq_lock); |
492 | drm_i915_private_t *dev_priv = dev->dev_private; | 530 | if (ring->irq_refcount++ == 0) { |
493 | unsigned long irqflags; | ||
494 | |||
495 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
496 | if (HAS_PCH_SPLIT(dev)) | 531 | if (HAS_PCH_SPLIT(dev)) |
497 | ironlake_enable_graphics_irq(dev_priv, | 532 | ironlake_enable_irq(dev_priv, |
498 | GT_PIPE_NOTIFY | GT_USER_INTERRUPT); | 533 | GT_PIPE_NOTIFY | GT_USER_INTERRUPT); |
499 | else | 534 | else |
500 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 535 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
501 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
502 | } | 536 | } |
537 | spin_unlock(&ring->irq_lock); | ||
503 | 538 | ||
504 | return true; | 539 | return true; |
505 | } | 540 | } |
@@ -508,20 +543,18 @@ static void | |||
508 | render_ring_put_irq(struct intel_ring_buffer *ring) | 543 | render_ring_put_irq(struct intel_ring_buffer *ring) |
509 | { | 544 | { |
510 | struct drm_device *dev = ring->dev; | 545 | struct drm_device *dev = ring->dev; |
546 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
511 | 547 | ||
512 | if (atomic_dec_and_test(&ring->irq_refcount)) { | 548 | spin_lock(&ring->irq_lock); |
513 | drm_i915_private_t *dev_priv = dev->dev_private; | 549 | if (--ring->irq_refcount == 0) { |
514 | unsigned long irqflags; | ||
515 | |||
516 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
517 | if (HAS_PCH_SPLIT(dev)) | 550 | if (HAS_PCH_SPLIT(dev)) |
518 | ironlake_disable_graphics_irq(dev_priv, | 551 | ironlake_disable_irq(dev_priv, |
519 | GT_USER_INTERRUPT | | 552 | GT_USER_INTERRUPT | |
520 | GT_PIPE_NOTIFY); | 553 | GT_PIPE_NOTIFY); |
521 | else | 554 | else |
522 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 555 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
523 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
524 | } | 556 | } |
557 | spin_unlock(&ring->irq_lock); | ||
525 | } | 558 | } |
526 | 559 | ||
527 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | 560 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) |
@@ -534,19 +567,24 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
534 | POSTING_READ(mmio); | 567 | POSTING_READ(mmio); |
535 | } | 568 | } |
536 | 569 | ||
537 | static void | 570 | static int |
538 | bsd_ring_flush(struct intel_ring_buffer *ring, | 571 | bsd_ring_flush(struct intel_ring_buffer *ring, |
539 | u32 invalidate_domains, | 572 | u32 invalidate_domains, |
540 | u32 flush_domains) | 573 | u32 flush_domains) |
541 | { | 574 | { |
575 | int ret; | ||
576 | |||
542 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 577 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) |
543 | return; | 578 | return 0; |
544 | 579 | ||
545 | if (intel_ring_begin(ring, 2) == 0) { | 580 | ret = intel_ring_begin(ring, 2); |
546 | intel_ring_emit(ring, MI_FLUSH); | 581 | if (ret) |
547 | intel_ring_emit(ring, MI_NOOP); | 582 | return ret; |
548 | intel_ring_advance(ring); | 583 | |
549 | } | 584 | intel_ring_emit(ring, MI_FLUSH); |
585 | intel_ring_emit(ring, MI_NOOP); | ||
586 | intel_ring_advance(ring); | ||
587 | return 0; | ||
550 | } | 588 | } |
551 | 589 | ||
552 | static int | 590 | static int |
@@ -577,18 +615,15 @@ static bool | |||
577 | ring_get_irq(struct intel_ring_buffer *ring, u32 flag) | 615 | ring_get_irq(struct intel_ring_buffer *ring, u32 flag) |
578 | { | 616 | { |
579 | struct drm_device *dev = ring->dev; | 617 | struct drm_device *dev = ring->dev; |
618 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
580 | 619 | ||
581 | if (!dev->irq_enabled) | 620 | if (!dev->irq_enabled) |
582 | return false; | 621 | return false; |
583 | 622 | ||
584 | if (atomic_inc_return(&ring->irq_refcount) == 1) { | 623 | spin_lock(&ring->irq_lock); |
585 | drm_i915_private_t *dev_priv = dev->dev_private; | 624 | if (ring->irq_refcount++ == 0) |
586 | unsigned long irqflags; | 625 | ironlake_enable_irq(dev_priv, flag); |
587 | 626 | spin_unlock(&ring->irq_lock); | |
588 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
589 | ironlake_enable_graphics_irq(dev_priv, flag); | ||
590 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
591 | } | ||
592 | 627 | ||
593 | return true; | 628 | return true; |
594 | } | 629 | } |
@@ -597,15 +632,47 @@ static void | |||
597 | ring_put_irq(struct intel_ring_buffer *ring, u32 flag) | 632 | ring_put_irq(struct intel_ring_buffer *ring, u32 flag) |
598 | { | 633 | { |
599 | struct drm_device *dev = ring->dev; | 634 | struct drm_device *dev = ring->dev; |
635 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
600 | 636 | ||
601 | if (atomic_dec_and_test(&ring->irq_refcount)) { | 637 | spin_lock(&ring->irq_lock); |
602 | drm_i915_private_t *dev_priv = dev->dev_private; | 638 | if (--ring->irq_refcount == 0) |
603 | unsigned long irqflags; | 639 | ironlake_disable_irq(dev_priv, flag); |
640 | spin_unlock(&ring->irq_lock); | ||
641 | } | ||
604 | 642 | ||
605 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 643 | static bool |
606 | ironlake_disable_graphics_irq(dev_priv, flag); | 644 | gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
607 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 645 | { |
646 | struct drm_device *dev = ring->dev; | ||
647 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
648 | |||
649 | if (!dev->irq_enabled) | ||
650 | return false; | ||
651 | |||
652 | spin_lock(&ring->irq_lock); | ||
653 | if (ring->irq_refcount++ == 0) { | ||
654 | ring->irq_mask &= ~rflag; | ||
655 | I915_WRITE_IMR(ring, ring->irq_mask); | ||
656 | ironlake_enable_irq(dev_priv, gflag); | ||
608 | } | 657 | } |
658 | spin_unlock(&ring->irq_lock); | ||
659 | |||
660 | return true; | ||
661 | } | ||
662 | |||
663 | static void | ||
664 | gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) | ||
665 | { | ||
666 | struct drm_device *dev = ring->dev; | ||
667 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
668 | |||
669 | spin_lock(&ring->irq_lock); | ||
670 | if (--ring->irq_refcount == 0) { | ||
671 | ring->irq_mask |= rflag; | ||
672 | I915_WRITE_IMR(ring, ring->irq_mask); | ||
673 | ironlake_disable_irq(dev_priv, gflag); | ||
674 | } | ||
675 | spin_unlock(&ring->irq_lock); | ||
609 | } | 676 | } |
610 | 677 | ||
611 | static bool | 678 | static bool |
@@ -748,6 +815,9 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
748 | INIT_LIST_HEAD(&ring->request_list); | 815 | INIT_LIST_HEAD(&ring->request_list); |
749 | INIT_LIST_HEAD(&ring->gpu_write_list); | 816 | INIT_LIST_HEAD(&ring->gpu_write_list); |
750 | 817 | ||
818 | spin_lock_init(&ring->irq_lock); | ||
819 | ring->irq_mask = ~0; | ||
820 | |||
751 | if (I915_NEED_GFX_HWS(dev)) { | 821 | if (I915_NEED_GFX_HWS(dev)) { |
752 | ret = init_status_page(ring); | 822 | ret = init_status_page(ring); |
753 | if (ret) | 823 | if (ret) |
@@ -785,6 +855,14 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
785 | if (ret) | 855 | if (ret) |
786 | goto err_unmap; | 856 | goto err_unmap; |
787 | 857 | ||
858 | /* Workaround an erratum on the i830 which causes a hang if | ||
859 | * the TAIL pointer points to within the last 2 cachelines | ||
860 | * of the buffer. | ||
861 | */ | ||
862 | ring->effective_size = ring->size; | ||
863 | if (IS_I830(ring->dev)) | ||
864 | ring->effective_size -= 128; | ||
865 | |||
788 | return 0; | 866 | return 0; |
789 | 867 | ||
790 | err_unmap: | 868 | err_unmap: |
@@ -827,8 +905,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
827 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | 905 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
828 | { | 906 | { |
829 | unsigned int *virt; | 907 | unsigned int *virt; |
830 | int rem; | 908 | int rem = ring->size - ring->tail; |
831 | rem = ring->size - ring->tail; | ||
832 | 909 | ||
833 | if (ring->space < rem) { | 910 | if (ring->space < rem) { |
834 | int ret = intel_wait_ring_buffer(ring, rem); | 911 | int ret = intel_wait_ring_buffer(ring, rem); |
@@ -895,7 +972,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring, | |||
895 | int n = 4*num_dwords; | 972 | int n = 4*num_dwords; |
896 | int ret; | 973 | int ret; |
897 | 974 | ||
898 | if (unlikely(ring->tail + n > ring->size)) { | 975 | if (unlikely(ring->tail + n > ring->effective_size)) { |
899 | ret = intel_wrap_ring_buffer(ring); | 976 | ret = intel_wrap_ring_buffer(ring); |
900 | if (unlikely(ret)) | 977 | if (unlikely(ret)) |
901 | return ret; | 978 | return ret; |
@@ -973,20 +1050,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | |||
973 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | 1050 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
974 | } | 1051 | } |
975 | 1052 | ||
976 | static void gen6_ring_flush(struct intel_ring_buffer *ring, | 1053 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
977 | u32 invalidate_domains, | 1054 | u32 invalidate_domains, |
978 | u32 flush_domains) | 1055 | u32 flush_domains) |
979 | { | 1056 | { |
1057 | int ret; | ||
1058 | |||
980 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 1059 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) |
981 | return; | 1060 | return 0; |
982 | 1061 | ||
983 | if (intel_ring_begin(ring, 4) == 0) { | 1062 | ret = intel_ring_begin(ring, 4); |
984 | intel_ring_emit(ring, MI_FLUSH_DW); | 1063 | if (ret) |
985 | intel_ring_emit(ring, 0); | 1064 | return ret; |
986 | intel_ring_emit(ring, 0); | 1065 | |
987 | intel_ring_emit(ring, 0); | 1066 | intel_ring_emit(ring, MI_FLUSH_DW); |
988 | intel_ring_advance(ring); | 1067 | intel_ring_emit(ring, 0); |
989 | } | 1068 | intel_ring_emit(ring, 0); |
1069 | intel_ring_emit(ring, 0); | ||
1070 | intel_ring_advance(ring); | ||
1071 | return 0; | ||
990 | } | 1072 | } |
991 | 1073 | ||
992 | static int | 1074 | static int |
@@ -1008,15 +1090,35 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
1008 | } | 1090 | } |
1009 | 1091 | ||
1010 | static bool | 1092 | static bool |
1093 | gen6_render_ring_get_irq(struct intel_ring_buffer *ring) | ||
1094 | { | ||
1095 | return gen6_ring_get_irq(ring, | ||
1096 | GT_USER_INTERRUPT, | ||
1097 | GEN6_RENDER_USER_INTERRUPT); | ||
1098 | } | ||
1099 | |||
1100 | static void | ||
1101 | gen6_render_ring_put_irq(struct intel_ring_buffer *ring) | ||
1102 | { | ||
1103 | return gen6_ring_put_irq(ring, | ||
1104 | GT_USER_INTERRUPT, | ||
1105 | GEN6_RENDER_USER_INTERRUPT); | ||
1106 | } | ||
1107 | |||
1108 | static bool | ||
1011 | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) | 1109 | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) |
1012 | { | 1110 | { |
1013 | return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); | 1111 | return gen6_ring_get_irq(ring, |
1112 | GT_GEN6_BSD_USER_INTERRUPT, | ||
1113 | GEN6_BSD_USER_INTERRUPT); | ||
1014 | } | 1114 | } |
1015 | 1115 | ||
1016 | static void | 1116 | static void |
1017 | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) | 1117 | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) |
1018 | { | 1118 | { |
1019 | ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); | 1119 | return gen6_ring_put_irq(ring, |
1120 | GT_GEN6_BSD_USER_INTERRUPT, | ||
1121 | GEN6_BSD_USER_INTERRUPT); | ||
1020 | } | 1122 | } |
1021 | 1123 | ||
1022 | /* ring buffer for Video Codec for Gen6+ */ | 1124 | /* ring buffer for Video Codec for Gen6+ */ |
@@ -1040,13 +1142,17 @@ static const struct intel_ring_buffer gen6_bsd_ring = { | |||
1040 | static bool | 1142 | static bool |
1041 | blt_ring_get_irq(struct intel_ring_buffer *ring) | 1143 | blt_ring_get_irq(struct intel_ring_buffer *ring) |
1042 | { | 1144 | { |
1043 | return ring_get_irq(ring, GT_BLT_USER_INTERRUPT); | 1145 | return gen6_ring_get_irq(ring, |
1146 | GT_BLT_USER_INTERRUPT, | ||
1147 | GEN6_BLITTER_USER_INTERRUPT); | ||
1044 | } | 1148 | } |
1045 | 1149 | ||
1046 | static void | 1150 | static void |
1047 | blt_ring_put_irq(struct intel_ring_buffer *ring) | 1151 | blt_ring_put_irq(struct intel_ring_buffer *ring) |
1048 | { | 1152 | { |
1049 | ring_put_irq(ring, GT_BLT_USER_INTERRUPT); | 1153 | gen6_ring_put_irq(ring, |
1154 | GT_BLT_USER_INTERRUPT, | ||
1155 | GEN6_BLITTER_USER_INTERRUPT); | ||
1050 | } | 1156 | } |
1051 | 1157 | ||
1052 | 1158 | ||
@@ -1115,20 +1221,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring, | |||
1115 | return intel_ring_begin(ring, 4); | 1221 | return intel_ring_begin(ring, 4); |
1116 | } | 1222 | } |
1117 | 1223 | ||
1118 | static void blt_ring_flush(struct intel_ring_buffer *ring, | 1224 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1119 | u32 invalidate_domains, | 1225 | u32 invalidate_domains, |
1120 | u32 flush_domains) | 1226 | u32 flush_domains) |
1121 | { | 1227 | { |
1228 | int ret; | ||
1229 | |||
1122 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 1230 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) |
1123 | return; | 1231 | return 0; |
1124 | 1232 | ||
1125 | if (blt_ring_begin(ring, 4) == 0) { | 1233 | ret = blt_ring_begin(ring, 4); |
1126 | intel_ring_emit(ring, MI_FLUSH_DW); | 1234 | if (ret) |
1127 | intel_ring_emit(ring, 0); | 1235 | return ret; |
1128 | intel_ring_emit(ring, 0); | 1236 | |
1129 | intel_ring_emit(ring, 0); | 1237 | intel_ring_emit(ring, MI_FLUSH_DW); |
1130 | intel_ring_advance(ring); | 1238 | intel_ring_emit(ring, 0); |
1131 | } | 1239 | intel_ring_emit(ring, 0); |
1240 | intel_ring_emit(ring, 0); | ||
1241 | intel_ring_advance(ring); | ||
1242 | return 0; | ||
1132 | } | 1243 | } |
1133 | 1244 | ||
1134 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | 1245 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) |
@@ -1165,6 +1276,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1165 | *ring = render_ring; | 1276 | *ring = render_ring; |
1166 | if (INTEL_INFO(dev)->gen >= 6) { | 1277 | if (INTEL_INFO(dev)->gen >= 6) { |
1167 | ring->add_request = gen6_add_request; | 1278 | ring->add_request = gen6_add_request; |
1279 | ring->irq_get = gen6_render_ring_get_irq; | ||
1280 | ring->irq_put = gen6_render_ring_put_irq; | ||
1168 | } else if (IS_GEN5(dev)) { | 1281 | } else if (IS_GEN5(dev)) { |
1169 | ring->add_request = pc_render_add_request; | 1282 | ring->add_request = pc_render_add_request; |
1170 | ring->get_seqno = pc_render_get_seqno; | 1283 | ring->get_seqno = pc_render_get_seqno; |
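Among the intel_ringbuffer.c changes above, the i830 erratum workaround gives each ring an effective_size 128 bytes shorter than its real size, and intel_ring_begin() now wraps against that value, keeping the TAIL pointer out of the last two cachelines. A small illustration of how the wrap decision changes (numbers are hypothetical, not driver code):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        int size = 4096;                      /* hypothetical ring size */
        int effective_size = size - 128;      /* i830: exclude the last 2 cachelines */
        int tail = 4000;                      /* current write position */
        int n = 8 * 4;                        /* bytes needed for 8 dwords */

        bool wrap_old = tail + n > size;             /* old check */
        bool wrap_new = tail + n > effective_size;   /* new check */

        printf("old check wraps: %s, new check wraps: %s\n",
               wrap_old ? "yes" : "no", wrap_new ? "yes" : "no");
        return 0;
}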
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8e2e357ad6ee..be9087e4c9be 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -16,21 +16,24 @@ struct intel_hw_status_page { | |||
16 | 16 | ||
17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) |
18 | 18 | ||
19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base)) | 19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) | 20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) |
21 | 21 | ||
22 | #define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base)) | 22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) | 23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) |
24 | 24 | ||
25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base)) | 25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) |
26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) | 26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) |
27 | 27 | ||
28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) | 28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) |
29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) | 29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) |
30 | 30 | ||
31 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base)) | 31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) |
32 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base)) | 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) |
33 | #define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base)) | 33 | |
34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | ||
35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | ||
36 | #define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base)) | ||
34 | 37 | ||
35 | struct intel_ring_buffer { | 38 | struct intel_ring_buffer { |
36 | const char *name; | 39 | const char *name; |
@@ -49,12 +52,15 @@ struct intel_ring_buffer { | |||
49 | u32 tail; | 52 | u32 tail; |
50 | int space; | 53 | int space; |
51 | int size; | 54 | int size; |
55 | int effective_size; | ||
52 | struct intel_hw_status_page status_page; | 56 | struct intel_hw_status_page status_page; |
53 | 57 | ||
58 | spinlock_t irq_lock; | ||
59 | u32 irq_refcount; | ||
60 | u32 irq_mask; | ||
54 | u32 irq_seqno; /* last seq seen at irq time */ | 61 | u32 irq_seqno; /* last seq seen at irq time */
55 | u32 waiting_seqno; | 62 | u32 waiting_seqno; |
56 | u32 sync_seqno[I915_NUM_RINGS-1]; | 63 | u32 sync_seqno[I915_NUM_RINGS-1]; |
57 | atomic_t irq_refcount; | ||
58 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); | 64 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); |
59 | void (*irq_put)(struct intel_ring_buffer *ring); | 65 | void (*irq_put)(struct intel_ring_buffer *ring); |
60 | 66 | ||
@@ -62,9 +68,9 @@ struct intel_ring_buffer { | |||
62 | 68 | ||
63 | void (*write_tail)(struct intel_ring_buffer *ring, | 69 | void (*write_tail)(struct intel_ring_buffer *ring, |
64 | u32 value); | 70 | u32 value); |
65 | void (*flush)(struct intel_ring_buffer *ring, | 71 | int __must_check (*flush)(struct intel_ring_buffer *ring, |
66 | u32 invalidate_domains, | 72 | u32 invalidate_domains, |
67 | u32 flush_domains); | 73 | u32 flush_domains); |
68 | int (*add_request)(struct intel_ring_buffer *ring, | 74 | int (*add_request)(struct intel_ring_buffer *ring, |
69 | u32 *seqno); | 75 | u32 *seqno); |
70 | u32 (*get_seqno)(struct intel_ring_buffer *ring); | 76 | u32 (*get_seqno)(struct intel_ring_buffer *ring); |
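The header hunk above also wraps the macro parameter as ((ring)->mmio_base) instead of (ring->mmio_base). Because -> binds tighter than almost anything a caller might pass, the unparenthesized form mis-expands as soon as the argument is an expression rather than a plain identifier. A minimal demo of the difference (toy struct and macros, not the kernel definitions):

#include <stdio.h>

struct ring { unsigned int mmio_base; };

#define BASE_UNSAFE(ring) (ring->mmio_base)     /* old style */
#define BASE_SAFE(ring)   ((ring)->mmio_base)   /* new style */

int main(void)
{
        struct ring a = { .mmio_base = 0x2000 };
        struct ring b = { .mmio_base = 0x12000 };
        int use_b = 1;

        /* BASE_UNSAFE(use_b ? &b : &a) would expand to
         *   (use_b ? &b : &a->mmio_base)
         * which does not even compile; the parenthesized form works as intended. */
        printf("base = 0x%x\n", BASE_SAFE(use_b ? &b : &a));
        return 0;
}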
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9d0af36a13ec..45cd37652a37 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1024,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1024 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 1024 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1025 | return; | 1025 | return; |
1026 | 1026 | ||
1027 | if (intel_sdvo->has_hdmi_monitor && | 1027 | if (intel_sdvo->has_hdmi_monitor) { |
1028 | !intel_sdvo_set_avi_infoframe(intel_sdvo)) | 1028 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); |
1029 | return; | 1029 | intel_sdvo_set_colorimetry(intel_sdvo, |
1030 | SDVO_COLORIMETRY_RGB256); | ||
1031 | intel_sdvo_set_avi_infoframe(intel_sdvo); | ||
1032 | } else | ||
1033 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); | ||
1030 | 1034 | ||
1031 | if (intel_sdvo->is_tv && | 1035 | if (intel_sdvo->is_tv && |
1032 | !intel_sdvo_set_tv_format(intel_sdvo)) | 1036 | !intel_sdvo_set_tv_format(intel_sdvo)) |
@@ -1398,6 +1402,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1398 | 1402 | ||
1399 | intel_sdvo->attached_output = response; | 1403 | intel_sdvo->attached_output = response; |
1400 | 1404 | ||
1405 | intel_sdvo->has_hdmi_monitor = false; | ||
1406 | intel_sdvo->has_hdmi_audio = false; | ||
1407 | |||
1401 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1408 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1402 | ret = connector_status_disconnected; | 1409 | ret = connector_status_disconnected; |
1403 | else if (response & SDVO_TMDS_MASK) | 1410 | else if (response & SDVO_TMDS_MASK) |
@@ -1922,20 +1929,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1922 | static bool | 1929 | static bool |
1923 | intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) | 1930 | intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) |
1924 | { | 1931 | { |
1925 | int is_hdmi; | 1932 | return intel_sdvo_check_supp_encode(intel_sdvo); |
1926 | |||
1927 | if (!intel_sdvo_check_supp_encode(intel_sdvo)) | ||
1928 | return false; | ||
1929 | |||
1930 | if (!intel_sdvo_set_target_output(intel_sdvo, | ||
1931 | device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1)) | ||
1932 | return false; | ||
1933 | |||
1934 | is_hdmi = 0; | ||
1935 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1)) | ||
1936 | return false; | ||
1937 | |||
1938 | return !!is_hdmi; | ||
1939 | } | 1933 | } |
1940 | 1934 | ||
1941 | static u8 | 1935 | static u8 |
@@ -2037,12 +2031,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2037 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2031 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2038 | 2032 | ||
2039 | if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { | 2033 | if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { |
2040 | /* enable hdmi encoding mode if supported */ | ||
2041 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); | ||
2042 | intel_sdvo_set_colorimetry(intel_sdvo, | ||
2043 | SDVO_COLORIMETRY_RGB256); | ||
2044 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2034 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2045 | |||
2046 | intel_sdvo->is_hdmi = true; | 2035 | intel_sdvo->is_hdmi = true; |
2047 | } | 2036 | } |
2048 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2037 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index ff652c77a0a5..4c8bfc97fb4c 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2963,6 +2963,7 @@ config TILE_NET | |||
2963 | config XEN_NETDEV_FRONTEND | 2963 | config XEN_NETDEV_FRONTEND |
2964 | tristate "Xen network device frontend driver" | 2964 | tristate "Xen network device frontend driver" |
2965 | depends on XEN | 2965 | depends on XEN |
2966 | select XEN_XENBUS_FRONTEND | ||
2966 | default y | 2967 | default y |
2967 | help | 2968 | help |
2968 | The network device frontend driver allows the kernel to | 2969 | The network device frontend driver allows the kernel to |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 5b1630e4e9e3..a9523fdc6911 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -45,6 +45,7 @@ config XEN_PCIDEV_FRONTEND | |||
45 | depends on PCI && X86 && XEN | 45 | depends on PCI && X86 && XEN |
46 | select HOTPLUG | 46 | select HOTPLUG |
47 | select PCI_XEN | 47 | select PCI_XEN |
48 | select XEN_XENBUS_FRONTEND | ||
48 | default y | 49 | default y |
49 | help | 50 | help |
50 | The PCI device frontend driver allows the kernel to import arbitrary | 51 | The PCI device frontend driver allows the kernel to import arbitrary |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 6e6180ccd726..5a48ce996dea 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
@@ -29,6 +29,14 @@ config XEN_DEV_EVTCHN | |||
29 | firing. | 29 | firing. |
30 | If in doubt, say yes. | 30 | If in doubt, say yes. |
31 | 31 | ||
32 | config XEN_BACKEND | ||
33 | bool "Backend driver support" | ||
34 | depends on XEN_DOM0 | ||
35 | default y | ||
36 | help | ||
37 | Support for backend device drivers that provide I/O services | ||
38 | to other virtual machines. | ||
39 | |||
32 | config XENFS | 40 | config XENFS |
33 | tristate "Xen filesystem" | 41 | tristate "Xen filesystem" |
34 | default y | 42 | default y |
@@ -62,6 +70,9 @@ config XEN_SYS_HYPERVISOR | |||
62 | virtual environment, /sys/hypervisor will still be present, | 70 | virtual environment, /sys/hypervisor will still be present, |
63 | but will have no xen contents. | 71 | but will have no xen contents. |
64 | 72 | ||
73 | config XEN_XENBUS_FRONTEND | ||
74 | tristate | ||
75 | |||
65 | config XEN_PLATFORM_PCI | 76 | config XEN_PLATFORM_PCI |
66 | tristate "xen platform pci device driver" | 77 | tristate "xen platform pci device driver" |
67 | depends on XEN_PVHVM | 78 | depends on XEN_PVHVM |
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile index 5571f5b84223..8dca685358b4 100644 --- a/drivers/xen/xenbus/Makefile +++ b/drivers/xen/xenbus/Makefile | |||
@@ -5,3 +5,8 @@ xenbus-objs += xenbus_client.o | |||
5 | xenbus-objs += xenbus_comms.o | 5 | xenbus-objs += xenbus_comms.o |
6 | xenbus-objs += xenbus_xs.o | 6 | xenbus-objs += xenbus_xs.o |
7 | xenbus-objs += xenbus_probe.o | 7 | xenbus-objs += xenbus_probe.o |
8 | |||
9 | xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o | ||
10 | xenbus-objs += $(xenbus-be-objs-y) | ||
11 | |||
12 | obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o | ||
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index deb9c4ba3a93..baa65e7fbbc7 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <xen/events.h> | 56 | #include <xen/events.h> |
57 | #include <xen/page.h> | 57 | #include <xen/page.h> |
58 | 58 | ||
59 | #include <xen/platform_pci.h> | ||
60 | #include <xen/hvm.h> | 59 | #include <xen/hvm.h> |
61 | 60 | ||
62 | #include "xenbus_comms.h" | 61 | #include "xenbus_comms.h" |
@@ -73,15 +72,6 @@ static unsigned long xen_store_mfn; | |||
73 | 72 | ||
74 | static BLOCKING_NOTIFIER_HEAD(xenstore_chain); | 73 | static BLOCKING_NOTIFIER_HEAD(xenstore_chain); |
75 | 74 | ||
76 | static void wait_for_devices(struct xenbus_driver *xendrv); | ||
77 | |||
78 | static int xenbus_probe_frontend(const char *type, const char *name); | ||
79 | |||
80 | static void xenbus_dev_shutdown(struct device *_dev); | ||
81 | |||
82 | static int xenbus_dev_suspend(struct device *dev, pm_message_t state); | ||
83 | static int xenbus_dev_resume(struct device *dev); | ||
84 | |||
85 | /* If something in array of ids matches this device, return it. */ | 75 | /* If something in array of ids matches this device, return it. */ |
86 | static const struct xenbus_device_id * | 76 | static const struct xenbus_device_id * |
87 | match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) | 77 | match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) |
@@ -102,34 +92,7 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv) | |||
102 | 92 | ||
103 | return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; | 93 | return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; |
104 | } | 94 | } |
105 | 95 | EXPORT_SYMBOL_GPL(xenbus_match); | |
106 | static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env) | ||
107 | { | ||
108 | struct xenbus_device *dev = to_xenbus_device(_dev); | ||
109 | |||
110 | if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) | ||
111 | return -ENOMEM; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* device/<type>/<id> => <type>-<id> */ | ||
117 | static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) | ||
118 | { | ||
119 | nodename = strchr(nodename, '/'); | ||
120 | if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { | ||
121 | printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); | ||
122 | return -EINVAL; | ||
123 | } | ||
124 | |||
125 | strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); | ||
126 | if (!strchr(bus_id, '/')) { | ||
127 | printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); | ||
128 | return -EINVAL; | ||
129 | } | ||
130 | *strchr(bus_id, '/') = '-'; | ||
131 | return 0; | ||
132 | } | ||
133 | 96 | ||
134 | 97 | ||
135 | static void free_otherend_details(struct xenbus_device *dev) | 98 | static void free_otherend_details(struct xenbus_device *dev) |
@@ -149,7 +112,30 @@ static void free_otherend_watch(struct xenbus_device *dev) | |||
149 | } | 112 | } |
150 | 113 | ||
151 | 114 | ||
152 | int read_otherend_details(struct xenbus_device *xendev, | 115 | static int talk_to_otherend(struct xenbus_device *dev) |
116 | { | ||
117 | struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); | ||
118 | |||
119 | free_otherend_watch(dev); | ||
120 | free_otherend_details(dev); | ||
121 | |||
122 | return drv->read_otherend_details(dev); | ||
123 | } | ||
124 | |||
125 | |||
126 | |||
127 | static int watch_otherend(struct xenbus_device *dev) | ||
128 | { | ||
129 | struct xen_bus_type *bus = | ||
130 | container_of(dev->dev.bus, struct xen_bus_type, bus); | ||
131 | |||
132 | return xenbus_watch_pathfmt(dev, &dev->otherend_watch, | ||
133 | bus->otherend_changed, | ||
134 | "%s/%s", dev->otherend, "state"); | ||
135 | } | ||
136 | |||
137 | |||
138 | int xenbus_read_otherend_details(struct xenbus_device *xendev, | ||
153 | char *id_node, char *path_node) | 139 | char *id_node, char *path_node) |
154 | { | 140 | { |
155 | int err = xenbus_gather(XBT_NIL, xendev->nodename, | 141 | int err = xenbus_gather(XBT_NIL, xendev->nodename, |
@@ -174,39 +160,11 @@ int read_otherend_details(struct xenbus_device *xendev, | |||
174 | 160 | ||
175 | return 0; | 161 | return 0; |
176 | } | 162 | } |
163 | EXPORT_SYMBOL_GPL(xenbus_read_otherend_details); | ||
177 | 164 | ||
178 | 165 | void xenbus_otherend_changed(struct xenbus_watch *watch, | |
179 | static int read_backend_details(struct xenbus_device *xendev) | 166 | const char **vec, unsigned int len, |
180 | { | 167 | int ignore_on_shutdown) |
181 | return read_otherend_details(xendev, "backend-id", "backend"); | ||
182 | } | ||
183 | |||
184 | static struct device_attribute xenbus_dev_attrs[] = { | ||
185 | __ATTR_NULL | ||
186 | }; | ||
187 | |||
188 | /* Bus type for frontend drivers. */ | ||
189 | static struct xen_bus_type xenbus_frontend = { | ||
190 | .root = "device", | ||
191 | .levels = 2, /* device/type/<id> */ | ||
192 | .get_bus_id = frontend_bus_id, | ||
193 | .probe = xenbus_probe_frontend, | ||
194 | .bus = { | ||
195 | .name = "xen", | ||
196 | .match = xenbus_match, | ||
197 | .uevent = xenbus_uevent, | ||
198 | .probe = xenbus_dev_probe, | ||
199 | .remove = xenbus_dev_remove, | ||
200 | .shutdown = xenbus_dev_shutdown, | ||
201 | .dev_attrs = xenbus_dev_attrs, | ||
202 | |||
203 | .suspend = xenbus_dev_suspend, | ||
204 | .resume = xenbus_dev_resume, | ||
205 | }, | ||
206 | }; | ||
207 | |||
208 | static void otherend_changed(struct xenbus_watch *watch, | ||
209 | const char **vec, unsigned int len) | ||
210 | { | 168 | { |
211 | struct xenbus_device *dev = | 169 | struct xenbus_device *dev = |
212 | container_of(watch, struct xenbus_device, otherend_watch); | 170 | container_of(watch, struct xenbus_device, otherend_watch); |
@@ -234,11 +192,7 @@ static void otherend_changed(struct xenbus_watch *watch, | |||
234 | * work that can fail e.g., when the rootfs is gone. | 192 | * work that can fail e.g., when the rootfs is gone. |
235 | */ | 193 | */ |
236 | if (system_state > SYSTEM_RUNNING) { | 194 | if (system_state > SYSTEM_RUNNING) { |
237 | struct xen_bus_type *bus = bus; | 195 | if (ignore_on_shutdown && (state == XenbusStateClosing)) |
238 | bus = container_of(dev->dev.bus, struct xen_bus_type, bus); | ||
239 | /* If we're frontend, drive the state machine to Closed. */ | ||
240 | /* This should cause the backend to release our resources. */ | ||
241 | if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) | ||
242 | xenbus_frontend_closed(dev); | 196 | xenbus_frontend_closed(dev); |
243 | return; | 197 | return; |
244 | } | 198 | } |
@@ -246,25 +200,7 @@ static void otherend_changed(struct xenbus_watch *watch, | |||
246 | if (drv->otherend_changed) | 200 | if (drv->otherend_changed) |
247 | drv->otherend_changed(dev, state); | 201 | drv->otherend_changed(dev, state); |
248 | } | 202 | } |
249 | 203 | EXPORT_SYMBOL_GPL(xenbus_otherend_changed); | |
250 | |||
251 | static int talk_to_otherend(struct xenbus_device *dev) | ||
252 | { | ||
253 | struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); | ||
254 | |||
255 | free_otherend_watch(dev); | ||
256 | free_otherend_details(dev); | ||
257 | |||
258 | return drv->read_otherend_details(dev); | ||
259 | } | ||
260 | |||
261 | |||
262 | static int watch_otherend(struct xenbus_device *dev) | ||
263 | { | ||
264 | return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, | ||
265 | "%s/%s", dev->otherend, "state"); | ||
266 | } | ||
267 | |||
268 | 204 | ||
269 | int xenbus_dev_probe(struct device *_dev) | 205 | int xenbus_dev_probe(struct device *_dev) |
270 | { | 206 | { |
@@ -308,8 +244,9 @@ int xenbus_dev_probe(struct device *_dev) | |||
308 | fail: | 244 | fail: |
309 | xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); | 245 | xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); |
310 | xenbus_switch_state(dev, XenbusStateClosed); | 246 | xenbus_switch_state(dev, XenbusStateClosed); |
311 | return -ENODEV; | 247 | return err; |
312 | } | 248 | } |
249 | EXPORT_SYMBOL_GPL(xenbus_dev_probe); | ||
313 | 250 | ||
314 | int xenbus_dev_remove(struct device *_dev) | 251 | int xenbus_dev_remove(struct device *_dev) |
315 | { | 252 | { |
@@ -327,8 +264,9 @@ int xenbus_dev_remove(struct device *_dev) | |||
327 | xenbus_switch_state(dev, XenbusStateClosed); | 264 | xenbus_switch_state(dev, XenbusStateClosed); |
328 | return 0; | 265 | return 0; |
329 | } | 266 | } |
267 | EXPORT_SYMBOL_GPL(xenbus_dev_remove); | ||
330 | 268 | ||
331 | static void xenbus_dev_shutdown(struct device *_dev) | 269 | void xenbus_dev_shutdown(struct device *_dev) |
332 | { | 270 | { |
333 | struct xenbus_device *dev = to_xenbus_device(_dev); | 271 | struct xenbus_device *dev = to_xenbus_device(_dev); |
334 | unsigned long timeout = 5*HZ; | 272 | unsigned long timeout = 5*HZ; |
@@ -349,6 +287,7 @@ static void xenbus_dev_shutdown(struct device *_dev) | |||
349 | out: | 287 | out: |
350 | put_device(&dev->dev); | 288 | put_device(&dev->dev); |
351 | } | 289 | } |
290 | EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); | ||
352 | 291 | ||
353 | int xenbus_register_driver_common(struct xenbus_driver *drv, | 292 | int xenbus_register_driver_common(struct xenbus_driver *drv, |
354 | struct xen_bus_type *bus, | 293 | struct xen_bus_type *bus, |
@@ -362,25 +301,7 @@ int xenbus_register_driver_common(struct xenbus_driver *drv, | |||
362 | 301 | ||
363 | return driver_register(&drv->driver); | 302 | return driver_register(&drv->driver); |
364 | } | 303 | } |
365 | 304 | EXPORT_SYMBOL_GPL(xenbus_register_driver_common); | |
366 | int __xenbus_register_frontend(struct xenbus_driver *drv, | ||
367 | struct module *owner, const char *mod_name) | ||
368 | { | ||
369 | int ret; | ||
370 | |||
371 | drv->read_otherend_details = read_backend_details; | ||
372 | |||
373 | ret = xenbus_register_driver_common(drv, &xenbus_frontend, | ||
374 | owner, mod_name); | ||
375 | if (ret) | ||
376 | return ret; | ||
377 | |||
378 | /* If this driver is loaded as a module wait for devices to attach. */ | ||
379 | wait_for_devices(drv); | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(__xenbus_register_frontend); | ||
384 | 305 | ||
385 | void xenbus_unregister_driver(struct xenbus_driver *drv) | 306 | void xenbus_unregister_driver(struct xenbus_driver *drv) |
386 | { | 307 | { |
@@ -551,24 +472,7 @@ fail: | |||
551 | kfree(xendev); | 472 | kfree(xendev); |
552 | return err; | 473 | return err; |
553 | } | 474 | } |
554 | 475 | EXPORT_SYMBOL_GPL(xenbus_probe_node); | |
555 | /* device/<typename>/<name> */ | ||
556 | static int xenbus_probe_frontend(const char *type, const char *name) | ||
557 | { | ||
558 | char *nodename; | ||
559 | int err; | ||
560 | |||
561 | nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", | ||
562 | xenbus_frontend.root, type, name); | ||
563 | if (!nodename) | ||
564 | return -ENOMEM; | ||
565 | |||
566 | DPRINTK("%s", nodename); | ||
567 | |||
568 | err = xenbus_probe_node(&xenbus_frontend, type, nodename); | ||
569 | kfree(nodename); | ||
570 | return err; | ||
571 | } | ||
572 | 476 | ||
573 | static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) | 477 | static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) |
574 | { | 478 | { |
@@ -582,10 +486,11 @@ static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) | |||
582 | return PTR_ERR(dir); | 486 | return PTR_ERR(dir); |
583 | 487 | ||
584 | for (i = 0; i < dir_n; i++) { | 488 | for (i = 0; i < dir_n; i++) { |
585 | err = bus->probe(type, dir[i]); | 489 | err = bus->probe(bus, type, dir[i]); |
586 | if (err) | 490 | if (err) |
587 | break; | 491 | break; |
588 | } | 492 | } |
493 | |||
589 | kfree(dir); | 494 | kfree(dir); |
590 | return err; | 495 | return err; |
591 | } | 496 | } |
@@ -605,9 +510,11 @@ int xenbus_probe_devices(struct xen_bus_type *bus) | |||
605 | if (err) | 510 | if (err) |
606 | break; | 511 | break; |
607 | } | 512 | } |
513 | |||
608 | kfree(dir); | 514 | kfree(dir); |
609 | return err; | 515 | return err; |
610 | } | 516 | } |
517 | EXPORT_SYMBOL_GPL(xenbus_probe_devices); | ||
611 | 518 | ||
612 | static unsigned int char_count(const char *str, char c) | 519 | static unsigned int char_count(const char *str, char c) |
613 | { | 520 | { |
@@ -670,32 +577,18 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) | |||
670 | } | 577 | } |
671 | EXPORT_SYMBOL_GPL(xenbus_dev_changed); | 578 | EXPORT_SYMBOL_GPL(xenbus_dev_changed); |
672 | 579 | ||
673 | static void frontend_changed(struct xenbus_watch *watch, | 580 | int xenbus_dev_suspend(struct device *dev, pm_message_t state) |
674 | const char **vec, unsigned int len) | ||
675 | { | ||
676 | DPRINTK(""); | ||
677 | |||
678 | xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); | ||
679 | } | ||
680 | |||
681 | /* We watch for devices appearing and vanishing. */ | ||
682 | static struct xenbus_watch fe_watch = { | ||
683 | .node = "device", | ||
684 | .callback = frontend_changed, | ||
685 | }; | ||
686 | |||
687 | static int xenbus_dev_suspend(struct device *dev, pm_message_t state) | ||
688 | { | 581 | { |
689 | int err = 0; | 582 | int err = 0; |
690 | struct xenbus_driver *drv; | 583 | struct xenbus_driver *drv; |
691 | struct xenbus_device *xdev; | 584 | struct xenbus_device *xdev |
585 | = container_of(dev, struct xenbus_device, dev); | ||
692 | 586 | ||
693 | DPRINTK(""); | 587 | DPRINTK("%s", xdev->nodename); |
694 | 588 | ||
695 | if (dev->driver == NULL) | 589 | if (dev->driver == NULL) |
696 | return 0; | 590 | return 0; |
697 | drv = to_xenbus_driver(dev->driver); | 591 | drv = to_xenbus_driver(dev->driver); |
698 | xdev = container_of(dev, struct xenbus_device, dev); | ||
699 | if (drv->suspend) | 592 | if (drv->suspend) |
700 | err = drv->suspend(xdev, state); | 593 | err = drv->suspend(xdev, state); |
701 | if (err) | 594 | if (err) |
@@ -703,21 +596,20 @@ static int xenbus_dev_suspend(struct device *dev, pm_message_t state) | |||
703 | "xenbus: suspend %s failed: %i\n", dev_name(dev), err); | 596 | "xenbus: suspend %s failed: %i\n", dev_name(dev), err); |
704 | return 0; | 597 | return 0; |
705 | } | 598 | } |
599 | EXPORT_SYMBOL_GPL(xenbus_dev_suspend); | ||
706 | 600 | ||
707 | static int xenbus_dev_resume(struct device *dev) | 601 | int xenbus_dev_resume(struct device *dev) |
708 | { | 602 | { |
709 | int err; | 603 | int err; |
710 | struct xenbus_driver *drv; | 604 | struct xenbus_driver *drv; |
711 | struct xenbus_device *xdev; | 605 | struct xenbus_device *xdev |
606 | = container_of(dev, struct xenbus_device, dev); | ||
712 | 607 | ||
713 | DPRINTK(""); | 608 | DPRINTK("%s", xdev->nodename); |
714 | 609 | ||
715 | if (dev->driver == NULL) | 610 | if (dev->driver == NULL) |
716 | return 0; | 611 | return 0; |
717 | |||
718 | drv = to_xenbus_driver(dev->driver); | 612 | drv = to_xenbus_driver(dev->driver); |
719 | xdev = container_of(dev, struct xenbus_device, dev); | ||
720 | |||
721 | err = talk_to_otherend(xdev); | 613 | err = talk_to_otherend(xdev); |
722 | if (err) { | 614 | if (err) { |
723 | printk(KERN_WARNING | 615 | printk(KERN_WARNING |
@@ -748,6 +640,7 @@ static int xenbus_dev_resume(struct device *dev) | |||
748 | 640 | ||
749 | return 0; | 641 | return 0; |
750 | } | 642 | } |
643 | EXPORT_SYMBOL_GPL(xenbus_dev_resume); | ||
751 | 644 | ||
752 | /* A flag to determine if xenstored is 'ready' (i.e. has started) */ | 645 | /* A flag to determine if xenstored is 'ready' (i.e. has started) */ |
753 | int xenstored_ready = 0; | 646 | int xenstored_ready = 0; |
@@ -776,11 +669,6 @@ void xenbus_probe(struct work_struct *unused) | |||
776 | { | 669 | { |
777 | xenstored_ready = 1; | 670 | xenstored_ready = 1; |
778 | 671 | ||
779 | /* Enumerate devices in xenstore and watch for changes. */ | ||
780 | xenbus_probe_devices(&xenbus_frontend); | ||
781 | register_xenbus_watch(&fe_watch); | ||
782 | xenbus_backend_probe_and_watch(); | ||
783 | |||
784 | /* Notify others that xenstore is up */ | 672 | /* Notify others that xenstore is up */ |
785 | blocking_notifier_call_chain(&xenstore_chain, 0, NULL); | 673 | blocking_notifier_call_chain(&xenstore_chain, 0, NULL); |
786 | } | 674 | } |
@@ -809,16 +697,7 @@ static int __init xenbus_init(void) | |||
809 | 697 | ||
810 | err = -ENODEV; | 698 | err = -ENODEV; |
811 | if (!xen_domain()) | 699 | if (!xen_domain()) |
812 | goto out_error; | 700 | return err; |
813 | |||
814 | /* Register ourselves with the kernel bus subsystem */ | ||
815 | err = bus_register(&xenbus_frontend.bus); | ||
816 | if (err) | ||
817 | goto out_error; | ||
818 | |||
819 | err = xenbus_backend_bus_register(); | ||
820 | if (err) | ||
821 | goto out_unreg_front; | ||
822 | 701 | ||
823 | /* | 702 | /* |
824 | * Domain0 doesn't have a store_evtchn or store_mfn yet. | 703 | * Domain0 doesn't have a store_evtchn or store_mfn yet. |
@@ -874,7 +753,7 @@ static int __init xenbus_init(void) | |||
874 | if (err) { | 753 | if (err) { |
875 | printk(KERN_WARNING | 754 | printk(KERN_WARNING |
876 | "XENBUS: Error initializing xenstore comms: %i\n", err); | 755 | "XENBUS: Error initializing xenstore comms: %i\n", err); |
877 | goto out_unreg_back; | 756 | goto out_error; |
878 | } | 757 | } |
879 | 758 | ||
880 | #ifdef CONFIG_XEN_COMPAT_XENFS | 759 | #ifdef CONFIG_XEN_COMPAT_XENFS |
@@ -887,133 +766,13 @@ static int __init xenbus_init(void) | |||
887 | 766 | ||
888 | return 0; | 767 | return 0; |
889 | 768 | ||
890 | out_unreg_back: | ||
891 | xenbus_backend_bus_unregister(); | ||
892 | |||
893 | out_unreg_front: | ||
894 | bus_unregister(&xenbus_frontend.bus); | ||
895 | |||
896 | out_error: | 769 | out_error: |
897 | if (page != 0) | 770 | if (page != 0) |
898 | free_page(page); | 771 | free_page(page); |
772 | |||
899 | return err; | 773 | return err; |
900 | } | 774 | } |
901 | 775 | ||
902 | postcore_initcall(xenbus_init); | 776 | postcore_initcall(xenbus_init); |
903 | 777 | ||
904 | MODULE_LICENSE("GPL"); | 778 | MODULE_LICENSE("GPL"); |
905 | |||
906 | static int is_device_connecting(struct device *dev, void *data) | ||
907 | { | ||
908 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
909 | struct device_driver *drv = data; | ||
910 | struct xenbus_driver *xendrv; | ||
911 | |||
912 | /* | ||
913 | * A device with no driver will never connect. We care only about | ||
914 | * devices which should currently be in the process of connecting. | ||
915 | */ | ||
916 | if (!dev->driver) | ||
917 | return 0; | ||
918 | |||
919 | /* Is this search limited to a particular driver? */ | ||
920 | if (drv && (dev->driver != drv)) | ||
921 | return 0; | ||
922 | |||
923 | xendrv = to_xenbus_driver(dev->driver); | ||
924 | return (xendev->state < XenbusStateConnected || | ||
925 | (xendev->state == XenbusStateConnected && | ||
926 | xendrv->is_ready && !xendrv->is_ready(xendev))); | ||
927 | } | ||
928 | |||
929 | static int exists_connecting_device(struct device_driver *drv) | ||
930 | { | ||
931 | return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, | ||
932 | is_device_connecting); | ||
933 | } | ||
934 | |||
935 | static int print_device_status(struct device *dev, void *data) | ||
936 | { | ||
937 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
938 | struct device_driver *drv = data; | ||
939 | |||
940 | /* Is this operation limited to a particular driver? */ | ||
941 | if (drv && (dev->driver != drv)) | ||
942 | return 0; | ||
943 | |||
944 | if (!dev->driver) { | ||
945 | /* Information only: is this too noisy? */ | ||
946 | printk(KERN_INFO "XENBUS: Device with no driver: %s\n", | ||
947 | xendev->nodename); | ||
948 | } else if (xendev->state < XenbusStateConnected) { | ||
949 | enum xenbus_state rstate = XenbusStateUnknown; | ||
950 | if (xendev->otherend) | ||
951 | rstate = xenbus_read_driver_state(xendev->otherend); | ||
952 | printk(KERN_WARNING "XENBUS: Timeout connecting " | ||
953 | "to device: %s (local state %d, remote state %d)\n", | ||
954 | xendev->nodename, xendev->state, rstate); | ||
955 | } | ||
956 | |||
957 | return 0; | ||
958 | } | ||
959 | |||
960 | /* We only wait for device setup after most initcalls have run. */ | ||
961 | static int ready_to_wait_for_devices; | ||
962 | |||
963 | /* | ||
964 | * On a 5-minute timeout, wait for all devices currently configured. We need | ||
965 | * to do this to guarantee that the filesystems and / or network devices | ||
966 | * needed for boot are available, before we can allow the boot to proceed. | ||
967 | * | ||
968 | * This needs to be on a late_initcall, to happen after the frontend device | ||
969 | * drivers have been initialised, but before the root fs is mounted. | ||
970 | * | ||
971 | * A possible improvement here would be to have the tools add a per-device | ||
972 | * flag to the store entry, indicating whether it is needed at boot time. | ||
973 | * This would allow people who knew what they were doing to accelerate their | ||
974 | * boot slightly, but of course needs tools or manual intervention to set up | ||
975 | * those flags correctly. | ||
976 | */ | ||
977 | static void wait_for_devices(struct xenbus_driver *xendrv) | ||
978 | { | ||
979 | unsigned long start = jiffies; | ||
980 | struct device_driver *drv = xendrv ? &xendrv->driver : NULL; | ||
981 | unsigned int seconds_waited = 0; | ||
982 | |||
983 | if (!ready_to_wait_for_devices || !xen_domain()) | ||
984 | return; | ||
985 | |||
986 | while (exists_connecting_device(drv)) { | ||
987 | if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { | ||
988 | if (!seconds_waited) | ||
989 | printk(KERN_WARNING "XENBUS: Waiting for " | ||
990 | "devices to initialise: "); | ||
991 | seconds_waited += 5; | ||
992 | printk("%us...", 300 - seconds_waited); | ||
993 | if (seconds_waited == 300) | ||
994 | break; | ||
995 | } | ||
996 | |||
997 | schedule_timeout_interruptible(HZ/10); | ||
998 | } | ||
999 | |||
1000 | if (seconds_waited) | ||
1001 | printk("\n"); | ||
1002 | |||
1003 | bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, | ||
1004 | print_device_status); | ||
1005 | } | ||
1006 | |||
1007 | #ifndef MODULE | ||
1008 | static int __init boot_wait_for_devices(void) | ||
1009 | { | ||
1010 | if (xen_hvm_domain() && !xen_platform_pci_unplug) | ||
1011 | return -ENODEV; | ||
1012 | |||
1013 | ready_to_wait_for_devices = 1; | ||
1014 | wait_for_devices(NULL); | ||
1015 | return 0; | ||
1016 | } | ||
1017 | |||
1018 | late_initcall(boot_wait_for_devices); | ||
1019 | #endif | ||
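The net effect of the hunks above is that xenbus_probe.c keeps only the bus-agnostic machinery: the per-bus probe hook gains a struct xen_bus_type argument, the otherend watch is routed through the bus's own otherend_changed callback, and the previously static entry points are exported. As an illustration (a minimal sketch only; example_probe is hypothetical, the real implementations are the frontend/backend probe functions added later in this diff), a probe hook under the new signature builds the node path from bus->root and defers to the shared xenbus_probe_node():

#include <linux/slab.h>
#include <xen/xenbus.h>
#include "xenbus_probe.h"

/* device/<type>/<name> (or backend/..., depending on bus->root) */
static int example_probe(struct xen_bus_type *bus, const char *type,
			 const char *name)
{
	char *nodename;
	int err;

	/* Build the full xenstore path from the bus root. */
	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
	if (!nodename)
		return -ENOMEM;

	/* The common core matches, allocates and registers the device. */
	err = xenbus_probe_node(bus, type, nodename);
	kfree(nodename);
	return err;
}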
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h index 6c5e3185a6a2..24665812316a 100644 --- a/drivers/xen/xenbus/xenbus_probe.h +++ b/drivers/xen/xenbus/xenbus_probe.h | |||
@@ -36,26 +36,15 @@ | |||
36 | 36 | ||
37 | #define XEN_BUS_ID_SIZE 20 | 37 | #define XEN_BUS_ID_SIZE 20 |
38 | 38 | ||
39 | #ifdef CONFIG_XEN_BACKEND | ||
40 | extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); | ||
41 | extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); | ||
42 | extern void xenbus_backend_probe_and_watch(void); | ||
43 | extern int xenbus_backend_bus_register(void); | ||
44 | extern void xenbus_backend_bus_unregister(void); | ||
45 | #else | ||
46 | static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} | ||
47 | static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} | ||
48 | static inline void xenbus_backend_probe_and_watch(void) {} | ||
49 | static inline int xenbus_backend_bus_register(void) { return 0; } | ||
50 | static inline void xenbus_backend_bus_unregister(void) {} | ||
51 | #endif | ||
52 | |||
53 | struct xen_bus_type | 39 | struct xen_bus_type |
54 | { | 40 | { |
55 | char *root; | 41 | char *root; |
56 | unsigned int levels; | 42 | unsigned int levels; |
57 | int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); | 43 | int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); |
58 | int (*probe)(const char *type, const char *dir); | 44 | int (*probe)(struct xen_bus_type *bus, const char *type, |
45 | const char *dir); | ||
46 | void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, | ||
47 | unsigned int len); | ||
59 | struct bus_type bus; | 48 | struct bus_type bus; |
60 | }; | 49 | }; |
61 | 50 | ||
@@ -73,4 +62,16 @@ extern int xenbus_probe_devices(struct xen_bus_type *bus); | |||
73 | 62 | ||
74 | extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); | 63 | extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); |
75 | 64 | ||
65 | extern void xenbus_dev_shutdown(struct device *_dev); | ||
66 | |||
67 | extern int xenbus_dev_suspend(struct device *dev, pm_message_t state); | ||
68 | extern int xenbus_dev_resume(struct device *dev); | ||
69 | |||
70 | extern void xenbus_otherend_changed(struct xenbus_watch *watch, | ||
71 | const char **vec, unsigned int len, | ||
72 | int ignore_on_shutdown); | ||
73 | |||
74 | extern int xenbus_read_otherend_details(struct xenbus_device *xendev, | ||
75 | char *id_node, char *path_node); | ||
76 | |||
76 | #endif | 77 | #endif |
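With the header now carrying the shared declarations, each bus half only supplies a thin wrapper around xenbus_otherend_changed(); the ignore_on_shutdown flag decides whether a backend entering Closing while the system is shutting down should drive the frontend to Closed. For reference, the two wrappers (both appear verbatim in the new files later in this diff) differ only in that flag:

/* Backend bus: watches frontends; never forces the Closed transition. */
static void frontend_changed(struct xenbus_watch *watch,
			     const char **vec, unsigned int len)
{
	xenbus_otherend_changed(watch, vec, len, 0);
}

/* Frontend bus: watches backends; a backend moving to Closing during
 * shutdown releases our resources via xenbus_frontend_closed(). */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	xenbus_otherend_changed(watch, vec, len, 1);
}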
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c new file mode 100644 index 000000000000..6cf467bf63ec --- /dev/null +++ b/drivers/xen/xenbus/xenbus_probe_backend.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /****************************************************************************** | ||
2 | * Talks to Xen Store to figure out what devices we have (backend half). | ||
3 | * | ||
4 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
5 | * Copyright (C) 2005 Mike Wray, Hewlett-Packard | ||
6 | * Copyright (C) 2005, 2006 XenSource Ltd | ||
7 | * Copyright (C) 2007 Solarflare Communications, Inc. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation; or, when distributed | ||
12 | * separately from the Linux kernel or incorporated into other | ||
13 | * software packages, subject to the following license: | ||
14 | * | ||
15 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
16 | * of this source file (the "Software"), to deal in the Software without | ||
17 | * restriction, including without limitation the rights to use, copy, modify, | ||
18 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
19 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
20 | * the following conditions: | ||
21 | * | ||
22 | * The above copyright notice and this permission notice shall be included in | ||
23 | * all copies or substantial portions of the Software. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
28 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
29 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
30 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
31 | * IN THE SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #define DPRINTK(fmt, args...) \ | ||
35 | pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ | ||
36 | __func__, __LINE__, ##args) | ||
37 | |||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/err.h> | ||
40 | #include <linux/string.h> | ||
41 | #include <linux/ctype.h> | ||
42 | #include <linux/fcntl.h> | ||
43 | #include <linux/mm.h> | ||
44 | #include <linux/notifier.h> | ||
45 | |||
46 | #include <asm/page.h> | ||
47 | #include <asm/pgtable.h> | ||
48 | #include <asm/xen/hypervisor.h> | ||
49 | #include <asm/hypervisor.h> | ||
50 | #include <xen/xenbus.h> | ||
51 | #include <xen/features.h> | ||
52 | |||
53 | #include "xenbus_comms.h" | ||
54 | #include "xenbus_probe.h" | ||
55 | |||
56 | /* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */ | ||
57 | static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) | ||
58 | { | ||
59 | int domid, err; | ||
60 | const char *devid, *type, *frontend; | ||
61 | unsigned int typelen; | ||
62 | |||
63 | type = strchr(nodename, '/'); | ||
64 | if (!type) | ||
65 | return -EINVAL; | ||
66 | type++; | ||
67 | typelen = strcspn(type, "/"); | ||
68 | if (!typelen || type[typelen] != '/') | ||
69 | return -EINVAL; | ||
70 | |||
71 | devid = strrchr(nodename, '/') + 1; | ||
72 | |||
73 | err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, | ||
74 | "frontend", NULL, &frontend, | ||
75 | NULL); | ||
76 | if (err) | ||
77 | return err; | ||
78 | if (strlen(frontend) == 0) | ||
79 | err = -ERANGE; | ||
80 | if (!err && !xenbus_exists(XBT_NIL, frontend, "")) | ||
81 | err = -ENOENT; | ||
82 | kfree(frontend); | ||
83 | |||
84 | if (err) | ||
85 | return err; | ||
86 | |||
87 | if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", | ||
88 | typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) | ||
89 | return -ENOSPC; | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static int xenbus_uevent_backend(struct device *dev, | ||
94 | struct kobj_uevent_env *env) | ||
95 | { | ||
96 | struct xenbus_device *xdev; | ||
97 | struct xenbus_driver *drv; | ||
98 | struct xen_bus_type *bus; | ||
99 | |||
100 | DPRINTK(""); | ||
101 | |||
102 | if (dev == NULL) | ||
103 | return -ENODEV; | ||
104 | |||
105 | xdev = to_xenbus_device(dev); | ||
106 | bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); | ||
107 | if (xdev == NULL) | ||
108 | return -ENODEV; | ||
109 | |||
110 | /* stuff we want to pass to /sbin/hotplug */ | ||
111 | if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) | ||
112 | return -ENOMEM; | ||
113 | |||
114 | if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) | ||
115 | return -ENOMEM; | ||
116 | |||
117 | if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) | ||
118 | return -ENOMEM; | ||
119 | |||
120 | if (dev->driver) { | ||
121 | drv = to_xenbus_driver(dev->driver); | ||
122 | if (drv && drv->uevent) | ||
123 | return drv->uevent(xdev, env); | ||
124 | } | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | /* backend/<typename>/<frontend-uuid>/<name> */ | ||
130 | static int xenbus_probe_backend_unit(struct xen_bus_type *bus, | ||
131 | const char *dir, | ||
132 | const char *type, | ||
133 | const char *name) | ||
134 | { | ||
135 | char *nodename; | ||
136 | int err; | ||
137 | |||
138 | nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); | ||
139 | if (!nodename) | ||
140 | return -ENOMEM; | ||
141 | |||
142 | DPRINTK("%s\n", nodename); | ||
143 | |||
144 | err = xenbus_probe_node(bus, type, nodename); | ||
145 | kfree(nodename); | ||
146 | return err; | ||
147 | } | ||
148 | |||
149 | /* backend/<typename>/<frontend-domid> */ | ||
150 | static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, | ||
151 | const char *domid) | ||
152 | { | ||
153 | char *nodename; | ||
154 | int err = 0; | ||
155 | char **dir; | ||
156 | unsigned int i, dir_n = 0; | ||
157 | |||
158 | DPRINTK(""); | ||
159 | |||
160 | nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); | ||
161 | if (!nodename) | ||
162 | return -ENOMEM; | ||
163 | |||
164 | dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); | ||
165 | if (IS_ERR(dir)) { | ||
166 | kfree(nodename); | ||
167 | return PTR_ERR(dir); | ||
168 | } | ||
169 | |||
170 | for (i = 0; i < dir_n; i++) { | ||
171 | err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); | ||
172 | if (err) | ||
173 | break; | ||
174 | } | ||
175 | kfree(dir); | ||
176 | kfree(nodename); | ||
177 | return err; | ||
178 | } | ||
179 | |||
180 | static void frontend_changed(struct xenbus_watch *watch, | ||
181 | const char **vec, unsigned int len) | ||
182 | { | ||
183 | xenbus_otherend_changed(watch, vec, len, 0); | ||
184 | } | ||
185 | |||
186 | static struct device_attribute xenbus_backend_dev_attrs[] = { | ||
187 | __ATTR_NULL | ||
188 | }; | ||
189 | |||
190 | static struct xen_bus_type xenbus_backend = { | ||
191 | .root = "backend", | ||
192 | .levels = 3, /* backend/type/<frontend>/<id> */ | ||
193 | .get_bus_id = backend_bus_id, | ||
194 | .probe = xenbus_probe_backend, | ||
195 | .otherend_changed = frontend_changed, | ||
196 | .bus = { | ||
197 | .name = "xen-backend", | ||
198 | .match = xenbus_match, | ||
199 | .uevent = xenbus_uevent_backend, | ||
200 | .probe = xenbus_dev_probe, | ||
201 | .remove = xenbus_dev_remove, | ||
202 | .shutdown = xenbus_dev_shutdown, | ||
203 | .dev_attrs = xenbus_backend_dev_attrs, | ||
204 | }, | ||
205 | }; | ||
206 | |||
207 | static void backend_changed(struct xenbus_watch *watch, | ||
208 | const char **vec, unsigned int len) | ||
209 | { | ||
210 | DPRINTK(""); | ||
211 | |||
212 | xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); | ||
213 | } | ||
214 | |||
215 | static struct xenbus_watch be_watch = { | ||
216 | .node = "backend", | ||
217 | .callback = backend_changed, | ||
218 | }; | ||
219 | |||
220 | static int read_frontend_details(struct xenbus_device *xendev) | ||
221 | { | ||
222 | return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); | ||
223 | } | ||
224 | |||
225 | int xenbus_dev_is_online(struct xenbus_device *dev) | ||
226 | { | ||
227 | int rc, val; | ||
228 | |||
229 | rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); | ||
230 | if (rc != 1) | ||
231 | val = 0; /* no online node present */ | ||
232 | |||
233 | return val; | ||
234 | } | ||
235 | EXPORT_SYMBOL_GPL(xenbus_dev_is_online); | ||
236 | |||
237 | int __xenbus_register_backend(struct xenbus_driver *drv, | ||
238 | struct module *owner, const char *mod_name) | ||
239 | { | ||
240 | drv->read_otherend_details = read_frontend_details; | ||
241 | |||
242 | return xenbus_register_driver_common(drv, &xenbus_backend, | ||
243 | owner, mod_name); | ||
244 | } | ||
245 | EXPORT_SYMBOL_GPL(__xenbus_register_backend); | ||
246 | |||
247 | static int backend_probe_and_watch(struct notifier_block *notifier, | ||
248 | unsigned long event, | ||
249 | void *data) | ||
250 | { | ||
251 | /* Enumerate devices in xenstore and watch for changes. */ | ||
252 | xenbus_probe_devices(&xenbus_backend); | ||
253 | register_xenbus_watch(&be_watch); | ||
254 | |||
255 | return NOTIFY_DONE; | ||
256 | } | ||
257 | |||
258 | static int __init xenbus_probe_backend_init(void) | ||
259 | { | ||
260 | static struct notifier_block xenstore_notifier = { | ||
261 | .notifier_call = backend_probe_and_watch | ||
262 | }; | ||
263 | int err; | ||
264 | |||
265 | DPRINTK(""); | ||
266 | |||
267 | /* Register ourselves with the kernel bus subsystem */ | ||
268 | err = bus_register(&xenbus_backend.bus); | ||
269 | if (err) | ||
270 | return err; | ||
271 | |||
272 | register_xenstore_notifier(&xenstore_notifier); | ||
273 | |||
274 | return 0; | ||
275 | } | ||
276 | subsys_initcall(xenbus_probe_backend_init); | ||
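The backend half registers its bus at subsys_initcall time and defers enumeration to a xenstore notifier, so nothing touches the store before xenbus_probe() has fired the chain. A hedged sketch of how a backend driver would register against this split bus follows; "demoback" and its callbacks are hypothetical, and only __xenbus_register_backend() and the xenbus_driver fields used here come from this patch or <xen/xenbus.h>:

#include <linux/module.h>
#include <xen/xenbus.h>

static const struct xenbus_device_id demoback_ids[] = {
	{ "demo" },	/* matches backend/demo/... nodes */
	{ "" }
};

static int demoback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	/* Allocate per-device state, read the frontend's details, ... */
	return 0;
}

static struct xenbus_driver demoback_driver = {
	.name  = "demoback",
	.owner = THIS_MODULE,
	.ids   = demoback_ids,
	.probe = demoback_probe,
};

static int __init demoback_init(void)
{
	/* The backend core fills in read_otherend_details for us. */
	return __xenbus_register_backend(&demoback_driver, THIS_MODULE,
					 KBUILD_MODNAME);
}
module_init(demoback_init);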
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c new file mode 100644 index 000000000000..5bcc2d6cf129 --- /dev/null +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c | |||
@@ -0,0 +1,294 @@ | |||
1 | #define DPRINTK(fmt, args...) \ | ||
2 | pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ | ||
3 | __func__, __LINE__, ##args) | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/err.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/ctype.h> | ||
9 | #include <linux/fcntl.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/proc_fs.h> | ||
12 | #include <linux/notifier.h> | ||
13 | #include <linux/kthread.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/io.h> | ||
16 | |||
17 | #include <asm/page.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/xen/hypervisor.h> | ||
20 | #include <xen/xenbus.h> | ||
21 | #include <xen/events.h> | ||
22 | #include <xen/page.h> | ||
23 | |||
24 | #include <xen/platform_pci.h> | ||
25 | |||
26 | #include "xenbus_comms.h" | ||
27 | #include "xenbus_probe.h" | ||
28 | |||
29 | |||
30 | /* device/<type>/<id> => <type>-<id> */ | ||
31 | static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) | ||
32 | { | ||
33 | nodename = strchr(nodename, '/'); | ||
34 | if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { | ||
35 | printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); | ||
36 | return -EINVAL; | ||
37 | } | ||
38 | |||
39 | strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); | ||
40 | if (!strchr(bus_id, '/')) { | ||
41 | printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); | ||
42 | return -EINVAL; | ||
43 | } | ||
44 | *strchr(bus_id, '/') = '-'; | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | /* device/<typename>/<name> */ | ||
49 | static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, | ||
50 | const char *name) | ||
51 | { | ||
52 | char *nodename; | ||
53 | int err; | ||
54 | |||
55 | nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); | ||
56 | if (!nodename) | ||
57 | return -ENOMEM; | ||
58 | |||
59 | DPRINTK("%s", nodename); | ||
60 | |||
61 | err = xenbus_probe_node(bus, type, nodename); | ||
62 | kfree(nodename); | ||
63 | return err; | ||
64 | } | ||
65 | |||
66 | static int xenbus_uevent_frontend(struct device *_dev, | ||
67 | struct kobj_uevent_env *env) | ||
68 | { | ||
69 | struct xenbus_device *dev = to_xenbus_device(_dev); | ||
70 | |||
71 | if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) | ||
72 | return -ENOMEM; | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | |||
78 | static void backend_changed(struct xenbus_watch *watch, | ||
79 | const char **vec, unsigned int len) | ||
80 | { | ||
81 | xenbus_otherend_changed(watch, vec, len, 1); | ||
82 | } | ||
83 | |||
84 | static struct device_attribute xenbus_frontend_dev_attrs[] = { | ||
85 | __ATTR_NULL | ||
86 | }; | ||
87 | |||
88 | static struct xen_bus_type xenbus_frontend = { | ||
89 | .root = "device", | ||
90 | .levels = 2, /* device/type/<id> */ | ||
91 | .get_bus_id = frontend_bus_id, | ||
92 | .probe = xenbus_probe_frontend, | ||
93 | .otherend_changed = backend_changed, | ||
94 | .bus = { | ||
95 | .name = "xen", | ||
96 | .match = xenbus_match, | ||
97 | .uevent = xenbus_uevent_frontend, | ||
98 | .probe = xenbus_dev_probe, | ||
99 | .remove = xenbus_dev_remove, | ||
100 | .shutdown = xenbus_dev_shutdown, | ||
101 | .dev_attrs = xenbus_frontend_dev_attrs, | ||
102 | |||
103 | .suspend = xenbus_dev_suspend, | ||
104 | .resume = xenbus_dev_resume, | ||
105 | }, | ||
106 | }; | ||
107 | |||
108 | static void frontend_changed(struct xenbus_watch *watch, | ||
109 | const char **vec, unsigned int len) | ||
110 | { | ||
111 | DPRINTK(""); | ||
112 | |||
113 | xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); | ||
114 | } | ||
115 | |||
116 | |||
117 | /* We watch for devices appearing and vanishing. */ | ||
118 | static struct xenbus_watch fe_watch = { | ||
119 | .node = "device", | ||
120 | .callback = frontend_changed, | ||
121 | }; | ||
122 | |||
123 | static int read_backend_details(struct xenbus_device *xendev) | ||
124 | { | ||
125 | return xenbus_read_otherend_details(xendev, "backend-id", "backend"); | ||
126 | } | ||
127 | |||
128 | static int is_device_connecting(struct device *dev, void *data) | ||
129 | { | ||
130 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
131 | struct device_driver *drv = data; | ||
132 | struct xenbus_driver *xendrv; | ||
133 | |||
134 | /* | ||
135 | * A device with no driver will never connect. We care only about | ||
136 | * devices which should currently be in the process of connecting. | ||
137 | */ | ||
138 | if (!dev->driver) | ||
139 | return 0; | ||
140 | |||
141 | /* Is this search limited to a particular driver? */ | ||
142 | if (drv && (dev->driver != drv)) | ||
143 | return 0; | ||
144 | |||
145 | xendrv = to_xenbus_driver(dev->driver); | ||
146 | return (xendev->state < XenbusStateConnected || | ||
147 | (xendev->state == XenbusStateConnected && | ||
148 | xendrv->is_ready && !xendrv->is_ready(xendev))); | ||
149 | } | ||
150 | |||
151 | static int exists_connecting_device(struct device_driver *drv) | ||
152 | { | ||
153 | return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, | ||
154 | is_device_connecting); | ||
155 | } | ||
156 | |||
157 | static int print_device_status(struct device *dev, void *data) | ||
158 | { | ||
159 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
160 | struct device_driver *drv = data; | ||
161 | |||
162 | /* Is this operation limited to a particular driver? */ | ||
163 | if (drv && (dev->driver != drv)) | ||
164 | return 0; | ||
165 | |||
166 | if (!dev->driver) { | ||
167 | /* Information only: is this too noisy? */ | ||
168 | printk(KERN_INFO "XENBUS: Device with no driver: %s\n", | ||
169 | xendev->nodename); | ||
170 | } else if (xendev->state < XenbusStateConnected) { | ||
171 | enum xenbus_state rstate = XenbusStateUnknown; | ||
172 | if (xendev->otherend) | ||
173 | rstate = xenbus_read_driver_state(xendev->otherend); | ||
174 | printk(KERN_WARNING "XENBUS: Timeout connecting " | ||
175 | "to device: %s (local state %d, remote state %d)\n", | ||
176 | xendev->nodename, xendev->state, rstate); | ||
177 | } | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | /* We only wait for device setup after most initcalls have run. */ | ||
183 | static int ready_to_wait_for_devices; | ||
184 | |||
185 | /* | ||
186 | * On a 5-minute timeout, wait for all devices currently configured. We need | ||
187 | * to do this to guarantee that the filesystems and / or network devices | ||
188 | * needed for boot are available, before we can allow the boot to proceed. | ||
189 | * | ||
190 | * This needs to be on a late_initcall, to happen after the frontend device | ||
191 | * drivers have been initialised, but before the root fs is mounted. | ||
192 | * | ||
193 | * A possible improvement here would be to have the tools add a per-device | ||
194 | * flag to the store entry, indicating whether it is needed at boot time. | ||
195 | * This would allow people who knew what they were doing to accelerate their | ||
196 | * boot slightly, but of course needs tools or manual intervention to set up | ||
197 | * those flags correctly. | ||
198 | */ | ||
199 | static void wait_for_devices(struct xenbus_driver *xendrv) | ||
200 | { | ||
201 | unsigned long start = jiffies; | ||
202 | struct device_driver *drv = xendrv ? &xendrv->driver : NULL; | ||
203 | unsigned int seconds_waited = 0; | ||
204 | |||
205 | if (!ready_to_wait_for_devices || !xen_domain()) | ||
206 | return; | ||
207 | |||
208 | while (exists_connecting_device(drv)) { | ||
209 | if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { | ||
210 | if (!seconds_waited) | ||
211 | printk(KERN_WARNING "XENBUS: Waiting for " | ||
212 | "devices to initialise: "); | ||
213 | seconds_waited += 5; | ||
214 | printk("%us...", 300 - seconds_waited); | ||
215 | if (seconds_waited == 300) | ||
216 | break; | ||
217 | } | ||
218 | |||
219 | schedule_timeout_interruptible(HZ/10); | ||
220 | } | ||
221 | |||
222 | if (seconds_waited) | ||
223 | printk("\n"); | ||
224 | |||
225 | bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, | ||
226 | print_device_status); | ||
227 | } | ||
228 | |||
229 | int __xenbus_register_frontend(struct xenbus_driver *drv, | ||
230 | struct module *owner, const char *mod_name) | ||
231 | { | ||
232 | int ret; | ||
233 | |||
234 | drv->read_otherend_details = read_backend_details; | ||
235 | |||
236 | ret = xenbus_register_driver_common(drv, &xenbus_frontend, | ||
237 | owner, mod_name); | ||
238 | if (ret) | ||
239 | return ret; | ||
240 | |||
241 | /* If this driver is loaded as a module wait for devices to attach. */ | ||
242 | wait_for_devices(drv); | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | EXPORT_SYMBOL_GPL(__xenbus_register_frontend); | ||
247 | |||
248 | static int frontend_probe_and_watch(struct notifier_block *notifier, | ||
249 | unsigned long event, | ||
250 | void *data) | ||
251 | { | ||
252 | /* Enumerate devices in xenstore and watch for changes. */ | ||
253 | xenbus_probe_devices(&xenbus_frontend); | ||
254 | register_xenbus_watch(&fe_watch); | ||
255 | |||
256 | return NOTIFY_DONE; | ||
257 | } | ||
258 | |||
259 | |||
260 | static int __init xenbus_probe_frontend_init(void) | ||
261 | { | ||
262 | static struct notifier_block xenstore_notifier = { | ||
263 | .notifier_call = frontend_probe_and_watch | ||
264 | }; | ||
265 | int err; | ||
266 | |||
267 | DPRINTK(""); | ||
268 | |||
269 | /* Register ourselves with the kernel bus subsystem */ | ||
270 | err = bus_register(&xenbus_frontend.bus); | ||
271 | if (err) | ||
272 | return err; | ||
273 | |||
274 | register_xenstore_notifier(&xenstore_notifier); | ||
275 | |||
276 | return 0; | ||
277 | } | ||
278 | subsys_initcall(xenbus_probe_frontend_init); | ||
279 | |||
280 | #ifndef MODULE | ||
281 | static int __init boot_wait_for_devices(void) | ||
282 | { | ||
283 | if (xen_hvm_domain() && !xen_platform_pci_unplug) | ||
284 | return -ENODEV; | ||
285 | |||
286 | ready_to_wait_for_devices = 1; | ||
287 | wait_for_devices(NULL); | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | late_initcall(boot_wait_for_devices); | ||
292 | #endif | ||
293 | |||
294 | MODULE_LICENSE("GPL"); | ||